[PATCH 06/11] netfs: Fix the trace displayed for the total overwrite of a streamed write

David Howells <dhowells@redhat.com>
Mon Apr 20 01:36:57 PDT 2026


Change netfs_perform_write() to carry the trace value in a variable and
emit it at the common "copied:" label, making it easier to select the
value displayed.

Fix the display of netfs_streaming_cont_filled_page in the
"folio_now_filled:" section.  It should only be displayed if we came from
the section that filled in a gap in an already-streamed write (which would
otherwise display netfs_streaming_write_cont).  If we came from the section
that just overwrites the entire folio (which would ordinarily display
netfs_whole_folio_modify), display the new netfs_whole_folio_modify_filled
instead.
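
As an illustration of the pattern (a minimal, standalone sketch only:
handle_write(), emit_trace() and the boolean flags are hypothetical
stand-ins for the real netfs code and the trace_netfs_folio() tracepoint),
each branch records its trace value and jumps to a single emission point:

        #include <stdbool.h>
        #include <stdio.h>

        enum folio_trace {
                whole_folio_modify,         /* plain whole-folio overwrite */
                whole_folio_modify_filled,  /* ...that filled a streamed folio */
                streaming_write_cont,       /* continuation of a streamed write */
                streaming_cont_filled_page, /* ...that filled the folio */
        };

        static void emit_trace(enum folio_trace t)
        {
                printf("trace %d\n", (int)t); /* stands in for trace_netfs_folio() */
        }

        static void handle_write(bool whole_overwrite, bool streamed_folio,
                                 bool now_filled)
        {
                enum folio_trace trace;

                if (whole_overwrite) {
                        if (streamed_folio) {
                                /* Total overwrite of a streamed write. */
                                trace = whole_folio_modify_filled;
                                goto folio_now_filled;
                        }
                        trace = whole_folio_modify;
                        goto copied;
                }
                if (now_filled) {
                        /* A continuation that filled in the last gap. */
                        trace = streaming_cont_filled_page;
                        goto folio_now_filled;
                }
                trace = streaming_write_cont;
                goto copied;

        folio_now_filled:
                /* Clear the folio's streaming-write state here, then... */
        copied:
                emit_trace(trace); /* single emission point */
        }

        int main(void)
        {
                handle_write(true, true, false);  /* -> whole_folio_modify_filled */
                handle_write(false, false, true); /* -> streaming_cont_filled_page */
                return 0;
        }

Keeping a single trace call at the end of the loop body means adding or
renaming a trace value is a one-line change in the branch concerned.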

Fixes: 1bfeb53e990b ("netfs: Fix streaming write being overwritten")
Closes: https://sashiko.dev/#/patchset/20260414082004.3756080-1-dhowells%40redhat.com
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Paulo Alcantara <pc@manguebit.org>
cc: Matthew Wilcox <willy@infradead.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
---
 fs/netfs/buffered_write.c    | 27 ++++++++++++++++-----------
 include/trace/events/netfs.h |  1 +
 2 files changed, 17 insertions(+), 11 deletions(-)

diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index 6399141b4f0a..de82eb6ed473 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -150,6 +150,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 	}
 
 	do {
+		enum netfs_folio_trace trace;
 		struct netfs_folio *finfo;
 		struct netfs_group *group;
 		unsigned long long fpos;
@@ -223,7 +224,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 			if (unlikely(copied == 0))
 				goto copy_failed;
 			netfs_set_group(folio, netfs_group);
-			trace_netfs_folio(folio, netfs_folio_is_uptodate);
+			trace = netfs_folio_is_uptodate;
 			goto copied;
 		}
 
@@ -239,7 +240,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 			folio_zero_segment(folio, offset + copied, flen);
 			__netfs_set_group(folio, netfs_group);
 			folio_mark_uptodate(folio);
-			trace_netfs_folio(folio, netfs_modify_and_clear);
+			trace = netfs_modify_and_clear;
 			goto copied;
 		}
 
@@ -247,11 +248,13 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 		if (!maybe_trouble && offset == 0 && part >= flen) {
 			copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
 			if (likely(copied == part)) {
-				if (finfo)
+				if (finfo) {
+					trace = netfs_whole_folio_modify_filled;
 					goto folio_now_filled;
+				}
 				__netfs_set_group(folio, netfs_group);
 				folio_mark_uptodate(folio);
-				trace_netfs_folio(folio, netfs_whole_folio_modify);
+				trace = netfs_whole_folio_modify;
 				goto copied;
 			}
 			if (copied == 0)
@@ -273,7 +276,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 			if (copied > finfo->dirty_len)
 				finfo->dirty_len = copied;
 			finfo->dirty_offset = 0;
-			trace_netfs_folio(folio, netfs_whole_folio_modify_efault);
+			trace = netfs_whole_folio_modify_efault;
 			goto copied;
 		}
 
@@ -299,7 +302,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 			if (unlikely(copied == 0))
 				goto copy_failed;
 			netfs_set_group(folio, netfs_group);
-			trace_netfs_folio(folio, netfs_just_prefetch);
+			trace = netfs_just_prefetch;
 			goto copied;
 		}
 
@@ -313,7 +316,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 			if (offset == 0 && copied == flen) {
 				__netfs_set_group(folio, netfs_group);
 				folio_mark_uptodate(folio);
-				trace_netfs_folio(folio, netfs_streaming_filled_page);
+				trace = netfs_streaming_filled_page;
 				goto copied;
 			}
 
@@ -328,7 +331,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 			finfo->dirty_len = copied;
 			folio_attach_private(folio, (void *)((unsigned long)finfo |
 							     NETFS_FOLIO_INFO));
-			trace_netfs_folio(folio, netfs_streaming_write);
+			trace = netfs_streaming_write;
 			goto copied;
 		}
 
@@ -341,9 +344,11 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 			if (unlikely(copied == 0))
 				goto copy_failed;
 			finfo->dirty_len += copied;
-			if (finfo->dirty_offset == 0 && finfo->dirty_len == flen)
+			if (finfo->dirty_offset == 0 && finfo->dirty_len == flen) {
+				trace = netfs_streaming_cont_filled_page;
 				goto folio_now_filled;
-			trace_netfs_folio(folio, netfs_streaming_write_cont);
+			}
+			trace = netfs_streaming_write_cont;
 			goto copied;
 		}
 
@@ -364,8 +369,8 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 			folio_detach_private(folio);
 		folio_mark_uptodate(folio);
 		kfree(finfo);
-		trace_netfs_folio(folio, netfs_streaming_cont_filled_page);
 	copied:
+		trace_netfs_folio(folio, trace);
 		flush_dcache_folio(folio);
 
 		/* Update the inode size if we moved the EOF marker */
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index ee7fea84f055..67f6d56c94ce 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -178,6 +178,7 @@
 	EM(netfs_just_prefetch,			"mod-prefetch")	\
 	EM(netfs_whole_folio_modify,		"mod-whole-f")	\
 	EM(netfs_whole_folio_modify_efault,	"mod-whole-f!")	\
+	EM(netfs_whole_folio_modify_filled,	"mod-whole-f+")	\
 	EM(netfs_modify_and_clear,		"mod-n-clear")	\
 	EM(netfs_streaming_write,		"mod-streamw")	\
 	EM(netfs_streaming_write_cont,		"mod-streamw+")	\



