
[09/26] mm: Provide a means of invalidation without using launder_folio

Message ID 20240328163424.2781320-10-dhowells@redhat.com
State New
Series netfs, afs, 9p, cifs: Rework netfs to use ->writepages() to copy to cache

Commit Message

David Howells March 28, 2024, 4:34 p.m. UTC
Implement a replacement for launder_folio.  The key feature of
invalidate_inode_pages2() is that it locks each folio individually, unmaps
it to prevent interference from mmap'd accesses and calls the
->launder_folio() address_space op to flush it.  This has problems:
firstly, each folio is written individually as one or more small writes;
secondly, adjacent folios cannot easily be coalesced into larger writes;
thirdly, it's yet another address_space op to implement.
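
To illustrate the first point: a typical ->launder_folio implementation
has roughly the following shape (a sketch only; myfs_write_folio_locked()
stands in for a filesystem's own synchronous write-one-folio helper and is
not anything defined by this patch):

	static int myfs_launder_folio(struct folio *folio)
	{
		struct writeback_control wbc = {
			.sync_mode	= WB_SYNC_ALL,	/* Can't skip the write */
			.nr_to_write	= 1,
		};
		int err = 0;

		/* The caller holds the folio lock; let any write already
		 * in flight finish before deciding whether to write.
		 */
		folio_wait_writeback(folio);

		if (folio_clear_dirty_for_io(folio))
			/* One small, synchronous write per dirty folio. */
			err = myfs_write_folio_locked(folio, &wbc);

		return err;
	}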

Instead, take the invalidate lock to make anyone wanting to add a folio to
the inode wait, then unmap all the folios if the inode has any mmaps, then,
conditionally, use ->writepages() to flush any dirty data back and finally
discard all the pages.

The invalidate lock prevents ->read_iter(), ->write_iter() and mmap faults
from adding pages for the duration.
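
For illustration, a netfs might then use this along the lines of the
following sketch (the myfs_* functions are hypothetical callers, not part
of this patch):

	/* The server says a third party changed the file: our copy,
	 * including any dirty data, is stale, so just invalidate.
	 */
	static int myfs_remote_file_changed(struct inode *inode)
	{
		return filemap_invalidate_inode(inode, false);
	}

	/* We're losing permission to cache the file, but our data is
	 * still good: write it back first, then discard the pagecache.
	 */
	static int myfs_stop_caching(struct inode *inode)
	{
		return filemap_invalidate_inode(inode, true);
	}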

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Matthew Wilcox <willy@infradead.org>
cc: Miklos Szeredi <miklos@szeredi.hu>
cc: Trond Myklebust <trond.myklebust@hammerspace.com>
cc: Christoph Hellwig <hch@lst.de>
cc: Andrew Morton <akpm@linux-foundation.org>
cc: Alexander Viro <viro@zeniv.linux.org.uk>
cc: Christian Brauner <brauner@kernel.org>
cc: Jeff Layton <jlayton@kernel.org>
cc: linux-mm@kvack.org
cc: linux-fsdevel@vger.kernel.org
cc: netfs@lists.linux.dev
cc: v9fs@lists.linux.dev
cc: linux-afs@lists.infradead.org
cc: ceph-devel@vger.kernel.org
cc: linux-cifs@vger.kernel.org
cc: linux-nfs@vger.kernel.org
cc: devel@lists.orangefs.org
---
 include/linux/pagemap.h |  1 +
 mm/filemap.c            | 46 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 47 insertions(+)

Comments

Jeff Layton April 15, 2024, 11:41 a.m. UTC | #1
On Thu, 2024-03-28 at 16:34 +0000, David Howells wrote:
> Implement a replacement for launder_folio.  The key feature of
> invalidate_inode_pages2() is that it locks each folio individually, unmaps
> it to prevent interference from mmap'd accesses and calls the
> ->launder_folio() address_space op to flush it.  This has problems:
> firstly, each folio is written individually as one or more small writes;
> secondly, adjacent folios cannot easily be coalesced into larger writes;
> thirdly, it's yet another address_space op to implement.
> 
> Instead, take the invalidate lock to make anyone wanting to add a folio to
> the inode wait, then unmap all the folios if the inode has any mmaps, then,
> conditionally, use ->writepages() to flush any dirty data back and finally
> discard all the pages.
> 
> The invalidate lock prevents ->read_iter(), ->write_iter() and mmap faults
> from adding pages for the duration.
> 
> [...]

I'd have liked to see the first caller of this function too.
David Howells April 17, 2024, 9:02 a.m. UTC | #2
Jeff Layton <jlayton@kernel.org> wrote:

> I'd have liked to see the first caller of this function too.

Looking again at my code, I also need to make netfs_unbuffered_write_iter()
use this function.  I missed it before because that wasn't calling
invalidate_inode_pages2_range() directly, but rather going through
kiocb_invalidate_pages().

So I'll add those changes into this patch to give you a first caller.  For
that I'll also need to make filemap_invalidate_inode() take a byte range.
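
Roughly, the ranged variant would look something like this untested sketch
(the exact signature and details may yet change):

	int filemap_invalidate_inode(struct inode *inode, bool flush,
				     loff_t start, loff_t end)
	{
		struct address_space *mapping = inode->i_mapping;
		pgoff_t first = start >> PAGE_SHIFT, last = end >> PAGE_SHIFT;
		pgoff_t nr = end == LLONG_MAX ? ULONG_MAX : last - first + 1;

		if (!mapping || !mapping->nrpages || end < start)
			goto out;

		/* Prevent new folios from being added to the inode. */
		filemap_invalidate_lock(mapping);

		if (!mapping->nrpages)
			goto unlock;

		unmap_mapping_pages(mapping, first, nr, false);

		/* Write back the data in the given range if we're asked to. */
		if (flush) {
			struct writeback_control wbc = {
				.sync_mode	= WB_SYNC_ALL,
				.nr_to_write	= LONG_MAX,
				.range_start	= start,
				.range_end	= end,
			};

			filemap_fdatawrite_wbc(mapping, &wbc);
		}

		/* Wait for writeback to complete on all folios and discard. */
		truncate_inode_pages_range(mapping, start, end);

	unlock:
		filemap_invalidate_unlock(mapping);
	out:
		return filemap_check_errors(mapping);
	}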

David

Patch

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 2df35e65557d..4eb3d4177a53 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -40,6 +40,7 @@ int filemap_fdatawait_keep_errors(struct address_space *mapping);
 int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
 int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
 		loff_t start_byte, loff_t end_byte);
+int filemap_invalidate_inode(struct inode *inode, bool flush);
 
 static inline int filemap_fdatawait(struct address_space *mapping)
 {
diff --git a/mm/filemap.c b/mm/filemap.c
index 25983f0f96e3..087f685107a5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -4134,6 +4134,52 @@ bool filemap_release_folio(struct folio *folio, gfp_t gfp)
 }
 EXPORT_SYMBOL(filemap_release_folio);
 
+/**
+ * filemap_invalidate_inode - Invalidate/forcibly write back an inode's pagecache
+ * @inode: The inode to flush
+ * @flush: Set to write back rather than simply invalidate.
+ *
+ * Invalidate all the folios on an inode, possibly writing them back first.
+ * Whilst the operation is undertaken, the invalidate lock is held to prevent
+ * new folios from being installed.
+ */
+int filemap_invalidate_inode(struct inode *inode, bool flush)
+{
+	struct address_space *mapping = inode->i_mapping;
+
+	if (!mapping || !mapping->nrpages)
+		goto out;
+
+	/* Prevent new folios from being added to the inode. */
+	filemap_invalidate_lock(mapping);
+
+	if (!mapping->nrpages)
+		goto unlock;
+
+	unmap_mapping_pages(mapping, 0, ULONG_MAX, false);
+
+	/* Write back the data if we're asked to. */
+	if (flush) {
+		struct writeback_control wbc = {
+			.sync_mode	= WB_SYNC_ALL,
+			.nr_to_write	= LONG_MAX,
+			.range_start	= 0,
+			.range_end	= LLONG_MAX,
+		};
+
+		filemap_fdatawrite_wbc(mapping, &wbc);
+	}
+
+	/* Wait for writeback to complete on all folios and discard. */
+	truncate_inode_pages_range(mapping, 0, LLONG_MAX);
+
+unlock:
+	filemap_invalidate_unlock(mapping);
+out:
+	return filemap_check_errors(mapping);
+}
+EXPORT_SYMBOL(filemap_invalidate_inode);
+
 #ifdef CONFIG_CACHESTAT_SYSCALL
 /**
  * filemap_cachestat() - compute the page cache statistics of a mapping