Message ID | 20231213152350.431591-11-dhowells@redhat.com |
---|---|
State | New |
Series | netfs, afs, 9p: Delegate high-level I/O to netfslib |
On Wed, 2023-12-13 at 15:23 +0000, David Howells wrote:
> Provide default invalidate_folio and release_folio calls. These will need
> to interact with invalidation correctly at some point. They will be needed
> if netfslib is to make use of folio->private for its own purposes.
> 
> Signed-off-by: David Howells <dhowells@redhat.com>
> Reviewed-by: Jeff Layton <jlayton@kernel.org>
> cc: linux-cachefs@redhat.com
> cc: linux-fsdevel@vger.kernel.org
> cc: linux-mm@kvack.org
> ---
>  fs/9p/vfs_addr.c      | 33 ++-------------------------
>  fs/afs/file.c         | 53 ++++---------------------------------------
>  fs/ceph/addr.c        | 24 ++------------------
>  fs/netfs/misc.c       | 42 ++++++++++++++++++++++++++++++++++
>  include/linux/netfs.h |  6 +++--
>  5 files changed, 54 insertions(+), 104 deletions(-)

[quote of the 9p and afs hunks trimmed -- the full patch follows below]

> diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
> index 654f408a0aca..500a87b68a9a 100644
> --- a/fs/ceph/addr.c
> +++ b/fs/ceph/addr.c
> @@ -159,27 +159,7 @@ static void ceph_invalidate_folio(struct folio *folio, size_t offset,
>                  ceph_put_snap_context(snapc);
>          }
>  
> -        folio_wait_fscache(folio);
> -}
> -
> -static bool ceph_release_folio(struct folio *folio, gfp_t gfp)
> -{
> -        struct inode *inode = folio->mapping->host;
> -        struct ceph_client *cl = ceph_inode_to_client(inode);
> -
> -        doutc(cl, "%llx.%llx idx %lu (%sdirty)\n", ceph_vinop(inode),
> -              folio->index, folio_test_dirty(folio) ? "" : "not ");
> -
> -        if (folio_test_private(folio))
> -                return false;
> -
> -        if (folio_test_fscache(folio)) {
> -                if (current_is_kswapd() || !(gfp & __GFP_FS))
> -                        return false;
> -                folio_wait_fscache(folio);
> -        }
> -        ceph_fscache_note_page_release(inode);

I think this is the only call to ceph_fscache_note_page_release, so that can
likely be removed as well.

> -        return true;
> +        netfs_invalidate_folio(folio, offset, length);
>  }

[...]
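For context on the remark above: as far as I can tell, ceph_fscache_note_page_release() is a thin static inline in fs/ceph/cache.h that only resolves the inode to its fscache cookie. A paraphrased sketch (not part of this patch) of what would be left without callers once ->release_folio points at netfs_release_folio:

/* Paraphrased from fs/ceph/cache.h (CONFIG_CEPH_FSCACHE build).  With ceph's
 * ->release_folio now going through netfs_release_folio(), which calls
 * fscache_note_page_release(netfs_i_cookie(...)) itself, this wrapper loses
 * its only caller and could be dropped in a follow-up patch.
 */
static inline void ceph_fscache_note_page_release(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);

        fscache_note_page_release(ceph_fscache_cookie(ci));
}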
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 131b83c31f85..055b672a247d 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -88,35 +88,6 @@ const struct netfs_request_ops v9fs_req_ops = {
         .issue_read = v9fs_issue_read,
 };
 
-/**
- * v9fs_release_folio - release the private state associated with a folio
- * @folio: The folio to be released
- * @gfp: The caller's allocation restrictions
- *
- * Returns true if the page can be released, false otherwise.
- */
-
-static bool v9fs_release_folio(struct folio *folio, gfp_t gfp)
-{
-        if (folio_test_private(folio))
-                return false;
-#ifdef CONFIG_9P_FSCACHE
-        if (folio_test_fscache(folio)) {
-                if (current_is_kswapd() || !(gfp & __GFP_FS))
-                        return false;
-                folio_wait_fscache(folio);
-        }
-        fscache_note_page_release(v9fs_inode_cookie(V9FS_I(folio_inode(folio))));
-#endif
-        return true;
-}
-
-static void v9fs_invalidate_folio(struct folio *folio, size_t offset,
-                                  size_t length)
-{
-        folio_wait_fscache(folio);
-}
-
 #ifdef CONFIG_9P_FSCACHE
 static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
                                      bool was_async)
@@ -324,8 +295,8 @@ const struct address_space_operations v9fs_addr_operations = {
         .writepage = v9fs_vfs_writepage,
         .write_begin = v9fs_write_begin,
         .write_end = v9fs_write_end,
-        .release_folio = v9fs_release_folio,
-        .invalidate_folio = v9fs_invalidate_folio,
+        .release_folio = netfs_release_folio,
+        .invalidate_folio = netfs_invalidate_folio,
         .launder_folio = v9fs_launder_folio,
         .direct_IO = v9fs_direct_IO,
 };
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 5e2bca3b02fd..c5013ec3c1dc 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -20,9 +20,6 @@
 
 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
 static int afs_symlink_read_folio(struct file *file, struct folio *folio);
-static void afs_invalidate_folio(struct folio *folio, size_t offset,
-                                 size_t length);
-static bool afs_release_folio(struct folio *folio, gfp_t gfp_flags);
 
 static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
 static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos,
@@ -57,8 +54,8 @@ const struct address_space_operations afs_file_aops = {
         .readahead = netfs_readahead,
         .dirty_folio = netfs_dirty_folio,
         .launder_folio = afs_launder_folio,
-        .release_folio = afs_release_folio,
-        .invalidate_folio = afs_invalidate_folio,
+        .release_folio = netfs_release_folio,
+        .invalidate_folio = netfs_invalidate_folio,
         .write_begin = afs_write_begin,
         .write_end = afs_write_end,
         .writepages = afs_writepages,
@@ -67,8 +64,8 @@ const struct address_space_operations afs_file_aops = {
 
 const struct address_space_operations afs_symlink_aops = {
         .read_folio = afs_symlink_read_folio,
-        .release_folio = afs_release_folio,
-        .invalidate_folio = afs_invalidate_folio,
+        .release_folio = netfs_release_folio,
+        .invalidate_folio = netfs_invalidate_folio,
         .migrate_folio = filemap_migrate_folio,
 };
 
@@ -383,48 +380,6 @@ const struct netfs_request_ops afs_req_ops = {
         .issue_read = afs_issue_read,
 };
 
-/*
- * invalidate part or all of a page
- * - release a page and clean up its private data if offset is 0 (indicating
- *   the entire page)
- */
-static void afs_invalidate_folio(struct folio *folio, size_t offset,
-                                 size_t length)
-{
-        _enter("{%lu},%zu,%zu", folio->index, offset, length);
-
-        folio_wait_fscache(folio);
-        _leave("");
-}
-
-/*
- * release a page and clean up its private state if it's not busy
- * - return true if the page can now be released, false if not
- */
-static bool afs_release_folio(struct folio *folio, gfp_t gfp)
-{
-        struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
-
-        _enter("{{%llx:%llu}[%lu],%lx},%x",
-               vnode->fid.vid, vnode->fid.vnode, folio_index(folio), folio->flags,
-               gfp);
-
-        /* deny if folio is being written to the cache and the caller hasn't
-         * elected to wait */
-#ifdef CONFIG_AFS_FSCACHE
-        if (folio_test_fscache(folio)) {
-                if (current_is_kswapd() || !(gfp & __GFP_FS))
-                        return false;
-                folio_wait_fscache(folio);
-        }
-        fscache_note_page_release(afs_vnode_cache(vnode));
-#endif
-
-        /* Indicate that the folio can be released */
-        _leave(" = T");
-        return true;
-}
-
 static void afs_add_open_mmap(struct afs_vnode *vnode)
 {
         if (atomic_inc_return(&vnode->cb_nr_mmap) == 1) {
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 654f408a0aca..500a87b68a9a 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -159,27 +159,7 @@ static void ceph_invalidate_folio(struct folio *folio, size_t offset,
                 ceph_put_snap_context(snapc);
         }
 
-        folio_wait_fscache(folio);
-}
-
-static bool ceph_release_folio(struct folio *folio, gfp_t gfp)
-{
-        struct inode *inode = folio->mapping->host;
-        struct ceph_client *cl = ceph_inode_to_client(inode);
-
-        doutc(cl, "%llx.%llx idx %lu (%sdirty)\n", ceph_vinop(inode),
-              folio->index, folio_test_dirty(folio) ? "" : "not ");
-
-        if (folio_test_private(folio))
-                return false;
-
-        if (folio_test_fscache(folio)) {
-                if (current_is_kswapd() || !(gfp & __GFP_FS))
-                        return false;
-                folio_wait_fscache(folio);
-        }
-        ceph_fscache_note_page_release(inode);
-        return true;
+        netfs_invalidate_folio(folio, offset, length);
 }
 
 static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
@@ -1585,7 +1565,7 @@ const struct address_space_operations ceph_aops = {
         .write_end = ceph_write_end,
         .dirty_folio = ceph_dirty_folio,
         .invalidate_folio = ceph_invalidate_folio,
-        .release_folio = ceph_release_folio,
+        .release_folio = netfs_release_folio,
         .direct_IO = noop_direct_IO,
 };
 
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index 68baf55c47a4..d946d85764de 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -84,3 +84,45 @@ void netfs_clear_inode_writeback(struct inode *inode, const void *aux)
         }
 }
 EXPORT_SYMBOL(netfs_clear_inode_writeback);
+
+/*
+ * netfs_invalidate_folio - Invalidate or partially invalidate a folio
+ * @folio: Folio proposed for release
+ * @offset: Offset of the invalidated region
+ * @length: Length of the invalidated region
+ *
+ * Invalidate part or all of a folio for a network filesystem. The folio will
+ * be removed afterwards if the invalidated region covers the entire folio.
+ */
+void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
+{
+        _enter("{%lx},%zx,%zx", folio_index(folio), offset, length);
+
+        folio_wait_fscache(folio);
+}
+EXPORT_SYMBOL(netfs_invalidate_folio);
+
+/**
+ * netfs_release_folio - Try to release a folio
+ * @folio: Folio proposed for release
+ * @gfp: Flags qualifying the release
+ *
+ * Request release of a folio and clean up its private state if it's not busy.
+ * Returns true if the folio can now be released, false if not
+ */
+bool netfs_release_folio(struct folio *folio, gfp_t gfp)
+{
+        struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
+
+        if (folio_test_private(folio))
+                return false;
+        if (folio_test_fscache(folio)) {
+                if (current_is_kswapd() || !(gfp & __GFP_FS))
+                        return false;
+                folio_wait_fscache(folio);
+        }
+
+        fscache_note_page_release(netfs_i_cookie(ctx));
+        return true;
+}
+EXPORT_SYMBOL(netfs_release_folio);
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 06f57d9d09f6..8efbfd3b2820 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -293,11 +293,13 @@ struct readahead_control;
 void netfs_readahead(struct readahead_control *);
 int netfs_read_folio(struct file *, struct folio *);
 int netfs_write_begin(struct netfs_inode *, struct file *,
-                struct address_space *, loff_t pos, unsigned int len,
-                struct folio **, void **fsdata);
+                      struct address_space *, loff_t pos, unsigned int len,
+                      struct folio **, void **fsdata);
 bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio);
 int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
 void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
+void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
+bool netfs_release_folio(struct folio *folio, gfp_t gfp);
 
 void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
 void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
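To show what the two new exports buy a filesystem, here is a minimal, hypothetical sketch (the myfs_* name is invented; the netfs_* helpers are the ones declared in the include/linux/netfs.h hunk above) of an address_space_operations table that leans on the netfslib defaults instead of open-coding the PG_fscache wait and cache-cookie bookkeeping:

#include <linux/fs.h>           /* struct address_space_operations */
#include <linux/netfs.h>        /* netfs_read_folio() and friends */

/* Hypothetical netfs-backed filesystem: with this patch, release_folio and
 * invalidate_folio no longer need per-filesystem implementations that wait
 * on PG_fscache and call fscache_note_page_release() -- the netfs defaults
 * do both against the cookie in the embedded netfs_inode.
 */
static const struct address_space_operations myfs_aops = {
        .read_folio        = netfs_read_folio,
        .readahead         = netfs_readahead,
        .dirty_folio       = netfs_dirty_folio,
        .release_folio     = netfs_release_folio,
        .invalidate_folio  = netfs_invalidate_folio,
};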