From fcd9ae4f7f3b5fbd549285bab0478a339113620e Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 7 Apr 2021 21:18:55 +0100 Subject: mm/filemap: Pass the file_ra_state in the ractl For readahead_expand(), we need to modify the file ra_state, so pass it down by adding it to the ractl. We have to do this because it's not always the same as f_ra in the struct file that is already being passed. Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: David Howells Tested-by: Jeff Layton Tested-by: Dave Wysochanski Tested-By: Marc Dionne Link: https://lore.kernel.org/r/20210407201857.3582797-2-willy@infradead.org/ Link: https://lore.kernel.org/r/161789067431.6155.8063840447229665720.stgit@warthog.procyon.org.uk/ # v6 --- fs/ext4/verity.c | 2 +- fs/f2fs/file.c | 2 +- fs/f2fs/verity.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ext4/verity.c b/fs/ext4/verity.c index 00e3cbde472e..07438f46b558 100644 --- a/fs/ext4/verity.c +++ b/fs/ext4/verity.c @@ -370,7 +370,7 @@ static struct page *ext4_read_merkle_tree_page(struct inode *inode, pgoff_t index, unsigned long num_ra_pages) { - DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, index); + DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index); struct page *page; index += ext4_verity_metadata_pos(inode) >> PAGE_SHIFT; diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index d26ff2ae3f5e..c1e6f669a0c4 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -4051,7 +4051,7 @@ out: static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len) { - DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, page_idx); + DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx); struct address_space *mapping = inode->i_mapping; struct page *page; pgoff_t redirty_idx = page_idx; diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c index 054ec852b5ea..a7beff28a3c5 100644 --- a/fs/f2fs/verity.c +++ b/fs/f2fs/verity.c @@ -228,7 +228,7 @@ static struct page *f2fs_read_merkle_tree_page(struct inode *inode, pgoff_t index, unsigned long num_ra_pages) { - DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, index); + DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index); struct page *page; index += f2fs_verity_metadata_pos(inode) >> PAGE_SHIFT; -- cgit v1.2.3 From 3a5829fefd3bb50a4d724f44d016c74b8f19b352 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 30 Oct 2020 15:26:31 +0000 Subject: netfs: Make a netfs helper module Make a netfs helper module to manage read request segmentation, caching support and transparent huge page support on behalf of a network filesystem. 
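Only the Kconfig option (NETFS_SUPPORT) is added at this point; the helper code itself arrives in the following patches, and network filesystems that want to use the helpers can then select or depend on this option.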
Signed-off-by: David Howells Reviewed-and-tested-by: Jeff Layton Tested-by: Dave Wysochanski Tested-By: Marc Dionne cc: Matthew Wilcox cc: linux-mm@kvack.org cc: linux-cachefs@redhat.com cc: linux-afs@lists.infradead.org cc: linux-nfs@vger.kernel.org cc: linux-cifs@vger.kernel.org cc: ceph-devel@vger.kernel.org cc: v9fs-developer@lists.sourceforge.net cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/160588496284.3465195.10102643717770106661.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161118135638.1232039.1622182202673126285.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161161031028.2537118.1213974428943508753.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/161340391427.1303470.14884950716721956560.stgit@warthog.procyon.org.uk/ # v3 Link: https://lore.kernel.org/r/161539531569.286939.18317119181653706665.stgit@warthog.procyon.org.uk/ # v4 Link: https://lore.kernel.org/r/161653790328.2770958.6710423217716151549.stgit@warthog.procyon.org.uk/ # v5 Link: https://lore.kernel.org/r/161789071202.6155.16519256513958534906.stgit@warthog.procyon.org.uk/ # v6 --- fs/netfs/Kconfig | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 fs/netfs/Kconfig (limited to 'fs') diff --git a/fs/netfs/Kconfig b/fs/netfs/Kconfig new file mode 100644 index 000000000000..2ebf90e6ca95 --- /dev/null +++ b/fs/netfs/Kconfig @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config NETFS_SUPPORT + tristate "Support for network filesystem high-level I/O" + help + This option enables support for network filesystems, including + helpers for high-level buffered I/O, abstracting out read + segmentation, local caching and transparent huge page support. -- cgit v1.2.3 From 3d3c95046742e4eebaa4b891b0b01cbbed94ebbd Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 13 May 2020 17:41:20 +0100 Subject: netfs: Provide readahead and readpage netfs helpers Add a pair of helper functions: (*) netfs_readahead() (*) netfs_readpage() to do the work of handling a readahead or a readpage, where the page(s) that form part of the request may be split between the local cache, the server or just require clearing, and may be single pages and transparent huge pages. This is all handled within the helper. Note that while both will read from the cache if there is data present, only netfs_readahead() will expand the request beyond what it was asked to do, and only netfs_readahead() will write back to the cache. netfs_readpage(), on the other hand, is synchronous and only fetches the page (which might be a THP) it is asked for. The netfs gives the helper parameters from the VM, the cache cookie it wants to use (or NULL) and a table of operations (only one of which is mandatory): (*) expand_readahead() [optional] Called to allow the netfs to request an expansion of a readahead request to meet its own alignment requirements. This is done by changing rreq->start and rreq->len. (*) clamp_length() [optional] Called to allow the netfs to cut down a subrequest to meet its own boundary requirements. If it does this, the helper will generate additional subrequests until the full request is satisfied. (*) is_still_valid() [optional] Called to find out if the data just read from the cache has been invalidated and must be reread from the server. (*) issue_op() [required] Called to ask the netfs to issue a read to the server. The subrequest describes the read. The read request holds information about the file being accessed. 
The netfs can cache information in rreq->netfs_priv. Upon completion, the netfs should set the error, transferred and can also set FSCACHE_SREQ_CLEAR_TAIL and then call fscache_subreq_terminated(). (*) done() [optional] Called after the pages have been unlocked. The read request is still pinning the file and mapping and may still be pinning pages with PG_fscache. rreq->error indicates any error that has been accumulated. (*) cleanup() [optional] Called when the helper is disposing of a finished read request. This allows the netfs to clear rreq->netfs_priv. Netfs support is enabled with CONFIG_NETFS_SUPPORT=y. It will be built even if CONFIG_FSCACHE=n and in this case much of it should be optimised away, allowing the filesystem to use it even when caching is disabled. Changes: v5: - Comment why netfs_readahead() is putting pages[2]. - Use page_file_mapping() rather than page->mapping[2]. - Use page_index() rather than page->index[2]. - Use set_page_fscache()[3] rather then SetPageFsCache() as this takes an appropriate ref too[4]. v4: - Folded in a kerneldoc comment fix. - Folded in a fix for the error handling in the case that ENOMEM occurs. - Added flag to netfs_subreq_terminated() to indicate that the caller may have been running async and stuff that might sleep needs punting to a workqueue (can't use in_softirq()[1]). Signed-off-by: David Howells Reviewed-and-tested-by: Jeff Layton Tested-by: Dave Wysochanski Tested-By: Marc Dionne cc: Matthew Wilcox cc: linux-mm@kvack.org cc: linux-cachefs@redhat.com cc: linux-afs@lists.infradead.org cc: linux-nfs@vger.kernel.org cc: linux-cifs@vger.kernel.org cc: ceph-devel@vger.kernel.org cc: v9fs-developer@lists.sourceforge.net cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/20210216084230.GA23669@lst.de/ [1] Link: https://lore.kernel.org/r/20210321014202.GF3420@casper.infradead.org/ [2] Link: https://lore.kernel.org/r/2499407.1616505440@warthog.procyon.org.uk/ [3] Link: https://lore.kernel.org/r/CAHk-=wh+2gbF7XEjYc=HV9w_2uVzVf7vs60BPz0gFA=+pUm3ww@mail.gmail.com/ [4] Link: https://lore.kernel.org/r/160588497406.3465195.18003475695899726222.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161118136849.1232039.8923686136144228724.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161161032290.2537118.13400578415247339173.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/161340394873.1303470.6237319335883242536.stgit@warthog.procyon.org.uk/ # v3 Link: https://lore.kernel.org/r/161539537375.286939.16642940088716990995.stgit@warthog.procyon.org.uk/ # v4 Link: https://lore.kernel.org/r/161653795430.2770958.4947584573720000554.stgit@warthog.procyon.org.uk/ # v5 Link: https://lore.kernel.org/r/161789076581.6155.6745849361504760209.stgit@warthog.procyon.org.uk/ # v6 --- fs/Kconfig | 1 + fs/Makefile | 1 + fs/netfs/Makefile | 6 + fs/netfs/internal.h | 61 +++++ fs/netfs/read_helper.c | 725 +++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 794 insertions(+) create mode 100644 fs/netfs/Makefile create mode 100644 fs/netfs/internal.h create mode 100644 fs/netfs/read_helper.c (limited to 'fs') diff --git a/fs/Kconfig b/fs/Kconfig index a55bda4233bb..97e7b77c9309 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -125,6 +125,7 @@ source "fs/overlayfs/Kconfig" menu "Caches" +source "fs/netfs/Kconfig" source "fs/fscache/Kconfig" source "fs/cachefiles/Kconfig" diff --git a/fs/Makefile b/fs/Makefile index 3215fe205256..9c708e1fbe8f 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -67,6 +67,7 @@ 
obj-y += devpts/ obj-$(CONFIG_DLM) += dlm/ # Do not add any filesystems before this line +obj-$(CONFIG_NETFS_SUPPORT) += netfs/ obj-$(CONFIG_FSCACHE) += fscache/ obj-$(CONFIG_REISERFS_FS) += reiserfs/ obj-$(CONFIG_EXT4_FS) += ext4/ diff --git a/fs/netfs/Makefile b/fs/netfs/Makefile new file mode 100644 index 000000000000..4b4eff2ba369 --- /dev/null +++ b/fs/netfs/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 + +netfs-y := \ + read_helper.o + +obj-$(CONFIG_NETFS_SUPPORT) := netfs.o diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h new file mode 100644 index 000000000000..ee665c0e7dc8 --- /dev/null +++ b/fs/netfs/internal.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Internal definitions for network filesystem support + * + * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifdef pr_fmt +#undef pr_fmt +#endif + +#define pr_fmt(fmt) "netfs: " fmt + +/* + * read_helper.c + */ +extern unsigned int netfs_debug; + +#define netfs_stat(x) do {} while(0) +#define netfs_stat_d(x) do {} while(0) + +/*****************************************************************************/ +/* + * debug tracing + */ +#define dbgprintk(FMT, ...) \ + printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__) + +#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__) +#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__) +#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__) + +#ifdef __KDEBUG +#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__) +#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__) +#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__) + +#elif defined(CONFIG_NETFS_DEBUG) +#define _enter(FMT, ...) \ +do { \ + if (netfs_debug) \ + kenter(FMT, ##__VA_ARGS__); \ +} while (0) + +#define _leave(FMT, ...) \ +do { \ + if (netfs_debug) \ + kleave(FMT, ##__VA_ARGS__); \ +} while (0) + +#define _debug(FMT, ...) \ +do { \ + if (netfs_debug) \ + kdebug(FMT, ##__VA_ARGS__); \ +} while (0) + +#else +#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__) +#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__) +#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__) +#endif diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c new file mode 100644 index 000000000000..30d4bf6bf28a --- /dev/null +++ b/fs/netfs/read_helper.c @@ -0,0 +1,725 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Network filesystem high-level read support. + * + * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" + +MODULE_DESCRIPTION("Network fs support"); +MODULE_AUTHOR("Red Hat, Inc."); +MODULE_LICENSE("GPL"); + +unsigned netfs_debug; +module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO); +MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask"); + +static void netfs_rreq_work(struct work_struct *); +static void __netfs_put_subrequest(struct netfs_read_subrequest *, bool); + +static void netfs_put_subrequest(struct netfs_read_subrequest *subreq, + bool was_async) +{ + if (refcount_dec_and_test(&subreq->usage)) + __netfs_put_subrequest(subreq, was_async); +} + +static struct netfs_read_request *netfs_alloc_read_request( + const struct netfs_read_request_ops *ops, void *netfs_priv, + struct file *file) +{ + static atomic_t debug_ids; + struct netfs_read_request *rreq; + + rreq = kzalloc(sizeof(struct netfs_read_request), GFP_KERNEL); + if (rreq) { + rreq->netfs_ops = ops; + rreq->netfs_priv = netfs_priv; + rreq->inode = file_inode(file); + rreq->i_size = i_size_read(rreq->inode); + rreq->debug_id = atomic_inc_return(&debug_ids); + INIT_LIST_HEAD(&rreq->subrequests); + INIT_WORK(&rreq->work, netfs_rreq_work); + refcount_set(&rreq->usage, 1); + __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags); + ops->init_rreq(rreq, file); + } + + return rreq; +} + +static void netfs_get_read_request(struct netfs_read_request *rreq) +{ + refcount_inc(&rreq->usage); +} + +static void netfs_rreq_clear_subreqs(struct netfs_read_request *rreq, + bool was_async) +{ + struct netfs_read_subrequest *subreq; + + while (!list_empty(&rreq->subrequests)) { + subreq = list_first_entry(&rreq->subrequests, + struct netfs_read_subrequest, rreq_link); + list_del(&subreq->rreq_link); + netfs_put_subrequest(subreq, was_async); + } +} + +static void netfs_free_read_request(struct work_struct *work) +{ + struct netfs_read_request *rreq = + container_of(work, struct netfs_read_request, work); + netfs_rreq_clear_subreqs(rreq, false); + if (rreq->netfs_priv) + rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv); + kfree(rreq); +} + +static void netfs_put_read_request(struct netfs_read_request *rreq, bool was_async) +{ + if (refcount_dec_and_test(&rreq->usage)) { + if (was_async) { + rreq->work.func = netfs_free_read_request; + if (!queue_work(system_unbound_wq, &rreq->work)) + BUG(); + } else { + netfs_free_read_request(&rreq->work); + } + } +} + +/* + * Allocate and partially initialise an I/O request structure. + */ +static struct netfs_read_subrequest *netfs_alloc_subrequest( + struct netfs_read_request *rreq) +{ + struct netfs_read_subrequest *subreq; + + subreq = kzalloc(sizeof(struct netfs_read_subrequest), GFP_KERNEL); + if (subreq) { + INIT_LIST_HEAD(&subreq->rreq_link); + refcount_set(&subreq->usage, 2); + subreq->rreq = rreq; + netfs_get_read_request(rreq); + } + + return subreq; +} + +static void netfs_get_read_subrequest(struct netfs_read_subrequest *subreq) +{ + refcount_inc(&subreq->usage); +} + +static void __netfs_put_subrequest(struct netfs_read_subrequest *subreq, + bool was_async) +{ + struct netfs_read_request *rreq = subreq->rreq; + + kfree(subreq); + netfs_put_read_request(rreq, was_async); +} + +/* + * Clear the unread part of an I/O request. 
+ */ +static void netfs_clear_unread(struct netfs_read_subrequest *subreq) +{ + struct iov_iter iter; + + iov_iter_xarray(&iter, WRITE, &subreq->rreq->mapping->i_pages, + subreq->start + subreq->transferred, + subreq->len - subreq->transferred); + iov_iter_zero(iov_iter_count(&iter), &iter); +} + +/* + * Fill a subrequest region with zeroes. + */ +static void netfs_fill_with_zeroes(struct netfs_read_request *rreq, + struct netfs_read_subrequest *subreq) +{ + __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); + netfs_subreq_terminated(subreq, 0, false); +} + +/* + * Ask the netfs to issue a read request to the server for us. + * + * The netfs is expected to read from subreq->pos + subreq->transferred to + * subreq->pos + subreq->len - 1. It may not backtrack and write data into the + * buffer prior to the transferred point as it might clobber dirty data + * obtained from the cache. + * + * Alternatively, the netfs is allowed to indicate one of two things: + * + * - NETFS_SREQ_SHORT_READ: A short read - it will get called again to try and + * make progress. + * + * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be + * cleared. + */ +static void netfs_read_from_server(struct netfs_read_request *rreq, + struct netfs_read_subrequest *subreq) +{ + rreq->netfs_ops->issue_op(subreq); +} + +/* + * Release those waiting. + */ +static void netfs_rreq_completed(struct netfs_read_request *rreq, bool was_async) +{ + netfs_rreq_clear_subreqs(rreq, was_async); + netfs_put_read_request(rreq, was_async); +} + +/* + * Unlock the pages in a read operation. We need to set PG_fscache on any + * pages we're going to write back before we unlock them. + */ +static void netfs_rreq_unlock(struct netfs_read_request *rreq) +{ + struct netfs_read_subrequest *subreq; + struct page *page; + unsigned int iopos, account = 0; + pgoff_t start_page = rreq->start / PAGE_SIZE; + pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1; + bool subreq_failed = false; + int i; + + XA_STATE(xas, &rreq->mapping->i_pages, start_page); + + if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) { + __clear_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags); + list_for_each_entry(subreq, &rreq->subrequests, rreq_link) { + __clear_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags); + } + } + + /* Walk through the pagecache and the I/O request lists simultaneously. + * We may have a mixture of cached and uncached sections and we only + * really want to write out the uncached sections. This is slightly + * complicated by the possibility that we might have huge pages with a + * mixture inside. 
+ */ + subreq = list_first_entry(&rreq->subrequests, + struct netfs_read_subrequest, rreq_link); + iopos = 0; + subreq_failed = (subreq->error < 0); + + rcu_read_lock(); + xas_for_each(&xas, page, last_page) { + unsigned int pgpos = (page->index - start_page) * PAGE_SIZE; + unsigned int pgend = pgpos + thp_size(page); + bool pg_failed = false; + + for (;;) { + if (!subreq) { + pg_failed = true; + break; + } + if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags)) + set_page_fscache(page); + pg_failed |= subreq_failed; + if (pgend < iopos + subreq->len) + break; + + account += subreq->transferred; + iopos += subreq->len; + if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) { + subreq = list_next_entry(subreq, rreq_link); + subreq_failed = (subreq->error < 0); + } else { + subreq = NULL; + subreq_failed = false; + } + if (pgend == iopos) + break; + } + + if (!pg_failed) { + for (i = 0; i < thp_nr_pages(page); i++) + flush_dcache_page(page); + SetPageUptodate(page); + } + + if (!test_bit(NETFS_RREQ_DONT_UNLOCK_PAGES, &rreq->flags)) { + if (page->index == rreq->no_unlock_page && + test_bit(NETFS_RREQ_NO_UNLOCK_PAGE, &rreq->flags)) + _debug("no unlock"); + else + unlock_page(page); + } + } + rcu_read_unlock(); + + task_io_account_read(account); + if (rreq->netfs_ops->done) + rreq->netfs_ops->done(rreq); +} + +/* + * Handle a short read. + */ +static void netfs_rreq_short_read(struct netfs_read_request *rreq, + struct netfs_read_subrequest *subreq) +{ + __clear_bit(NETFS_SREQ_SHORT_READ, &subreq->flags); + __set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags); + + netfs_get_read_subrequest(subreq); + atomic_inc(&rreq->nr_rd_ops); + netfs_read_from_server(rreq, subreq); +} + +/* + * Resubmit any short or failed operations. Returns true if we got the rreq + * ref back. + */ +static bool netfs_rreq_perform_resubmissions(struct netfs_read_request *rreq) +{ + struct netfs_read_subrequest *subreq; + + WARN_ON(in_interrupt()); + + /* We don't want terminating submissions trying to wake us up whilst + * we're still going through the list. + */ + atomic_inc(&rreq->nr_rd_ops); + + __clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags); + list_for_each_entry(subreq, &rreq->subrequests, rreq_link) { + if (subreq->error) { + if (subreq->source != NETFS_READ_FROM_CACHE) + break; + subreq->source = NETFS_DOWNLOAD_FROM_SERVER; + subreq->error = 0; + netfs_get_read_subrequest(subreq); + atomic_inc(&rreq->nr_rd_ops); + netfs_read_from_server(rreq, subreq); + } else if (test_bit(NETFS_SREQ_SHORT_READ, &subreq->flags)) { + netfs_rreq_short_read(rreq, subreq); + } + } + + /* If we decrement nr_rd_ops to 0, the usage ref belongs to us. */ + if (atomic_dec_and_test(&rreq->nr_rd_ops)) + return true; + + wake_up_var(&rreq->nr_rd_ops); + return false; +} + +/* + * Assess the state of a read request and decide what to do next. + * + * Note that we could be in an ordinary kernel thread, on a workqueue or in + * softirq context at this point. We inherit a ref from the caller. 
+ */ +static void netfs_rreq_assess(struct netfs_read_request *rreq, bool was_async) +{ +again: + if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) && + test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) { + if (netfs_rreq_perform_resubmissions(rreq)) + goto again; + return; + } + + netfs_rreq_unlock(rreq); + + clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags); + wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS); + + netfs_rreq_completed(rreq, was_async); +} + +static void netfs_rreq_work(struct work_struct *work) +{ + struct netfs_read_request *rreq = + container_of(work, struct netfs_read_request, work); + netfs_rreq_assess(rreq, false); +} + +/* + * Handle the completion of all outstanding I/O operations on a read request. + * We inherit a ref from the caller. + */ +static void netfs_rreq_terminated(struct netfs_read_request *rreq, + bool was_async) +{ + if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) && + was_async) { + if (!queue_work(system_unbound_wq, &rreq->work)) + BUG(); + } else { + netfs_rreq_assess(rreq, was_async); + } +} + +/** + * netfs_subreq_terminated - Note the termination of an I/O operation. + * @subreq: The I/O request that has terminated. + * @transferred_or_error: The amount of data transferred or an error code. + * @was_async: The termination was asynchronous + * + * This tells the read helper that a contributory I/O operation has terminated, + * one way or another, and that it should integrate the results. + * + * The caller indicates in @transferred_or_error the outcome of the operation, + * supplying a positive value to indicate the number of bytes transferred, 0 to + * indicate a failure to transfer anything that should be retried or a negative + * error code. The helper will look after reissuing I/O operations as + * appropriate and writing downloaded data to the cache. + * + * If @was_async is true, the caller might be running in softirq or interrupt + * context and we can't sleep. + */ +void netfs_subreq_terminated(struct netfs_read_subrequest *subreq, + ssize_t transferred_or_error, + bool was_async) +{ + struct netfs_read_request *rreq = subreq->rreq; + int u; + + _enter("[%u]{%llx,%lx},%zd", + subreq->debug_index, subreq->start, subreq->flags, + transferred_or_error); + + if (IS_ERR_VALUE(transferred_or_error)) { + subreq->error = transferred_or_error; + goto failed; + } + + if (WARN(transferred_or_error > subreq->len - subreq->transferred, + "Subreq overread: R%x[%x] %zd > %zu - %zu", + rreq->debug_id, subreq->debug_index, + transferred_or_error, subreq->len, subreq->transferred)) + transferred_or_error = subreq->len - subreq->transferred; + + subreq->error = 0; + subreq->transferred += transferred_or_error; + if (subreq->transferred < subreq->len) + goto incomplete; + +complete: + __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags); + if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags)) + set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags); + +out: + /* If we decrement nr_rd_ops to 0, the ref belongs to us. 
*/ + u = atomic_dec_return(&rreq->nr_rd_ops); + if (u == 0) + netfs_rreq_terminated(rreq, was_async); + else if (u == 1) + wake_up_var(&rreq->nr_rd_ops); + + netfs_put_subrequest(subreq, was_async); + return; + +incomplete: + if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) { + netfs_clear_unread(subreq); + subreq->transferred = subreq->len; + goto complete; + } + + if (transferred_or_error == 0) { + if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) { + subreq->error = -ENODATA; + goto failed; + } + } else { + __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags); + } + + __set_bit(NETFS_SREQ_SHORT_READ, &subreq->flags); + set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags); + goto out; + +failed: + if (subreq->source == NETFS_READ_FROM_CACHE) { + set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags); + } else { + set_bit(NETFS_RREQ_FAILED, &rreq->flags); + rreq->error = subreq->error; + } + goto out; +} +EXPORT_SYMBOL(netfs_subreq_terminated); + +static enum netfs_read_source netfs_cache_prepare_read(struct netfs_read_subrequest *subreq, + loff_t i_size) +{ + struct netfs_read_request *rreq = subreq->rreq; + + if (subreq->start >= rreq->i_size) + return NETFS_FILL_WITH_ZEROES; + return NETFS_DOWNLOAD_FROM_SERVER; +} + +/* + * Work out what sort of subrequest the next one will be. + */ +static enum netfs_read_source +netfs_rreq_prepare_read(struct netfs_read_request *rreq, + struct netfs_read_subrequest *subreq) +{ + enum netfs_read_source source; + + _enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size); + + source = netfs_cache_prepare_read(subreq, rreq->i_size); + if (source == NETFS_INVALID_READ) + goto out; + + if (source == NETFS_DOWNLOAD_FROM_SERVER) { + /* Call out to the netfs to let it shrink the request to fit + * its own I/O sizes and boundaries. If it shinks it here, it + * will be called again to make simultaneous calls; if it wants + * to make serial calls, it can indicate a short read and then + * we will call it again. + */ + if (subreq->len > rreq->i_size - subreq->start) + subreq->len = rreq->i_size - subreq->start; + + if (rreq->netfs_ops->clamp_length && + !rreq->netfs_ops->clamp_length(subreq)) { + source = NETFS_INVALID_READ; + goto out; + } + } + + if (WARN_ON(subreq->len == 0)) + source = NETFS_INVALID_READ; + +out: + subreq->source = source; + return source; +} + +/* + * Slice off a piece of a read request and submit an I/O request for it. + */ +static bool netfs_rreq_submit_slice(struct netfs_read_request *rreq, + unsigned int *_debug_index) +{ + struct netfs_read_subrequest *subreq; + enum netfs_read_source source; + + subreq = netfs_alloc_subrequest(rreq); + if (!subreq) + return false; + + subreq->debug_index = (*_debug_index)++; + subreq->start = rreq->start + rreq->submitted; + subreq->len = rreq->len - rreq->submitted; + + _debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted); + list_add_tail(&subreq->rreq_link, &rreq->subrequests); + + /* Call out to the cache to find out what it can do with the remaining + * subset. It tells us in subreq->flags what it decided should be done + * and adjusts subreq->len down if the subset crosses a cache boundary. + * + * Then when we hand the subset, it can choose to take a subset of that + * (the starts must coincide), in which case, we go around the loop + * again and ask it to download the next piece. 
+ */ + source = netfs_rreq_prepare_read(rreq, subreq); + if (source == NETFS_INVALID_READ) + goto subreq_failed; + + atomic_inc(&rreq->nr_rd_ops); + + rreq->submitted += subreq->len; + + switch (source) { + case NETFS_FILL_WITH_ZEROES: + netfs_fill_with_zeroes(rreq, subreq); + break; + case NETFS_DOWNLOAD_FROM_SERVER: + netfs_read_from_server(rreq, subreq); + break; + default: + BUG(); + } + + return true; + +subreq_failed: + rreq->error = subreq->error; + netfs_put_subrequest(subreq, false); + return false; +} + +static void netfs_rreq_expand(struct netfs_read_request *rreq, + struct readahead_control *ractl) +{ + /* Give the netfs a chance to change the request parameters. The + * resultant request must contain the original region. + */ + if (rreq->netfs_ops->expand_readahead) + rreq->netfs_ops->expand_readahead(rreq); + + /* Expand the request if the cache wants it to start earlier. Note + * that the expansion may get further extended if the VM wishes to + * insert THPs and the preferred start and/or end wind up in the middle + * of THPs. + * + * If this is the case, however, the THP size should be an integer + * multiple of the cache granule size, so we get a whole number of + * granules to deal with. + */ + if (rreq->start != readahead_pos(ractl) || + rreq->len != readahead_length(ractl)) { + readahead_expand(ractl, rreq->start, rreq->len); + rreq->start = readahead_pos(ractl); + rreq->len = readahead_length(ractl); + } +} + +/** + * netfs_readahead - Helper to manage a read request + * @ractl: The description of the readahead request + * @ops: The network filesystem's operations for the helper to use + * @netfs_priv: Private netfs data to be retained in the request + * + * Fulfil a readahead request by drawing data from the cache if possible, or + * the netfs if not. Space beyond the EOF is zero-filled. Multiple I/O + * requests from different sources will get munged together. If necessary, the + * readahead window can be expanded in either direction to a more convenient + * alighment for RPC efficiency or to make storage in the cache feasible. + * + * The calling netfs must provide a table of operations, only one of which, + * issue_op, is mandatory. It may also be passed a private token, which will + * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup(). + * + * This is usable whether or not caching is enabled. + */ +void netfs_readahead(struct readahead_control *ractl, + const struct netfs_read_request_ops *ops, + void *netfs_priv) +{ + struct netfs_read_request *rreq; + struct page *page; + unsigned int debug_index = 0; + + _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl)); + + if (readahead_count(ractl) == 0) + goto cleanup; + + rreq = netfs_alloc_read_request(ops, netfs_priv, ractl->file); + if (!rreq) + goto cleanup; + rreq->mapping = ractl->mapping; + rreq->start = readahead_pos(ractl); + rreq->len = readahead_length(ractl); + + netfs_rreq_expand(rreq, ractl); + + atomic_set(&rreq->nr_rd_ops, 1); + do { + if (!netfs_rreq_submit_slice(rreq, &debug_index)) + break; + + } while (rreq->submitted < rreq->len); + + /* Drop the refs on the pages here rather than in the cache or + * filesystem. The locks will be dropped in netfs_rreq_unlock(). + */ + while ((page = readahead_page(ractl))) + put_page(page); + + /* If we decrement nr_rd_ops to 0, the ref belongs to us. 
*/ + if (atomic_dec_and_test(&rreq->nr_rd_ops)) + netfs_rreq_assess(rreq, false); + return; + +cleanup: + if (netfs_priv) + ops->cleanup(ractl->mapping, netfs_priv); + return; +} +EXPORT_SYMBOL(netfs_readahead); + +/** + * netfs_page - Helper to manage a readpage request + * @file: The file to read from + * @page: The page to read + * @ops: The network filesystem's operations for the helper to use + * @netfs_priv: Private netfs data to be retained in the request + * + * Fulfil a readpage request by drawing data from the cache if possible, or the + * netfs if not. Space beyond the EOF is zero-filled. Multiple I/O requests + * from different sources will get munged together. + * + * The calling netfs must provide a table of operations, only one of which, + * issue_op, is mandatory. It may also be passed a private token, which will + * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup(). + * + * This is usable whether or not caching is enabled. + */ +int netfs_readpage(struct file *file, + struct page *page, + const struct netfs_read_request_ops *ops, + void *netfs_priv) +{ + struct netfs_read_request *rreq; + unsigned int debug_index = 0; + int ret; + + _enter("%lx", page_index(page)); + + rreq = netfs_alloc_read_request(ops, netfs_priv, file); + if (!rreq) { + if (netfs_priv) + ops->cleanup(netfs_priv, page_file_mapping(page)); + unlock_page(page); + return -ENOMEM; + } + rreq->mapping = page_file_mapping(page); + rreq->start = page_index(page) * PAGE_SIZE; + rreq->len = thp_size(page); + + netfs_get_read_request(rreq); + + atomic_set(&rreq->nr_rd_ops, 1); + do { + if (!netfs_rreq_submit_slice(rreq, &debug_index)) + break; + + } while (rreq->submitted < rreq->len); + + /* Keep nr_rd_ops incremented so that the ref always belongs to us, and + * the service code isn't punted off to a random thread pool to + * process. + */ + do { + wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1); + netfs_rreq_assess(rreq, false); + } while (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags)); + + ret = rreq->error; + if (ret == 0 && rreq->submitted < rreq->len) + ret = -EIO; + netfs_put_read_request(rreq, false); + return ret; +} +EXPORT_SYMBOL(netfs_readpage); -- cgit v1.2.3 From 77b4d2c6316ab096e3f77eea240144941434f2a4 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 18 Sep 2020 09:25:13 +0100 Subject: netfs: Add tracepoints Add three tracepoints to track the activity of the read helpers: (1) netfs/netfs_read This logs entry to the read helpers and also expansion of the range in a readahead request. (2) netfs/netfs_rreq This logs the progress of netfs_read_request objects which track read requests. A read request may be a compound of multiple subrequests. (3) netfs/netfs_sreq This logs the progress of netfs_read_subrequest objects, which track the contributions from various sources to a read request. 
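All three events live under the "netfs" trace system, so they can be enabled at runtime through the usual tracefs interface (for instance by writing 1 to /sys/kernel/debug/tracing/events/netfs/enable, assuming tracefs is mounted in its default location).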
Signed-off-by: David Howells Reviewed-and-tested-by: Jeff Layton Tested-by: Dave Wysochanski Tested-By: Marc Dionne cc: Matthew Wilcox cc: linux-mm@kvack.org cc: linux-cachefs@redhat.com cc: linux-afs@lists.infradead.org cc: linux-nfs@vger.kernel.org cc: linux-cifs@vger.kernel.org cc: ceph-devel@vger.kernel.org cc: v9fs-developer@lists.sourceforge.net cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/161118138060.1232039.5353374588021776217.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161161033468.2537118.14021843889844001905.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/161340395843.1303470.7355519662919639648.stgit@warthog.procyon.org.uk/ # v3 Link: https://lore.kernel.org/r/161539538693.286939.10171713520419106334.stgit@warthog.procyon.org.uk/ # v4 Link: https://lore.kernel.org/r/161653796447.2770958.1870655382450862155.stgit@warthog.procyon.org.uk/ # v5 Link: https://lore.kernel.org/r/161789078003.6155.17814844411672989942.stgit@warthog.procyon.org.uk/ # v6 --- fs/netfs/read_helper.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'fs') diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c index 30d4bf6bf28a..799eee7f4ee6 100644 --- a/fs/netfs/read_helper.c +++ b/fs/netfs/read_helper.c @@ -16,6 +16,8 @@ #include #include #include "internal.h" +#define CREATE_TRACE_POINTS +#include MODULE_DESCRIPTION("Network fs support"); MODULE_AUTHOR("Red Hat, Inc."); @@ -84,6 +86,7 @@ static void netfs_free_read_request(struct work_struct *work) netfs_rreq_clear_subreqs(rreq, false); if (rreq->netfs_priv) rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv); + trace_netfs_rreq(rreq, netfs_rreq_trace_free); kfree(rreq); } @@ -129,6 +132,7 @@ static void __netfs_put_subrequest(struct netfs_read_subrequest *subreq, { struct netfs_read_request *rreq = subreq->rreq; + trace_netfs_sreq(subreq, netfs_sreq_trace_free); kfree(subreq); netfs_put_read_request(rreq, was_async); } @@ -183,6 +187,7 @@ static void netfs_read_from_server(struct netfs_read_request *rreq, */ static void netfs_rreq_completed(struct netfs_read_request *rreq, bool was_async) { + trace_netfs_rreq(rreq, netfs_rreq_trace_done); netfs_rreq_clear_subreqs(rreq, was_async); netfs_put_read_request(rreq, was_async); } @@ -221,6 +226,8 @@ static void netfs_rreq_unlock(struct netfs_read_request *rreq) iopos = 0; subreq_failed = (subreq->error < 0); + trace_netfs_rreq(rreq, netfs_rreq_trace_unlock); + rcu_read_lock(); xas_for_each(&xas, page, last_page) { unsigned int pgpos = (page->index - start_page) * PAGE_SIZE; @@ -281,6 +288,8 @@ static void netfs_rreq_short_read(struct netfs_read_request *rreq, __clear_bit(NETFS_SREQ_SHORT_READ, &subreq->flags); __set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags); + trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short); + netfs_get_read_subrequest(subreq); atomic_inc(&rreq->nr_rd_ops); netfs_read_from_server(rreq, subreq); @@ -296,6 +305,8 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_read_request *rreq) WARN_ON(in_interrupt()); + trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit); + /* We don't want terminating submissions trying to wake us up whilst * we're still going through the list. 
*/ @@ -308,6 +319,7 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_read_request *rreq) break; subreq->source = NETFS_DOWNLOAD_FROM_SERVER; subreq->error = 0; + trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead); netfs_get_read_subrequest(subreq); atomic_inc(&rreq->nr_rd_ops); netfs_read_from_server(rreq, subreq); @@ -332,6 +344,8 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_read_request *rreq) */ static void netfs_rreq_assess(struct netfs_read_request *rreq, bool was_async) { + trace_netfs_rreq(rreq, netfs_rreq_trace_assess); + again: if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) && test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) { @@ -422,6 +436,8 @@ complete: set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags); out: + trace_netfs_sreq(subreq, netfs_sreq_trace_terminated); + /* If we decrement nr_rd_ops to 0, the ref belongs to us. */ u = atomic_dec_return(&rreq->nr_rd_ops); if (u == 0) @@ -510,6 +526,7 @@ netfs_rreq_prepare_read(struct netfs_read_request *rreq, out: subreq->source = source; + trace_netfs_sreq(subreq, netfs_sreq_trace_prepare); return source; } @@ -549,6 +566,7 @@ static bool netfs_rreq_submit_slice(struct netfs_read_request *rreq, rreq->submitted += subreq->len; + trace_netfs_sreq(subreq, netfs_sreq_trace_submit); switch (source) { case NETFS_FILL_WITH_ZEROES: netfs_fill_with_zeroes(rreq, subreq); @@ -591,6 +609,9 @@ static void netfs_rreq_expand(struct netfs_read_request *rreq, readahead_expand(ractl, rreq->start, rreq->len); rreq->start = readahead_pos(ractl); rreq->len = readahead_length(ractl); + + trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl), + netfs_read_trace_expanded); } } @@ -632,6 +653,9 @@ void netfs_readahead(struct readahead_control *ractl, rreq->start = readahead_pos(ractl); rreq->len = readahead_length(ractl); + trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl), + netfs_read_trace_readahead); + netfs_rreq_expand(rreq, ractl); atomic_set(&rreq->nr_rd_ops, 1); @@ -698,6 +722,8 @@ int netfs_readpage(struct file *file, rreq->start = page_index(page) * PAGE_SIZE; rreq->len = thp_size(page); + trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage); + netfs_get_read_request(rreq); atomic_set(&rreq->nr_rd_ops, 1); -- cgit v1.2.3 From 289af54cc67ace285b6d4335a54324562894c4e2 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 3 Nov 2020 11:32:41 +0000 Subject: netfs: Gather stats Gather statistics from the netfs interface that can be exported through a seqfile. This is intended to be called by a later patch when viewing /proc/fs/fscache/stats. 
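As a purely illustrative sketch (the hook-up into fscache's procfs file is done by a later patch, and the function name here is invented), a seq_file show routine can fold these counters into its output by chaining to the exported helper:

	#include <linux/seq_file.h>

	/* Hypothetical caller of the helper added below; not part of this
	 * patch.  netfs_stats_show() emits the "RdHelp : ..." lines.
	 */
	static int example_caching_stats_show(struct seq_file *m, void *v)
	{
		netfs_stats_show(m);
		return 0;
	}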
Signed-off-by: David Howells Reviewed-and-tested-by: Jeff Layton Tested-by: Dave Wysochanski Tested-By: Marc Dionne cc: Matthew Wilcox cc: linux-mm@kvack.org cc: linux-cachefs@redhat.com cc: linux-afs@lists.infradead.org cc: linux-nfs@vger.kernel.org cc: linux-cifs@vger.kernel.org cc: ceph-devel@vger.kernel.org cc: v9fs-developer@lists.sourceforge.net cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/161118139247.1232039.10556850937548511068.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161161034669.2537118.2761232524997091480.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/161340397101.1303470.17581910581108378458.stgit@warthog.procyon.org.uk/ # v3 Link: https://lore.kernel.org/r/161539539959.286939.6794352576462965914.stgit@warthog.procyon.org.uk/ # v4 Link: https://lore.kernel.org/r/161653797700.2770958.5801990354413178228.stgit@warthog.procyon.org.uk/ # v5 Link: https://lore.kernel.org/r/161789079281.6155.17141344853277186500.stgit@warthog.procyon.org.uk/ # v6 --- fs/netfs/Kconfig | 15 ++++++++++++++ fs/netfs/Makefile | 3 +-- fs/netfs/internal.h | 34 +++++++++++++++++++++++++++++++ fs/netfs/read_helper.c | 23 +++++++++++++++++++++ fs/netfs/stats.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 127 insertions(+), 2 deletions(-) create mode 100644 fs/netfs/stats.c (limited to 'fs') diff --git a/fs/netfs/Kconfig b/fs/netfs/Kconfig index 2ebf90e6ca95..578112713703 100644 --- a/fs/netfs/Kconfig +++ b/fs/netfs/Kconfig @@ -6,3 +6,18 @@ config NETFS_SUPPORT This option enables support for network filesystems, including helpers for high-level buffered I/O, abstracting out read segmentation, local caching and transparent huge page support. + +config NETFS_STATS + bool "Gather statistical information on local caching" + depends on NETFS_SUPPORT && PROC_FS + help + This option causes statistical information to be gathered on local + caching and exported through file: + + /proc/fs/fscache/stats + + The gathering of statistics adds a certain amount of overhead to + execution as there are a quite a few stats gathered, and on a + multi-CPU system these may be on cachelines that keep bouncing + between CPUs. On the other hand, the stats are very useful for + debugging purposes. Saying 'Y' here is recommended. 
diff --git a/fs/netfs/Makefile b/fs/netfs/Makefile index 4b4eff2ba369..c15bfc966d96 100644 --- a/fs/netfs/Makefile +++ b/fs/netfs/Makefile @@ -1,6 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -netfs-y := \ - read_helper.o +netfs-y := read_helper.o stats.o obj-$(CONFIG_NETFS_SUPPORT) := netfs.o diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h index ee665c0e7dc8..98b6f4516da1 100644 --- a/fs/netfs/internal.h +++ b/fs/netfs/internal.h @@ -16,8 +16,42 @@ */ extern unsigned int netfs_debug; +/* + * stats.c + */ +#ifdef CONFIG_NETFS_STATS +extern atomic_t netfs_n_rh_readahead; +extern atomic_t netfs_n_rh_readpage; +extern atomic_t netfs_n_rh_rreq; +extern atomic_t netfs_n_rh_sreq; +extern atomic_t netfs_n_rh_download; +extern atomic_t netfs_n_rh_download_done; +extern atomic_t netfs_n_rh_download_failed; +extern atomic_t netfs_n_rh_download_instead; +extern atomic_t netfs_n_rh_read; +extern atomic_t netfs_n_rh_read_done; +extern atomic_t netfs_n_rh_read_failed; +extern atomic_t netfs_n_rh_zero; +extern atomic_t netfs_n_rh_short_read; +extern atomic_t netfs_n_rh_write; +extern atomic_t netfs_n_rh_write_done; +extern atomic_t netfs_n_rh_write_failed; + + +static inline void netfs_stat(atomic_t *stat) +{ + atomic_inc(stat); +} + +static inline void netfs_stat_d(atomic_t *stat) +{ + atomic_dec(stat); +} + +#else #define netfs_stat(x) do {} while(0) #define netfs_stat_d(x) do {} while(0) +#endif /*****************************************************************************/ /* diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c index 799eee7f4ee6..6d6ed30f417e 100644 --- a/fs/netfs/read_helper.c +++ b/fs/netfs/read_helper.c @@ -56,6 +56,7 @@ static struct netfs_read_request *netfs_alloc_read_request( refcount_set(&rreq->usage, 1); __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags); ops->init_rreq(rreq, file); + netfs_stat(&netfs_n_rh_rreq); } return rreq; @@ -88,6 +89,7 @@ static void netfs_free_read_request(struct work_struct *work) rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv); trace_netfs_rreq(rreq, netfs_rreq_trace_free); kfree(rreq); + netfs_stat_d(&netfs_n_rh_rreq); } static void netfs_put_read_request(struct netfs_read_request *rreq, bool was_async) @@ -117,6 +119,7 @@ static struct netfs_read_subrequest *netfs_alloc_subrequest( refcount_set(&subreq->usage, 2); subreq->rreq = rreq; netfs_get_read_request(rreq); + netfs_stat(&netfs_n_rh_sreq); } return subreq; @@ -134,6 +137,7 @@ static void __netfs_put_subrequest(struct netfs_read_subrequest *subreq, trace_netfs_sreq(subreq, netfs_sreq_trace_free); kfree(subreq); + netfs_stat_d(&netfs_n_rh_sreq); netfs_put_read_request(rreq, was_async); } @@ -156,6 +160,7 @@ static void netfs_clear_unread(struct netfs_read_subrequest *subreq) static void netfs_fill_with_zeroes(struct netfs_read_request *rreq, struct netfs_read_subrequest *subreq) { + netfs_stat(&netfs_n_rh_zero); __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); netfs_subreq_terminated(subreq, 0, false); } @@ -179,6 +184,7 @@ static void netfs_fill_with_zeroes(struct netfs_read_request *rreq, static void netfs_read_from_server(struct netfs_read_request *rreq, struct netfs_read_subrequest *subreq) { + netfs_stat(&netfs_n_rh_download); rreq->netfs_ops->issue_op(subreq); } @@ -288,6 +294,7 @@ static void netfs_rreq_short_read(struct netfs_read_request *rreq, __clear_bit(NETFS_SREQ_SHORT_READ, &subreq->flags); __set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags); + netfs_stat(&netfs_n_rh_short_read); trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short); 
netfs_get_read_subrequest(subreq); @@ -319,6 +326,7 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_read_request *rreq) break; subreq->source = NETFS_DOWNLOAD_FROM_SERVER; subreq->error = 0; + netfs_stat(&netfs_n_rh_download_instead); trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead); netfs_get_read_subrequest(subreq); atomic_inc(&rreq->nr_rd_ops); @@ -414,6 +422,17 @@ void netfs_subreq_terminated(struct netfs_read_subrequest *subreq, subreq->debug_index, subreq->start, subreq->flags, transferred_or_error); + switch (subreq->source) { + case NETFS_READ_FROM_CACHE: + netfs_stat(&netfs_n_rh_read_done); + break; + case NETFS_DOWNLOAD_FROM_SERVER: + netfs_stat(&netfs_n_rh_download_done); + break; + default: + break; + } + if (IS_ERR_VALUE(transferred_or_error)) { subreq->error = transferred_or_error; goto failed; @@ -470,8 +489,10 @@ incomplete: failed: if (subreq->source == NETFS_READ_FROM_CACHE) { + netfs_stat(&netfs_n_rh_read_failed); set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags); } else { + netfs_stat(&netfs_n_rh_download_failed); set_bit(NETFS_RREQ_FAILED, &rreq->flags); rreq->error = subreq->error; } @@ -653,6 +674,7 @@ void netfs_readahead(struct readahead_control *ractl, rreq->start = readahead_pos(ractl); rreq->len = readahead_length(ractl); + netfs_stat(&netfs_n_rh_readahead); trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl), netfs_read_trace_readahead); @@ -722,6 +744,7 @@ int netfs_readpage(struct file *file, rreq->start = page_index(page) * PAGE_SIZE; rreq->len = thp_size(page); + netfs_stat(&netfs_n_rh_readpage); trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage); netfs_get_read_request(rreq); diff --git a/fs/netfs/stats.c b/fs/netfs/stats.c new file mode 100644 index 000000000000..df6ff5718f25 --- /dev/null +++ b/fs/netfs/stats.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Netfs support statistics + * + * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +#include +#include +#include +#include "internal.h" + +atomic_t netfs_n_rh_readahead; +atomic_t netfs_n_rh_readpage; +atomic_t netfs_n_rh_rreq; +atomic_t netfs_n_rh_sreq; +atomic_t netfs_n_rh_download; +atomic_t netfs_n_rh_download_done; +atomic_t netfs_n_rh_download_failed; +atomic_t netfs_n_rh_download_instead; +atomic_t netfs_n_rh_read; +atomic_t netfs_n_rh_read_done; +atomic_t netfs_n_rh_read_failed; +atomic_t netfs_n_rh_zero; +atomic_t netfs_n_rh_short_read; +atomic_t netfs_n_rh_write; +atomic_t netfs_n_rh_write_done; +atomic_t netfs_n_rh_write_failed; + +void netfs_stats_show(struct seq_file *m) +{ + seq_printf(m, "RdHelp : RA=%u RP=%u rr=%u sr=%u\n", + atomic_read(&netfs_n_rh_readahead), + atomic_read(&netfs_n_rh_readpage), + atomic_read(&netfs_n_rh_rreq), + atomic_read(&netfs_n_rh_sreq)); + seq_printf(m, "RdHelp : ZR=%u sh=%u\n", + atomic_read(&netfs_n_rh_zero), + atomic_read(&netfs_n_rh_short_read)); + seq_printf(m, "RdHelp : DL=%u ds=%u df=%u di=%u\n", + atomic_read(&netfs_n_rh_download), + atomic_read(&netfs_n_rh_download_done), + atomic_read(&netfs_n_rh_download_failed), + atomic_read(&netfs_n_rh_download_instead)); + seq_printf(m, "RdHelp : RD=%u rs=%u rf=%u\n", + atomic_read(&netfs_n_rh_read), + atomic_read(&netfs_n_rh_read_done), + atomic_read(&netfs_n_rh_read_failed)); + seq_printf(m, "RdHelp : WR=%u ws=%u wf=%u\n", + atomic_read(&netfs_n_rh_write), + atomic_read(&netfs_n_rh_write_done), + atomic_read(&netfs_n_rh_write_failed)); +} +EXPORT_SYMBOL(netfs_stats_show); -- cgit v1.2.3 From e1b1240c1ff5f8bfba797f14996d8bac8a9ec437 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 22 Sep 2020 11:06:07 +0100 Subject: netfs: Add write_begin helper Add a helper to do the pre-reading work for the netfs write_begin address space op. Changes v6: - Fixed a missing rreq put in netfs_write_begin()[3]. - Use DEFINE_READAHEAD()[4]. v5: - Made the wait for PG_fscache in netfs_write_begin() killable[2]. v4: - Added flag to netfs_subreq_terminated() to indicate that the caller may have been running async and stuff that might sleep needs punting to a workqueue (can't use in_softirq()[1]). 
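For illustration only, a filesystem's ->write_begin address_space op can then be a thin wrapper around this helper.  The names below are invented for the sketch; "example_req_ops" stands for the filesystem's netfs_read_request_ops table and netfs_priv is simply passed as NULL here:

	/* Hypothetical example, not part of this patch. */
	static int example_fs_write_begin(struct file *file,
					  struct address_space *mapping,
					  loff_t pos, unsigned int len,
					  unsigned int flags,
					  struct page **pagep, void **fsdata)
	{
		return netfs_write_begin(file, mapping, pos, len, flags,
					 pagep, fsdata, &example_req_ops,
					 NULL);
	}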
Signed-off-by: David Howells Reviewed-and-tested-by: Jeff Layton Tested-by: Dave Wysochanski Tested-By: Marc Dionne cc: Matthew Wilcox cc: linux-mm@kvack.org cc: linux-cachefs@redhat.com cc: linux-afs@lists.infradead.org cc: linux-nfs@vger.kernel.org cc: linux-cifs@vger.kernel.org cc: ceph-devel@vger.kernel.org cc: v9fs-developer@lists.sourceforge.net cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/20210216084230.GA23669@lst.de/ [1] Link: https://lore.kernel.org/r/2499407.1616505440@warthog.procyon.org.uk/ [2] Link: https://lore.kernel.org/r/161781042127.463527.9154479794406046987.stgit@warthog.procyon.org.uk/ [3] Link: https://lore.kernel.org/r/1234933.1617886271@warthog.procyon.org.uk/ [4] Link: https://lore.kernel.org/r/160588543960.3465195.2792938973035886168.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161118140165.1232039.16418853874312234477.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161161035539.2537118.15674887534950908530.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/161340398368.1303470.11242918276563276090.stgit@warthog.procyon.org.uk/ # v3 Link: https://lore.kernel.org/r/161539541541.286939.1889738674057013729.stgit@warthog.procyon.org.uk/ # v4 Link: https://lore.kernel.org/r/161653798616.2770958.17213315845968485563.stgit@warthog.procyon.org.uk/ # v5 Link: https://lore.kernel.org/r/161789080530.6155.1011847312392330491.stgit@warthog.procyon.org.uk/ # v6 --- fs/netfs/internal.h | 2 + fs/netfs/read_helper.c | 164 +++++++++++++++++++++++++++++++++++++++++++++++++ fs/netfs/stats.c | 11 +++- 3 files changed, 174 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h index 98b6f4516da1..b7f2c4459f33 100644 --- a/fs/netfs/internal.h +++ b/fs/netfs/internal.h @@ -34,8 +34,10 @@ extern atomic_t netfs_n_rh_read_failed; extern atomic_t netfs_n_rh_zero; extern atomic_t netfs_n_rh_short_read; extern atomic_t netfs_n_rh_write; +extern atomic_t netfs_n_rh_write_begin; extern atomic_t netfs_n_rh_write_done; extern atomic_t netfs_n_rh_write_failed; +extern atomic_t netfs_n_rh_write_zskip; static inline void netfs_stat(atomic_t *stat) diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c index 6d6ed30f417e..da34aedea053 100644 --- a/fs/netfs/read_helper.c +++ b/fs/netfs/read_helper.c @@ -772,3 +772,167 @@ int netfs_readpage(struct file *file, return ret; } EXPORT_SYMBOL(netfs_readpage); + +static void netfs_clear_thp(struct page *page) +{ + unsigned int i; + + for (i = 0; i < thp_nr_pages(page); i++) + clear_highpage(page + i); +} + +/** + * netfs_write_begin - Helper to prepare for writing + * @file: The file to read from + * @mapping: The mapping to read from + * @pos: File position at which the write will begin + * @len: The length of the write in this page + * @flags: AOP_* flags + * @_page: Where to put the resultant page + * @_fsdata: Place for the netfs to store a cookie + * @ops: The network filesystem's operations for the helper to use + * @netfs_priv: Private netfs data to be retained in the request + * + * Pre-read data for a write-begin request by drawing data from the cache if + * possible, or the netfs if not. Space beyond the EOF is zero-filled. + * Multiple I/O requests from different sources will get munged together. If + * necessary, the readahead window can be expanded in either direction to a + * more convenient alighment for RPC efficiency or to make storage in the cache + * feasible. 
+ * + * The calling netfs must provide a table of operations, only one of which, + * issue_op, is mandatory. + * + * The check_write_begin() operation can be provided to check for and flush + * conflicting writes once the page is grabbed and locked. It is passed a + * pointer to the fsdata cookie that gets returned to the VM to be passed to + * write_end. It is permitted to sleep. It should return 0 if the request + * should go ahead; unlock the page and return -EAGAIN to cause the page to be + * regot; or return an error. + * + * This is usable whether or not caching is enabled. + */ +int netfs_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned int len, unsigned int flags, + struct page **_page, void **_fsdata, + const struct netfs_read_request_ops *ops, + void *netfs_priv) +{ + struct netfs_read_request *rreq; + struct page *page, *xpage; + struct inode *inode = file_inode(file); + unsigned int debug_index = 0; + pgoff_t index = pos >> PAGE_SHIFT; + int pos_in_page = pos & ~PAGE_MASK; + loff_t size; + int ret; + + DEFINE_READAHEAD(ractl, file, NULL, mapping, index); + +retry: + page = grab_cache_page_write_begin(mapping, index, 0); + if (!page) + return -ENOMEM; + + if (ops->check_write_begin) { + /* Allow the netfs (eg. ceph) to flush conflicts. */ + ret = ops->check_write_begin(file, pos, len, page, _fsdata); + if (ret < 0) { + if (ret == -EAGAIN) + goto retry; + goto error; + } + } + + if (PageUptodate(page)) + goto have_page; + + /* If the page is beyond the EOF, we want to clear it - unless it's + * within the cache granule containing the EOF, in which case we need + * to preload the granule. + */ + size = i_size_read(inode); + if (!ops->is_cache_enabled(inode) && + ((pos_in_page == 0 && len == thp_size(page)) || + (pos >= size) || + (pos_in_page == 0 && (pos + len) >= size))) { + netfs_clear_thp(page); + SetPageUptodate(page); + netfs_stat(&netfs_n_rh_write_zskip); + goto have_page_no_wait; + } + + ret = -ENOMEM; + rreq = netfs_alloc_read_request(ops, netfs_priv, file); + if (!rreq) + goto error; + rreq->mapping = page->mapping; + rreq->start = page->index * PAGE_SIZE; + rreq->len = thp_size(page); + rreq->no_unlock_page = page->index; + __set_bit(NETFS_RREQ_NO_UNLOCK_PAGE, &rreq->flags); + netfs_priv = NULL; + + netfs_stat(&netfs_n_rh_write_begin); + trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin); + + /* Expand the request to meet caching requirements and download + * preferences. + */ + ractl._nr_pages = thp_nr_pages(page); + netfs_rreq_expand(rreq, &ractl); + netfs_get_read_request(rreq); + + /* We hold the page locks, so we can drop the references */ + while ((xpage = readahead_page(&ractl))) + if (xpage != page) + put_page(xpage); + + atomic_set(&rreq->nr_rd_ops, 1); + do { + if (!netfs_rreq_submit_slice(rreq, &debug_index)) + break; + + } while (rreq->submitted < rreq->len); + + /* Keep nr_rd_ops incremented so that the ref always belongs to us, and + * the service code isn't punted off to a random thread pool to + * process. 
+ */ + for (;;) { + wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1); + netfs_rreq_assess(rreq, false); + if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags)) + break; + cond_resched(); + } + + ret = rreq->error; + if (ret == 0 && rreq->submitted < rreq->len) + ret = -EIO; + netfs_put_read_request(rreq, false); + if (ret < 0) + goto error; + +have_page: + ret = wait_on_page_fscache_killable(page); + if (ret < 0) + goto error; +have_page_no_wait: + if (netfs_priv) + ops->cleanup(netfs_priv, mapping); + *_page = page; + _leave(" = 0"); + return 0; + +error_put: + netfs_put_read_request(rreq, false); +error: + unlock_page(page); + put_page(page); + if (netfs_priv) + ops->cleanup(netfs_priv, mapping); + _leave(" = %d", ret); + return ret; +} +EXPORT_SYMBOL(netfs_write_begin); diff --git a/fs/netfs/stats.c b/fs/netfs/stats.c index df6ff5718f25..9ae538c85378 100644 --- a/fs/netfs/stats.c +++ b/fs/netfs/stats.c @@ -24,19 +24,24 @@ atomic_t netfs_n_rh_read_failed; atomic_t netfs_n_rh_zero; atomic_t netfs_n_rh_short_read; atomic_t netfs_n_rh_write; +atomic_t netfs_n_rh_write_begin; atomic_t netfs_n_rh_write_done; atomic_t netfs_n_rh_write_failed; +atomic_t netfs_n_rh_write_zskip; void netfs_stats_show(struct seq_file *m) { - seq_printf(m, "RdHelp : RA=%u RP=%u rr=%u sr=%u\n", + seq_printf(m, "RdHelp : RA=%u RP=%u WB=%u WBZ=%u rr=%u sr=%u\n", atomic_read(&netfs_n_rh_readahead), atomic_read(&netfs_n_rh_readpage), + atomic_read(&netfs_n_rh_write_begin), + atomic_read(&netfs_n_rh_write_zskip), atomic_read(&netfs_n_rh_rreq), atomic_read(&netfs_n_rh_sreq)); - seq_printf(m, "RdHelp : ZR=%u sh=%u\n", + seq_printf(m, "RdHelp : ZR=%u sh=%u sk=%u\n", atomic_read(&netfs_n_rh_zero), - atomic_read(&netfs_n_rh_short_read)); + atomic_read(&netfs_n_rh_short_read), + atomic_read(&netfs_n_rh_write_zskip)); seq_printf(m, "RdHelp : DL=%u ds=%u df=%u di=%u\n", atomic_read(&netfs_n_rh_download), atomic_read(&netfs_n_rh_download_done), -- cgit v1.2.3 From 726218fdc22c9b52f16e1228499a804bbf262a20 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 6 Feb 2020 14:22:24 +0000 Subject: netfs: Define an interface to talk to a cache Add an interface to the netfs helper library for reading data from the cache instead of downloading it from the server and support for writing data just downloaded or cleared to the cache. The API passes an iov_iter to the cache read/write routines to indicate the data/buffer to be used. This is done using the ITER_XARRAY type to provide direct access to the netfs inode's pagecache. When the netfs's ->begin_cache_operation() method is called, this must fill in the cache_resources in the netfs_read_request struct, including the netfs_cache_ops used by the helper lib to talk to the cache. The helper lib does not directly access the cache. Changes: v6: - Call trace_netfs_read() after beginning the cache op so that the cookie debug ID can be logged[3]. - Don't record the error from writing to the cache. We don't want to pass it back to the netfs[4]. - Fix copy-to-cache subreq amalgamation to not round up as it goes along otherwise it overcalculates the length of the write[5]. v5: - Use end_page_fscache() rather than unlock_page_fscache()[2]. v4: - Added flag to netfs_subreq_terminated() to indicate that the caller may have been running async and stuff that might sleep needs punting to a workqueue (can't use in_softirq()[1]). - Add missing inc of netfs_n_rh_read stat. - Move initial definition of fscache_begin_read_operation() elsewhere. 
- Need to call op->begin_cache_operation() from netfs_write_begin(). Signed-off-by: David Howells Reviewed-and-tested-by: Jeff Layton Tested-by: Dave Wysochanski Tested-By: Marc Dionne cc: Matthew Wilcox cc: linux-mm@kvack.org cc: linux-cachefs@redhat.com cc: linux-afs@lists.infradead.org cc: linux-nfs@vger.kernel.org cc: linux-cifs@vger.kernel.org cc: ceph-devel@vger.kernel.org cc: v9fs-developer@lists.sourceforge.net cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/20210216084230.GA23669@lst.de/ [1] Link: https://lore.kernel.org/r/2499407.1616505440@warthog.procyon.org.uk/ [2] Link: https://lore.kernel.org/r/161781045123.463527.14533348855710902201.stgit@warthog.procyon.org.uk/ [3] Link: https://lore.kernel.org/r/161781046256.463527.18158681600085556192.stgit@warthog.procyon.org.uk/ [4] Link: https://lore.kernel.org/r/161781047695.463527.7463536103593997492.stgit@warthog.procyon.org.uk/ [5] Link: https://lore.kernel.org/r/161118141321.1232039.8296910406755622458.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161161036700.2537118.11170748455436854978.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/161340399569.1303470.1138884774643385730.stgit@warthog.procyon.org.uk/ # v3 Link: https://lore.kernel.org/r/161539542874.286939.13337898213448136687.stgit@warthog.procyon.org.uk/ # v4 Link: https://lore.kernel.org/r/161653799826.2770958.9015430297426331950.stgit@warthog.procyon.org.uk/ # v5 Link: https://lore.kernel.org/r/161789081462.6155.3853904866933313256.stgit@warthog.procyon.org.uk/ # v6 --- fs/netfs/read_helper.c | 239 ++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 238 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c index da34aedea053..cd3b61d5e192 100644 --- a/fs/netfs/read_helper.c +++ b/fs/netfs/read_helper.c @@ -88,6 +88,8 @@ static void netfs_free_read_request(struct work_struct *work) if (rreq->netfs_priv) rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv); trace_netfs_rreq(rreq, netfs_rreq_trace_free); + if (rreq->cache_resources.ops) + rreq->cache_resources.ops->end_operation(&rreq->cache_resources); kfree(rreq); netfs_stat_d(&netfs_n_rh_rreq); } @@ -154,6 +156,34 @@ static void netfs_clear_unread(struct netfs_read_subrequest *subreq) iov_iter_zero(iov_iter_count(&iter), &iter); } +static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, + bool was_async) +{ + struct netfs_read_subrequest *subreq = priv; + + netfs_subreq_terminated(subreq, transferred_or_error, was_async); +} + +/* + * Issue a read against the cache. + * - Eats the caller's ref on subreq. + */ +static void netfs_read_from_cache(struct netfs_read_request *rreq, + struct netfs_read_subrequest *subreq, + bool seek_data) +{ + struct netfs_cache_resources *cres = &rreq->cache_resources; + struct iov_iter iter; + + netfs_stat(&netfs_n_rh_read); + iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, + subreq->start + subreq->transferred, + subreq->len - subreq->transferred); + + cres->ops->read(cres, subreq->start, &iter, seek_data, + netfs_cache_read_terminated, subreq); +} + /* * Fill a subrequest region with zeroes. */ @@ -198,6 +228,141 @@ static void netfs_rreq_completed(struct netfs_read_request *rreq, bool was_async netfs_put_read_request(rreq, was_async); } +/* + * Deal with the completion of writing the data to the cache. We have to clear + * the PG_fscache bits on the pages involved and release the caller's ref. 
+ * + * May be called in softirq mode and we inherit a ref from the caller. + */ +static void netfs_rreq_unmark_after_write(struct netfs_read_request *rreq, + bool was_async) +{ + struct netfs_read_subrequest *subreq; + struct page *page; + pgoff_t unlocked = 0; + bool have_unlocked = false; + + rcu_read_lock(); + + list_for_each_entry(subreq, &rreq->subrequests, rreq_link) { + XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE); + + xas_for_each(&xas, page, (subreq->start + subreq->len - 1) / PAGE_SIZE) { + /* We might have multiple writes from the same huge + * page, but we mustn't unlock a page more than once. + */ + if (have_unlocked && page->index <= unlocked) + continue; + unlocked = page->index; + end_page_fscache(page); + have_unlocked = true; + } + } + + rcu_read_unlock(); + netfs_rreq_completed(rreq, was_async); +} + +static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error, + bool was_async) +{ + struct netfs_read_subrequest *subreq = priv; + struct netfs_read_request *rreq = subreq->rreq; + + if (IS_ERR_VALUE(transferred_or_error)) { + netfs_stat(&netfs_n_rh_write_failed); + } else { + netfs_stat(&netfs_n_rh_write_done); + } + + trace_netfs_sreq(subreq, netfs_sreq_trace_write_term); + + /* If we decrement nr_wr_ops to 0, the ref belongs to us. */ + if (atomic_dec_and_test(&rreq->nr_wr_ops)) + netfs_rreq_unmark_after_write(rreq, was_async); + + netfs_put_subrequest(subreq, was_async); +} + +/* + * Perform any outstanding writes to the cache. We inherit a ref from the + * caller. + */ +static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq) +{ + struct netfs_cache_resources *cres = &rreq->cache_resources; + struct netfs_read_subrequest *subreq, *next, *p; + struct iov_iter iter; + int ret; + + trace_netfs_rreq(rreq, netfs_rreq_trace_write); + + /* We don't want terminating writes trying to wake us up whilst we're + * still going through the list. + */ + atomic_inc(&rreq->nr_wr_ops); + + list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) { + if (!test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags)) { + list_del_init(&subreq->rreq_link); + netfs_put_subrequest(subreq, false); + } + } + + list_for_each_entry(subreq, &rreq->subrequests, rreq_link) { + /* Amalgamate adjacent writes */ + while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) { + next = list_next_entry(subreq, rreq_link); + if (next->start != subreq->start + subreq->len) + break; + subreq->len += next->len; + list_del_init(&next->rreq_link); + netfs_put_subrequest(next, false); + } + + ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len, + rreq->i_size); + if (ret < 0) { + trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip); + continue; + } + + iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages, + subreq->start, subreq->len); + + atomic_inc(&rreq->nr_wr_ops); + netfs_stat(&netfs_n_rh_write); + netfs_get_read_subrequest(subreq); + trace_netfs_sreq(subreq, netfs_sreq_trace_write); + cres->ops->write(cres, subreq->start, &iter, + netfs_rreq_copy_terminated, subreq); + } + + /* If we decrement nr_wr_ops to 0, the usage ref belongs to us. 
*/ + if (atomic_dec_and_test(&rreq->nr_wr_ops)) + netfs_rreq_unmark_after_write(rreq, false); +} + +static void netfs_rreq_write_to_cache_work(struct work_struct *work) +{ + struct netfs_read_request *rreq = + container_of(work, struct netfs_read_request, work); + + netfs_rreq_do_write_to_cache(rreq); +} + +static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq, + bool was_async) +{ + if (was_async) { + rreq->work.func = netfs_rreq_write_to_cache_work; + if (!queue_work(system_unbound_wq, &rreq->work)) + BUG(); + } else { + netfs_rreq_do_write_to_cache(rreq); + } +} + /* * Unlock the pages in a read operation. We need to set PG_fscache on any * pages we're going to write back before we unlock them. @@ -299,7 +464,10 @@ static void netfs_rreq_short_read(struct netfs_read_request *rreq, netfs_get_read_subrequest(subreq); atomic_inc(&rreq->nr_rd_ops); - netfs_read_from_server(rreq, subreq); + if (subreq->source == NETFS_READ_FROM_CACHE) + netfs_read_from_cache(rreq, subreq, true); + else + netfs_read_from_server(rreq, subreq); } /* @@ -344,6 +512,25 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_read_request *rreq) return false; } +/* + * Check to see if the data read is still valid. + */ +static void netfs_rreq_is_still_valid(struct netfs_read_request *rreq) +{ + struct netfs_read_subrequest *subreq; + + if (!rreq->netfs_ops->is_still_valid || + rreq->netfs_ops->is_still_valid(rreq)) + return; + + list_for_each_entry(subreq, &rreq->subrequests, rreq_link) { + if (subreq->source == NETFS_READ_FROM_CACHE) { + subreq->error = -ESTALE; + __set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags); + } + } +} + /* * Assess the state of a read request and decide what to do next. * @@ -355,6 +542,8 @@ static void netfs_rreq_assess(struct netfs_read_request *rreq, bool was_async) trace_netfs_rreq(rreq, netfs_rreq_trace_assess); again: + netfs_rreq_is_still_valid(rreq); + if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) && test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) { if (netfs_rreq_perform_resubmissions(rreq)) @@ -367,6 +556,9 @@ again: clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags); wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS); + if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags)) + return netfs_rreq_write_to_cache(rreq, was_async); + netfs_rreq_completed(rreq, was_async); } @@ -504,7 +696,10 @@ static enum netfs_read_source netfs_cache_prepare_read(struct netfs_read_subrequ loff_t i_size) { struct netfs_read_request *rreq = subreq->rreq; + struct netfs_cache_resources *cres = &rreq->cache_resources; + if (cres->ops) + return cres->ops->prepare_read(subreq, i_size); if (subreq->start >= rreq->i_size) return NETFS_FILL_WITH_ZEROES; return NETFS_DOWNLOAD_FROM_SERVER; @@ -595,6 +790,9 @@ static bool netfs_rreq_submit_slice(struct netfs_read_request *rreq, case NETFS_DOWNLOAD_FROM_SERVER: netfs_read_from_server(rreq, subreq); break; + case NETFS_READ_FROM_CACHE: + netfs_read_from_cache(rreq, subreq, false); + break; default: BUG(); } @@ -607,9 +805,23 @@ subreq_failed: return false; } +static void netfs_cache_expand_readahead(struct netfs_read_request *rreq, + loff_t *_start, size_t *_len, loff_t i_size) +{ + struct netfs_cache_resources *cres = &rreq->cache_resources; + + if (cres->ops && cres->ops->expand_readahead) + cres->ops->expand_readahead(cres, _start, _len, i_size); +} + static void netfs_rreq_expand(struct netfs_read_request *rreq, struct readahead_control *ractl) { + /* Give the cache a chance to change the request parameters. 
The + * resultant request must contain the original region. + */ + netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size); + /* Give the netfs a chance to change the request parameters. The * resultant request must contain the original region. */ @@ -661,6 +873,7 @@ void netfs_readahead(struct readahead_control *ractl, struct netfs_read_request *rreq; struct page *page; unsigned int debug_index = 0; + int ret; _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl)); @@ -674,6 +887,12 @@ void netfs_readahead(struct readahead_control *ractl, rreq->start = readahead_pos(ractl); rreq->len = readahead_length(ractl); + if (ops->begin_cache_operation) { + ret = ops->begin_cache_operation(rreq); + if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) + goto cleanup_free; + } + netfs_stat(&netfs_n_rh_readahead); trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl), netfs_read_trace_readahead); @@ -698,6 +917,9 @@ void netfs_readahead(struct readahead_control *ractl, netfs_rreq_assess(rreq, false); return; +cleanup_free: + netfs_put_read_request(rreq, false); + return; cleanup: if (netfs_priv) ops->cleanup(ractl->mapping, netfs_priv); @@ -744,6 +966,14 @@ int netfs_readpage(struct file *file, rreq->start = page_index(page) * PAGE_SIZE; rreq->len = thp_size(page); + if (ops->begin_cache_operation) { + ret = ops->begin_cache_operation(rreq); + if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) { + unlock_page(page); + goto out; + } + } + netfs_stat(&netfs_n_rh_readpage); trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage); @@ -768,6 +998,7 @@ int netfs_readpage(struct file *file, ret = rreq->error; if (ret == 0 && rreq->submitted < rreq->len) ret = -EIO; +out: netfs_put_read_request(rreq, false); return ret; } @@ -873,6 +1104,12 @@ retry: __set_bit(NETFS_RREQ_NO_UNLOCK_PAGE, &rreq->flags); netfs_priv = NULL; + if (ops->begin_cache_operation) { + ret = ops->begin_cache_operation(rreq); + if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) + goto error_put; + } + netfs_stat(&netfs_n_rh_write_begin); trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin); -- cgit v1.2.3 From 0246f3e5737d0b083baefa552fecedd90832dad0 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 6 Apr 2021 17:31:54 +0100 Subject: netfs: Add a tracepoint to log failures that would be otherwise unseen Add a tracepoint to log internal failures (such as cache errors) that we don't otherwise want to pass back to the netfs. 
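For example, a failed copy to the cache is now noted for tracing without the error ever reaching the netfs; the fragment below is a condensed sketch of the call shape the hunks in this patch add, using the failure enum values they introduce:

	if (IS_ERR_VALUE(transferred_or_error)) {
		netfs_stat(&netfs_n_rh_write_failed);
		trace_netfs_failure(rreq, subreq, transferred_or_error,
				    netfs_fail_copy_to_cache);
	}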
Signed-off-by: David Howells Tested-by: Jeff Layton Tested-by: Dave Wysochanski Tested-By: Marc Dionne cc: Matthew Wilcox cc: linux-mm@kvack.org cc: linux-cachefs@redhat.com cc: linux-afs@lists.infradead.org cc: linux-nfs@vger.kernel.org cc: linux-cifs@vger.kernel.org cc: ceph-devel@vger.kernel.org cc: v9fs-developer@lists.sourceforge.net cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/161781048813.463527.1557000804674707986.stgit@warthog.procyon.org.uk/ Link: https://lore.kernel.org/r/161789082749.6155.15498680577213140870.stgit@warthog.procyon.org.uk/ # v6 --- fs/netfs/read_helper.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c index cd3b61d5e192..1d3b50c5db6d 100644 --- a/fs/netfs/read_helper.c +++ b/fs/netfs/read_helper.c @@ -271,6 +271,8 @@ static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error, if (IS_ERR_VALUE(transferred_or_error)) { netfs_stat(&netfs_n_rh_write_failed); + trace_netfs_failure(rreq, subreq, transferred_or_error, + netfs_fail_copy_to_cache); } else { netfs_stat(&netfs_n_rh_write_done); } @@ -323,6 +325,7 @@ static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq) ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len, rreq->i_size); if (ret < 0) { + trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write); trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip); continue; } @@ -627,6 +630,8 @@ void netfs_subreq_terminated(struct netfs_read_subrequest *subreq, if (IS_ERR_VALUE(transferred_or_error)) { subreq->error = transferred_or_error; + trace_netfs_failure(rreq, subreq, transferred_or_error, + netfs_fail_read); goto failed; } @@ -996,8 +1001,10 @@ int netfs_readpage(struct file *file, } while (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags)); ret = rreq->error; - if (ret == 0 && rreq->submitted < rreq->len) + if (ret == 0 && rreq->submitted < rreq->len) { + trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_readpage); ret = -EIO; + } out: netfs_put_read_request(rreq, false); return ret; @@ -1069,6 +1076,7 @@ retry: /* Allow the netfs (eg. ceph) to flush conflicts. */ ret = ops->check_write_begin(file, pos, len, page, _fsdata); if (ret < 0) { + trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin); if (ret == -EAGAIN) goto retry; goto error; @@ -1145,8 +1153,10 @@ retry: } ret = rreq->error; - if (ret == 0 && rreq->submitted < rreq->len) + if (ret == 0 && rreq->submitted < rreq->len) { + trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_write_begin); ret = -EIO; + } netfs_put_read_request(rreq, false); if (ret < 0) goto error; -- cgit v1.2.3 From 26aaeffcafe6cbb7c3978fa6ed7555122f8c9f8c Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 Feb 2021 11:39:47 +0000 Subject: fscache, cachefiles: Add alternate API to use kiocb for read/write to cache Add an alternate API by which the cache can be accessed through a kiocb, doing async DIO, rather than using the current API that tells the cache where all the pages are. The new API is intended to be used in conjunction with the netfs helper library. A filesystem must pick one or the other and not mix them. Filesystems wanting to use the new API must #define FSCACHE_USE_NEW_IO_API before #including the header. This prevents them from continuing to use the old API at the same time as there are incompatibilities in how the PG_fscache page bit is used. 
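As an illustration (not part of this patch: the filesystem name, inode wrapper and cookie field below are invented), a netfs might opt in and hand a read request to the cache like so, assuming the fscache_begin_read_operation() wrapper added here takes the request and the file's cookie:

	/* Select the kiocb-based cache I/O API before pulling in fscache. */
	#define FSCACHE_USE_NEW_IO_API
	#include <linux/fscache.h>
	#include <linux/netfs.h>

	/* Called by the netfs helper lib as ->begin_cache_operation(). */
	static int example_begin_cache_operation(struct netfs_read_request *rreq)
	{
		struct example_inode *xi = EXAMPLE_I(rreq->mapping->host);

		return fscache_begin_read_operation(rreq, xi->cache_cookie);
	}

The helper lib then drives the cache through the netfs_cache_ops that the cache binds into rreq->cache_resources, so the filesystem itself never touches the cache I/O paths directly.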
Changes: v6: - Provide a routine to shape a write so that the start and length can be aligned for DIO[3]. v4: - Use the vfs_iocb_iter_read/write() helpers[1] - Move initial definition of fscache_begin_read_operation() here. - Remove a commented-out line[2] - Combine ki->term_func calls in cachefiles_read_complete()[2]. - Remove explicit NULL initialiser[2]. - Remove extern on func decl[2]. - Put in param names on func decl[2]. - Remove redundant else[2]. - Fill out the kdoc comment for fscache_begin_read_operation(). - Rename fs/fscache/page2.c to io.c to match later patches. Signed-off-by: David Howells Reviewed-and-tested-by: Jeff Layton Tested-by: Dave Wysochanski Tested-By: Marc Dionne cc: Christoph Hellwig cc: linux-cachefs@redhat.com cc: linux-afs@lists.infradead.org cc: linux-nfs@vger.kernel.org cc: linux-cifs@vger.kernel.org cc: ceph-devel@vger.kernel.org cc: v9fs-developer@lists.sourceforge.net cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/20210216102614.GA27555@lst.de/ [1] Link: https://lore.kernel.org/r/20210216084230.GA23669@lst.de/ [2] Link: https://lore.kernel.org/r/161781047695.463527.7463536103593997492.stgit@warthog.procyon.org.uk/ [3] Link: https://lore.kernel.org/r/161118142558.1232039.17993829899588971439.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161161037850.2537118.8819808229350326503.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/161340402057.1303470.8038373593844486698.stgit@warthog.procyon.org.uk/ # v3 Link: https://lore.kernel.org/r/161539545919.286939.14573472672781434757.stgit@warthog.procyon.org.uk/ # v4 Link: https://lore.kernel.org/r/161653801477.2770958.10543270629064934227.stgit@warthog.procyon.org.uk/ # v5 Link: https://lore.kernel.org/r/161789084517.6155.12799689829859169640.stgit@warthog.procyon.org.uk/ # v6 --- fs/cachefiles/Makefile | 1 + fs/cachefiles/interface.c | 5 +- fs/cachefiles/internal.h | 9 + fs/cachefiles/io.c | 420 ++++++++++++++++++++++++++++++++++++++++++++++ fs/fscache/Kconfig | 1 + fs/fscache/Makefile | 1 + fs/fscache/internal.h | 4 + fs/fscache/io.c | 116 +++++++++++++ fs/fscache/page.c | 2 +- fs/fscache/stats.c | 1 + 10 files changed, 557 insertions(+), 3 deletions(-) create mode 100644 fs/cachefiles/io.c create mode 100644 fs/fscache/io.c (limited to 'fs') diff --git a/fs/cachefiles/Makefile b/fs/cachefiles/Makefile index 891dedda5905..2227dc2d5498 100644 --- a/fs/cachefiles/Makefile +++ b/fs/cachefiles/Makefile @@ -7,6 +7,7 @@ cachefiles-y := \ bind.o \ daemon.o \ interface.o \ + io.o \ key.o \ main.o \ namei.o \ diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c index 5efa6a3702c0..da3948fdb615 100644 --- a/fs/cachefiles/interface.c +++ b/fs/cachefiles/interface.c @@ -319,8 +319,8 @@ static void cachefiles_drop_object(struct fscache_object *_object) /* * dispose of a reference to an object */ -static void cachefiles_put_object(struct fscache_object *_object, - enum fscache_obj_ref_trace why) +void cachefiles_put_object(struct fscache_object *_object, + enum fscache_obj_ref_trace why) { struct cachefiles_object *object; struct fscache_cache *cache; @@ -568,4 +568,5 @@ const struct fscache_cache_ops cachefiles_cache_ops = { .uncache_page = cachefiles_uncache_page, .dissociate_pages = cachefiles_dissociate_pages, .check_consistency = cachefiles_check_consistency, + .begin_read_operation = cachefiles_begin_read_operation, }; diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index cf9bd6401c2d..4ed83aa5253b 100644 --- 
a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -150,6 +150,9 @@ extern int cachefiles_has_space(struct cachefiles_cache *cache, */ extern const struct fscache_cache_ops cachefiles_cache_ops; +void cachefiles_put_object(struct fscache_object *_object, + enum fscache_obj_ref_trace why); + /* * key.c */ @@ -217,6 +220,12 @@ extern int cachefiles_allocate_pages(struct fscache_retrieval *, extern int cachefiles_write_page(struct fscache_storage *, struct page *); extern void cachefiles_uncache_page(struct fscache_object *, struct page *); +/* + * rdwr2.c + */ +extern int cachefiles_begin_read_operation(struct netfs_read_request *, + struct fscache_retrieval *); + /* * security.c */ diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c new file mode 100644 index 000000000000..b13fb45fc3f3 --- /dev/null +++ b/fs/cachefiles/io.c @@ -0,0 +1,420 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* kiocb-using read/write + * + * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#include +#include +#include +#include +#include +#include +#include "internal.h" + +struct cachefiles_kiocb { + struct kiocb iocb; + refcount_t ki_refcnt; + loff_t start; + union { + size_t skipped; + size_t len; + }; + netfs_io_terminated_t term_func; + void *term_func_priv; + bool was_async; +}; + +static inline void cachefiles_put_kiocb(struct cachefiles_kiocb *ki) +{ + if (refcount_dec_and_test(&ki->ki_refcnt)) { + fput(ki->iocb.ki_filp); + kfree(ki); + } +} + +/* + * Handle completion of a read from the cache. + */ +static void cachefiles_read_complete(struct kiocb *iocb, long ret, long ret2) +{ + struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb); + + _enter("%ld,%ld", ret, ret2); + + if (ki->term_func) { + if (ret >= 0) + ret += ki->skipped; + ki->term_func(ki->term_func_priv, ret, ki->was_async); + } + + cachefiles_put_kiocb(ki); +} + +/* + * Initiate a read from the cache. + */ +static int cachefiles_read(struct netfs_cache_resources *cres, + loff_t start_pos, + struct iov_iter *iter, + bool seek_data, + netfs_io_terminated_t term_func, + void *term_func_priv) +{ + struct cachefiles_kiocb *ki; + struct file *file = cres->cache_priv2; + unsigned int old_nofs; + ssize_t ret = -ENOBUFS; + size_t len = iov_iter_count(iter), skipped = 0; + + _enter("%pD,%li,%llx,%zx/%llx", + file, file_inode(file)->i_ino, start_pos, len, + i_size_read(file->f_inode)); + + /* If the caller asked us to seek for data before doing the read, then + * we should do that now. If we find a gap, we fill it with zeros. + */ + if (seek_data) { + loff_t off = start_pos, off2; + + off2 = vfs_llseek(file, off, SEEK_DATA); + if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO && off2 != -ENXIO) { + skipped = 0; + ret = off2; + goto presubmission_error; + } + + if (off2 == -ENXIO || off2 >= start_pos + len) { + /* The region is beyond the EOF or there's no more data + * in the region, so clear the rest of the buffer and + * return success. 
+ */ + iov_iter_zero(len, iter); + skipped = len; + ret = 0; + goto presubmission_error; + } + + skipped = off2 - off; + iov_iter_zero(skipped, iter); + } + + ret = -ENOBUFS; + ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL); + if (!ki) + goto presubmission_error; + + refcount_set(&ki->ki_refcnt, 2); + ki->iocb.ki_filp = file; + ki->iocb.ki_pos = start_pos + skipped; + ki->iocb.ki_flags = IOCB_DIRECT; + ki->iocb.ki_hint = ki_hint_validate(file_write_hint(file)); + ki->iocb.ki_ioprio = get_current_ioprio(); + ki->skipped = skipped; + ki->term_func = term_func; + ki->term_func_priv = term_func_priv; + ki->was_async = true; + + if (ki->term_func) + ki->iocb.ki_complete = cachefiles_read_complete; + + get_file(ki->iocb.ki_filp); + + old_nofs = memalloc_nofs_save(); + ret = vfs_iocb_iter_read(file, &ki->iocb, iter); + memalloc_nofs_restore(old_nofs); + switch (ret) { + case -EIOCBQUEUED: + goto in_progress; + + case -ERESTARTSYS: + case -ERESTARTNOINTR: + case -ERESTARTNOHAND: + case -ERESTART_RESTARTBLOCK: + /* There's no easy way to restart the syscall since other AIO's + * may be already running. Just fail this IO with EINTR. + */ + ret = -EINTR; + fallthrough; + default: + ki->was_async = false; + cachefiles_read_complete(&ki->iocb, ret, 0); + if (ret > 0) + ret = 0; + break; + } + +in_progress: + cachefiles_put_kiocb(ki); + _leave(" = %zd", ret); + return ret; + +presubmission_error: + if (term_func) + term_func(term_func_priv, ret < 0 ? ret : skipped, false); + return ret; +} + +/* + * Handle completion of a write to the cache. + */ +static void cachefiles_write_complete(struct kiocb *iocb, long ret, long ret2) +{ + struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb); + struct inode *inode = file_inode(ki->iocb.ki_filp); + + _enter("%ld,%ld", ret, ret2); + + /* Tell lockdep we inherited freeze protection from submission thread */ + __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE); + __sb_end_write(inode->i_sb, SB_FREEZE_WRITE); + + if (ki->term_func) + ki->term_func(ki->term_func_priv, ret, ki->was_async); + + cachefiles_put_kiocb(ki); +} + +/* + * Initiate a write to the cache. + */ +static int cachefiles_write(struct netfs_cache_resources *cres, + loff_t start_pos, + struct iov_iter *iter, + netfs_io_terminated_t term_func, + void *term_func_priv) +{ + struct cachefiles_kiocb *ki; + struct inode *inode; + struct file *file = cres->cache_priv2; + unsigned int old_nofs; + ssize_t ret = -ENOBUFS; + size_t len = iov_iter_count(iter); + + _enter("%pD,%li,%llx,%zx/%llx", + file, file_inode(file)->i_ino, start_pos, len, + i_size_read(file->f_inode)); + + ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL); + if (!ki) + goto presubmission_error; + + refcount_set(&ki->ki_refcnt, 2); + ki->iocb.ki_filp = file; + ki->iocb.ki_pos = start_pos; + ki->iocb.ki_flags = IOCB_DIRECT | IOCB_WRITE; + ki->iocb.ki_hint = ki_hint_validate(file_write_hint(file)); + ki->iocb.ki_ioprio = get_current_ioprio(); + ki->start = start_pos; + ki->len = len; + ki->term_func = term_func; + ki->term_func_priv = term_func_priv; + ki->was_async = true; + + if (ki->term_func) + ki->iocb.ki_complete = cachefiles_write_complete; + + /* Open-code file_start_write here to grab freeze protection, which + * will be released by another thread in aio_complete_rw(). Fool + * lockdep by telling it the lock got released so that it doesn't + * complain about the held lock when we return to userspace. 
+ */ + inode = file_inode(file); + __sb_start_write(inode->i_sb, SB_FREEZE_WRITE); + __sb_writers_release(inode->i_sb, SB_FREEZE_WRITE); + + get_file(ki->iocb.ki_filp); + + old_nofs = memalloc_nofs_save(); + ret = vfs_iocb_iter_write(file, &ki->iocb, iter); + memalloc_nofs_restore(old_nofs); + switch (ret) { + case -EIOCBQUEUED: + goto in_progress; + + case -ERESTARTSYS: + case -ERESTARTNOINTR: + case -ERESTARTNOHAND: + case -ERESTART_RESTARTBLOCK: + /* There's no easy way to restart the syscall since other AIO's + * may be already running. Just fail this IO with EINTR. + */ + ret = -EINTR; + fallthrough; + default: + ki->was_async = false; + cachefiles_write_complete(&ki->iocb, ret, 0); + if (ret > 0) + ret = 0; + break; + } + +in_progress: + cachefiles_put_kiocb(ki); + _leave(" = %zd", ret); + return ret; + +presubmission_error: + if (term_func) + term_func(term_func_priv, -ENOMEM, false); + return -ENOMEM; +} + +/* + * Prepare a read operation, shortening it to a cached/uncached + * boundary as appropriate. + */ +static enum netfs_read_source cachefiles_prepare_read(struct netfs_read_subrequest *subreq, + loff_t i_size) +{ + struct fscache_retrieval *op = subreq->rreq->cache_resources.cache_priv; + struct cachefiles_object *object; + struct cachefiles_cache *cache; + const struct cred *saved_cred; + struct file *file = subreq->rreq->cache_resources.cache_priv2; + loff_t off, to; + + _enter("%zx @%llx/%llx", subreq->len, subreq->start, i_size); + + object = container_of(op->op.object, + struct cachefiles_object, fscache); + cache = container_of(object->fscache.cache, + struct cachefiles_cache, cache); + + if (!file) + goto cache_fail_nosec; + + if (subreq->start >= i_size) + return NETFS_FILL_WITH_ZEROES; + + cachefiles_begin_secure(cache, &saved_cred); + + off = vfs_llseek(file, subreq->start, SEEK_DATA); + if (off < 0 && off >= (loff_t)-MAX_ERRNO) { + if (off == (loff_t)-ENXIO) + goto download_and_store; + goto cache_fail; + } + + if (off >= subreq->start + subreq->len) + goto download_and_store; + + if (off > subreq->start) { + off = round_up(off, cache->bsize); + subreq->len = off - subreq->start; + goto download_and_store; + } + + to = vfs_llseek(file, subreq->start, SEEK_HOLE); + if (to < 0 && to >= (loff_t)-MAX_ERRNO) + goto cache_fail; + + if (to < subreq->start + subreq->len) { + if (subreq->start + subreq->len >= i_size) + to = round_up(to, cache->bsize); + else + to = round_down(to, cache->bsize); + subreq->len = to - subreq->start; + } + + cachefiles_end_secure(cache, saved_cred); + return NETFS_READ_FROM_CACHE; + +download_and_store: + if (cachefiles_has_space(cache, 0, (subreq->len + PAGE_SIZE - 1) / PAGE_SIZE) == 0) + __set_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags); +cache_fail: + cachefiles_end_secure(cache, saved_cred); +cache_fail_nosec: + return NETFS_DOWNLOAD_FROM_SERVER; +} + +/* + * Prepare for a write to occur. + */ +static int cachefiles_prepare_write(struct netfs_cache_resources *cres, + loff_t *_start, size_t *_len, loff_t i_size) +{ + loff_t start = *_start; + size_t len = *_len, down; + + /* Round to DIO size */ + down = start - round_down(start, PAGE_SIZE); + *_start = start - down; + *_len = round_up(down + len, PAGE_SIZE); + return 0; +} + +/* + * Clean up an operation. 
+ */ +static void cachefiles_end_operation(struct netfs_cache_resources *cres) +{ + struct fscache_retrieval *op = cres->cache_priv; + struct file *file = cres->cache_priv2; + + _enter(""); + + if (file) + fput(file); + if (op) { + fscache_op_complete(&op->op, false); + fscache_put_retrieval(op); + } + + _leave(""); +} + +static const struct netfs_cache_ops cachefiles_netfs_cache_ops = { + .end_operation = cachefiles_end_operation, + .read = cachefiles_read, + .write = cachefiles_write, + .prepare_read = cachefiles_prepare_read, + .prepare_write = cachefiles_prepare_write, +}; + +/* + * Open the cache file when beginning a cache operation. + */ +int cachefiles_begin_read_operation(struct netfs_read_request *rreq, + struct fscache_retrieval *op) +{ + struct cachefiles_object *object; + struct cachefiles_cache *cache; + struct path path; + struct file *file; + + _enter(""); + + object = container_of(op->op.object, + struct cachefiles_object, fscache); + cache = container_of(object->fscache.cache, + struct cachefiles_cache, cache); + + path.mnt = cache->mnt; + path.dentry = object->backer; + file = open_with_fake_path(&path, O_RDWR | O_LARGEFILE | O_DIRECT, + d_inode(object->backer), cache->cache_cred); + if (IS_ERR(file)) + return PTR_ERR(file); + if (!S_ISREG(file_inode(file)->i_mode)) + goto error_file; + if (unlikely(!file->f_op->read_iter) || + unlikely(!file->f_op->write_iter)) { + pr_notice("Cache does not support read_iter and write_iter\n"); + goto error_file; + } + + fscache_get_retrieval(op); + rreq->cache_resources.cache_priv = op; + rreq->cache_resources.cache_priv2 = file; + rreq->cache_resources.ops = &cachefiles_netfs_cache_ops; + rreq->cookie_debug_id = object->fscache.debug_id; + _leave(""); + return 0; + +error_file: + fput(file); + return -EIO; +} diff --git a/fs/fscache/Kconfig b/fs/fscache/Kconfig index 5e796e6c38e5..427efa73b9bd 100644 --- a/fs/fscache/Kconfig +++ b/fs/fscache/Kconfig @@ -2,6 +2,7 @@ config FSCACHE tristate "General filesystem local caching manager" + select NETFS_SUPPORT help This option enables a generic filesystem caching manager that can be used by various network and other filesystems to cache data locally. diff --git a/fs/fscache/Makefile b/fs/fscache/Makefile index 79e08e05ef84..3b2ffa93ac18 100644 --- a/fs/fscache/Makefile +++ b/fs/fscache/Makefile @@ -7,6 +7,7 @@ fscache-y := \ cache.o \ cookie.o \ fsdef.o \ + io.o \ main.o \ netfs.o \ object.o \ diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h index 08e91efbce53..c483863b740a 100644 --- a/fs/fscache/internal.h +++ b/fs/fscache/internal.h @@ -142,6 +142,10 @@ extern int fscache_wait_for_operation_activation(struct fscache_object *, atomic_t *, atomic_t *); extern void fscache_invalidate_writes(struct fscache_cookie *); +struct fscache_retrieval *fscache_alloc_retrieval(struct fscache_cookie *cookie, + struct address_space *mapping, + fscache_rw_complete_t end_io_func, + void *context); /* * proc.c diff --git a/fs/fscache/io.c b/fs/fscache/io.c new file mode 100644 index 000000000000..8ecc1141802f --- /dev/null +++ b/fs/fscache/io.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Cache data I/O routines + * + * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#define FSCACHE_DEBUG_LEVEL PAGE +#include +#define FSCACHE_USE_NEW_IO_API +#include +#include +#include +#include "internal.h" + +/* + * Start a cache read operation. 
+ * - we return: + * -ENOMEM - out of memory, some pages may be being read + * -ERESTARTSYS - interrupted, some pages may be being read + * -ENOBUFS - no backing object or space available in which to cache any + * pages not being read + * -ENODATA - no data available in the backing object for some or all of + * the pages + * 0 - dispatched a read on all pages + */ +int __fscache_begin_read_operation(struct netfs_read_request *rreq, + struct fscache_cookie *cookie) +{ + struct fscache_retrieval *op; + struct fscache_object *object; + bool wake_cookie = false; + int ret; + + _enter("rr=%08x", rreq->debug_id); + + fscache_stat(&fscache_n_retrievals); + + if (hlist_empty(&cookie->backing_objects)) + goto nobufs; + + if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) { + _leave(" = -ENOBUFS [invalidating]"); + return -ENOBUFS; + } + + ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); + + if (fscache_wait_for_deferred_lookup(cookie) < 0) + return -ERESTARTSYS; + + op = fscache_alloc_retrieval(cookie, NULL, NULL, NULL); + if (!op) + return -ENOMEM; + trace_fscache_page_op(cookie, NULL, &op->op, fscache_page_op_retr_multi); + + spin_lock(&cookie->lock); + + if (!fscache_cookie_enabled(cookie) || + hlist_empty(&cookie->backing_objects)) + goto nobufs_unlock; + object = hlist_entry(cookie->backing_objects.first, + struct fscache_object, cookie_link); + + __fscache_use_cookie(cookie); + atomic_inc(&object->n_reads); + __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags); + + if (fscache_submit_op(object, &op->op) < 0) + goto nobufs_unlock_dec; + spin_unlock(&cookie->lock); + + fscache_stat(&fscache_n_retrieval_ops); + + /* we wait for the operation to become active, and then process it + * *here*, in this thread, and not in the thread pool */ + ret = fscache_wait_for_operation_activation( + object, &op->op, + __fscache_stat(&fscache_n_retrieval_op_waits), + __fscache_stat(&fscache_n_retrievals_object_dead)); + if (ret < 0) + goto error; + + /* ask the cache to honour the operation */ + ret = object->cache->ops->begin_read_operation(rreq, op); + +error: + if (ret == -ENOMEM) + fscache_stat(&fscache_n_retrievals_nomem); + else if (ret == -ERESTARTSYS) + fscache_stat(&fscache_n_retrievals_intr); + else if (ret == -ENODATA) + fscache_stat(&fscache_n_retrievals_nodata); + else if (ret < 0) + fscache_stat(&fscache_n_retrievals_nobufs); + else + fscache_stat(&fscache_n_retrievals_ok); + + fscache_put_retrieval(op); + _leave(" = %d", ret); + return ret; + +nobufs_unlock_dec: + atomic_dec(&object->n_reads); + wake_cookie = __fscache_unuse_cookie(cookie); +nobufs_unlock: + spin_unlock(&cookie->lock); + fscache_put_retrieval(op); + if (wake_cookie) + __fscache_wake_unused_cookie(cookie); +nobufs: + fscache_stat(&fscache_n_retrievals_nobufs); + _leave(" = -ENOBUFS"); + return -ENOBUFS; +} +EXPORT_SYMBOL(__fscache_begin_read_operation); diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 26af6fdf1538..991b0a871744 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -299,7 +299,7 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op) /* * allocate a retrieval op */ -static struct fscache_retrieval *fscache_alloc_retrieval( +struct fscache_retrieval *fscache_alloc_retrieval( struct fscache_cookie *cookie, struct address_space *mapping, fscache_rw_complete_t end_io_func, diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c index a5aa93ece8c5..a7c3ed89a3e0 100644 --- a/fs/fscache/stats.c +++ b/fs/fscache/stats.c @@ -278,5 +278,6 @@ int fscache_stats_show(struct seq_file 
*m, void *v) atomic_read(&fscache_n_cache_stale_objects), atomic_read(&fscache_n_cache_retired_objects), atomic_read(&fscache_n_cache_culled_objects)); + netfs_stats_show(m); return 0; } -- cgit v1.2.3 From 53b776c77aca99b663a5512a04abc27670d61058 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 26 Apr 2021 21:16:16 +0100 Subject: netfs: Miscellaneous fixes Fix some miscellaneous things in the new netfs lib[1]: (1) The kerneldoc for netfs_readpage() shouldn't say netfs_page(). (2) netfs_readpage() can get an integer overflow on 32-bit when it multiplies page_index(page) by PAGE_SIZE. It should use page_file_offset() instead. (3) netfs_write_begin() should use page_offset() to avoid the same overflow. Note that netfs_readpage() needs to use page_file_offset() rather than page_offset() as it may see swap-over-NFS. Reported-by: Matthew Wilcox (Oracle) Signed-off-by: David Howells Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: Jeff Layton Link: https://lore.kernel.org/r/161789062190.6155.12711584466338493050.stgit@warthog.procyon.org.uk/ [1] --- fs/netfs/read_helper.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c index 1d3b50c5db6d..193841d03de0 100644 --- a/fs/netfs/read_helper.c +++ b/fs/netfs/read_helper.c @@ -933,7 +933,7 @@ cleanup: EXPORT_SYMBOL(netfs_readahead); /** - * netfs_page - Helper to manage a readpage request + * netfs_readpage - Helper to manage a readpage request * @file: The file to read from * @page: The page to read * @ops: The network filesystem's operations for the helper to use @@ -968,7 +968,7 @@ int netfs_readpage(struct file *file, return -ENOMEM; } rreq->mapping = page_file_mapping(page); - rreq->start = page_index(page) * PAGE_SIZE; + rreq->start = page_file_offset(page); rreq->len = thp_size(page); if (ops->begin_cache_operation) { @@ -1106,7 +1106,7 @@ retry: if (!rreq) goto error; rreq->mapping = page->mapping; - rreq->start = page->index * PAGE_SIZE; + rreq->start = page_offset(page); rreq->len = thp_size(page); rreq->no_unlock_page = page->index; __set_bit(NETFS_RREQ_NO_UNLOCK_PAGE, &rreq->flags); -- cgit v1.2.3
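For reference, the 32-bit overflow that the last patch avoids, as a simplified sketch (these are not the kernel's actual macro definitions):

	/* pgoff_t is unsigned long, so on 32-bit this product wraps for
	 * file positions at or beyond 4GiB before it is widened to loff_t:
	 */
	loff_t bad  = page->index * PAGE_SIZE;

	/* page_offset() and page_file_offset() widen the index first: */
	loff_t good = (loff_t)page->index << PAGE_SHIFT;

page_file_offset() is the variant netfs_readpage() needs, since the page it sees may belong to swap-over-NFS rather than the file's own pagecache.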