Commit f6e70aab authored by Chuck Lever, committed by Linus Torvalds

SUNRPC: refresh rq_pages using a bulk page allocator

Reduce the rate at which nfsd threads hammer on the page allocator.  This
improves throughput scalability by enabling the threads to run more
independently of each other.

[mgorman: Update interpretation of alloc_pages_bulk return value]

Link: https://lkml.kernel.org/r/20210325114228.27719-8-mgorman@techsingularity.net
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Reviewed-by: Alexander Lobakin <alobakin@pm.me>
Cc: Alexander Duyck <alexander.duyck@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David Miller <davem@davemloft.net>
Cc: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ab836264
...@@ -662,21 +662,22 @@ static int svc_alloc_arg(struct svc_rqst *rqstp) ...@@ -662,21 +662,22 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
{ {
struct svc_serv *serv = rqstp->rq_server; struct svc_serv *serv = rqstp->rq_server;
struct xdr_buf *arg = &rqstp->rq_arg; struct xdr_buf *arg = &rqstp->rq_arg;
int pages; unsigned long pages, filled;
int i;
/* now allocate needed pages. If we get a failure, sleep briefly */
pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT; pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT;
if (pages > RPCSVC_MAXPAGES) { if (pages > RPCSVC_MAXPAGES) {
pr_warn_once("svc: warning: pages=%u > RPCSVC_MAXPAGES=%lu\n", pr_warn_once("svc: warning: pages=%lu > RPCSVC_MAXPAGES=%lu\n",
pages, RPCSVC_MAXPAGES); pages, RPCSVC_MAXPAGES);
/* use as many pages as possible */ /* use as many pages as possible */
pages = RPCSVC_MAXPAGES; pages = RPCSVC_MAXPAGES;
} }
for (i = 0; i < pages ; i++)
while (rqstp->rq_pages[i] == NULL) { for (;;) {
struct page *p = alloc_page(GFP_KERNEL); filled = alloc_pages_bulk_array(GFP_KERNEL, pages,
if (!p) { rqstp->rq_pages);
if (filled == pages)
break;
set_current_state(TASK_INTERRUPTIBLE); set_current_state(TASK_INTERRUPTIBLE);
if (signalled() || kthread_should_stop()) { if (signalled() || kthread_should_stop()) {
set_current_state(TASK_RUNNING); set_current_state(TASK_RUNNING);
...@@ -684,8 +685,6 @@ static int svc_alloc_arg(struct svc_rqst *rqstp) ...@@ -684,8 +685,6 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
} }
schedule_timeout(msecs_to_jiffies(500)); schedule_timeout(msecs_to_jiffies(500));
} }
rqstp->rq_pages[i] = p;
}
rqstp->rq_page_end = &rqstp->rq_pages[pages]; rqstp->rq_page_end = &rqstp->rq_pages[pages];
rqstp->rq_pages[pages] = NULL; /* this might be seen in nfsd_splice_actor() */ rqstp->rq_pages[pages] = NULL; /* this might be seen in nfsd_splice_actor() */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment