Commit 12a3ad61 authored by Trond Myklebust

SUNRPC: Convert remaining GFP_NOIO, and GFP_NOWAIT sites in sunrpc

Convert the remaining gfp_flags arguments in sunrpc to standard reclaiming
allocations, now that we set memalloc_nofs_save() as appropriate.
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
parent cefa587a
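For context, the memalloc_nofs_save()/memalloc_nofs_restore() pair referenced in the commit message scopes a section of code so that ordinary GFP_KERNEL allocations made within it implicitly behave as GFP_NOFS and cannot recurse into filesystem reclaim. The fragment below is a minimal sketch of that pattern, not code from this commit; the helper name is illustrative.

#include <linux/sched/mm.h>	/* memalloc_nofs_save/restore */
#include <linux/slab.h>		/* kmalloc */

/* Hypothetical helper: allocate while filesystem reclaim is off limits. */
static void *alloc_in_nofs_scope(size_t size)
{
	unsigned int nofs_flags;
	void *p;

	/* From here until the restore, GFP_KERNEL behaves like GFP_NOFS. */
	nofs_flags = memalloc_nofs_save();
	p = kmalloc(size, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flags);

	return p;
}

With such a scope set up by the callers, the GFP_NOIO and GFP_NOWAIT call sites below can use the standard reclaiming flags shown in the diff.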
@@ -1208,7 +1208,7 @@ gss_dup_cred(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
 	struct gss_cred *new;
 
 	/* Make a copy of the cred so that we can reference count it */
-	new = kzalloc(sizeof(*gss_cred), GFP_NOIO);
+	new = kzalloc(sizeof(*gss_cred), GFP_NOFS);
 	if (new) {
 		struct auth_cred acred = {
 			.cred = gss_cred->gc_base.cr_cred,
...
@@ -925,16 +925,13 @@ static void rpc_async_schedule(struct work_struct *work)
  * Most requests are 'small' (under 2KiB) and can be serviced from a
  * mempool, ensuring that NFS reads and writes can always proceed,
  * and that there is good locality of reference for these buffers.
- *
- * In order to avoid memory starvation triggering more writebacks of
- * NFS requests, we avoid using GFP_KERNEL.
  */
 int rpc_malloc(struct rpc_task *task)
 {
 	struct rpc_rqst *rqst = task->tk_rqstp;
 	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
 	struct rpc_buffer *buf;
-	gfp_t gfp = GFP_NOIO | __GFP_NOWARN;
+	gfp_t gfp = GFP_NOFS;
 
 	if (RPC_IS_SWAPPER(task))
 		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
@@ -1015,7 +1012,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
 static struct rpc_task *
 rpc_alloc_task(void)
 {
-	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOIO);
+	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
 }
 
 /*
...
@@ -423,7 +423,7 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
 		want = xs_alloc_sparse_pages(buf,
 				min_t(size_t, count - offset, buf->page_len),
-				GFP_NOWAIT);
+				GFP_KERNEL);
 		if (seek < want) {
 			ret = xs_read_bvec(sock, msg, flags, buf->bvec,
 						xdr_buf_pagecount(buf),
@@ -909,7 +909,7 @@ static int xs_nospace(struct rpc_rqst *req)
 static void
 xs_stream_prepare_request(struct rpc_rqst *req)
 {
-	req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_NOIO);
+	req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_KERNEL);
 }
 
 /*
...
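One detail worth noting in the rpc_malloc() hunk above: tasks flagged by RPC_IS_SWAPPER() keep __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN, so buffers for swap-out traffic can come from the emergency reserves without sleeping in reclaim. The fragment below is a minimal restatement of that flag selection for reference; pick_rpc_gfp() and its boolean parameter are illustrative names, not part of the patch.

#include <linux/types.h>
#include <linux/gfp.h>

/* Illustrative only: mirrors the flag choice made in rpc_malloc() above. */
static gfp_t pick_rpc_gfp(bool is_swap_task)
{
	/* Default: normal reclaiming allocation; FS recursion is already
	 * prevented by the caller's memalloc_nofs_save() scope. */
	gfp_t gfp = GFP_NOFS;

	/* Swap-out requests must not wait for reclaim and may dip into
	 * the memory reserves, otherwise paging over NFS could stall. */
	if (is_swap_task)
		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

	return gfp;
}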