Commit b2648015 authored by Trond Myklebust

SUNRPC: Make the rpciod and xprtiod slab allocation modes consistent

Make sure that rpciod and xprtiod are always using the same slab
allocation modes.
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
parent 059ee82b
...@@ -75,9 +75,9 @@ static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags) ...@@ -75,9 +75,9 @@ static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags)
return 0; return 0;
} }
static static struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt)
struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt, gfp_t gfp_flags)
{ {
gfp_t gfp_flags = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
struct rpc_rqst *req; struct rpc_rqst *req;
/* Pre-allocate one backchannel rpc_rqst */ /* Pre-allocate one backchannel rpc_rqst */
...@@ -154,7 +154,7 @@ int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs) ...@@ -154,7 +154,7 @@ int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)
INIT_LIST_HEAD(&tmp_list); INIT_LIST_HEAD(&tmp_list);
for (i = 0; i < min_reqs; i++) { for (i = 0; i < min_reqs; i++) {
/* Pre-allocate one backchannel rpc_rqst */ /* Pre-allocate one backchannel rpc_rqst */
req = xprt_alloc_bc_req(xprt, GFP_KERNEL); req = xprt_alloc_bc_req(xprt);
if (req == NULL) { if (req == NULL) {
printk(KERN_ERR "Failed to create bc rpc_rqst\n"); printk(KERN_ERR "Failed to create bc rpc_rqst\n");
goto out_free; goto out_free;
...@@ -343,7 +343,7 @@ struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid) ...@@ -343,7 +343,7 @@ struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
break; break;
} else if (req) } else if (req)
break; break;
new = xprt_alloc_bc_req(xprt, GFP_KERNEL); new = xprt_alloc_bc_req(xprt);
} while (new); } while (new);
return req; return req;
} }
......
...@@ -714,7 +714,7 @@ void rpcb_getport_async(struct rpc_task *task) ...@@ -714,7 +714,7 @@ void rpcb_getport_async(struct rpc_task *task)
goto bailout_nofree; goto bailout_nofree;
} }
map = kzalloc(sizeof(struct rpcbind_args), GFP_KERNEL); map = kzalloc(sizeof(struct rpcbind_args), rpc_task_gfp_mask());
if (!map) { if (!map) {
status = -ENOMEM; status = -ENOMEM;
goto bailout_release_client; goto bailout_release_client;
...@@ -730,7 +730,7 @@ void rpcb_getport_async(struct rpc_task *task) ...@@ -730,7 +730,7 @@ void rpcb_getport_async(struct rpc_task *task)
case RPCBVERS_4: case RPCBVERS_4:
case RPCBVERS_3: case RPCBVERS_3:
map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID]; map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID];
map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL); map->r_addr = rpc_sockaddr2uaddr(sap, rpc_task_gfp_mask());
if (!map->r_addr) { if (!map->r_addr) {
status = -ENOMEM; status = -ENOMEM;
goto bailout_free_args; goto bailout_free_args;
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include <linux/pagemap.h> #include <linux/pagemap.h>
#include <linux/udp.h> #include <linux/udp.h>
#include <linux/sunrpc/msg_prot.h> #include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xdr.h> #include <linux/sunrpc/xdr.h>
#include <linux/export.h> #include <linux/export.h>
...@@ -222,7 +223,7 @@ static int xprt_send_pagedata(struct socket *sock, struct msghdr *msg, ...@@ -222,7 +223,7 @@ static int xprt_send_pagedata(struct socket *sock, struct msghdr *msg,
{ {
int err; int err;
err = xdr_alloc_bvec(xdr, GFP_KERNEL); err = xdr_alloc_bvec(xdr, rpc_task_gfp_mask());
if (err < 0) if (err < 0)
return err; return err;
......
...@@ -1679,15 +1679,12 @@ static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task ...@@ -1679,15 +1679,12 @@ static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task
static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt) static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{ {
struct rpc_rqst *req = ERR_PTR(-EAGAIN); struct rpc_rqst *req = ERR_PTR(-EAGAIN);
gfp_t gfp_mask = GFP_KERNEL;
if (xprt->num_reqs >= xprt->max_reqs) if (xprt->num_reqs >= xprt->max_reqs)
goto out; goto out;
++xprt->num_reqs; ++xprt->num_reqs;
spin_unlock(&xprt->reserve_lock); spin_unlock(&xprt->reserve_lock);
if (current->flags & PF_WQ_WORKER) req = kzalloc(sizeof(*req), rpc_task_gfp_mask());
gfp_mask |= __GFP_NORETRY | __GFP_NOWARN;
req = kzalloc(sizeof(*req), gfp_mask);
spin_lock(&xprt->reserve_lock); spin_lock(&xprt->reserve_lock);
if (req != NULL) if (req != NULL)
goto out; goto out;
......
...@@ -428,9 +428,9 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, ...@@ -428,9 +428,9 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
offset += want; offset += want;
} }
want = xs_alloc_sparse_pages(buf, want = xs_alloc_sparse_pages(
min_t(size_t, count - offset, buf->page_len), buf, min_t(size_t, count - offset, buf->page_len),
GFP_KERNEL); GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
if (seek < want) { if (seek < want) {
ret = xs_read_bvec(sock, msg, flags, buf->bvec, ret = xs_read_bvec(sock, msg, flags, buf->bvec,
xdr_buf_pagecount(buf), xdr_buf_pagecount(buf),
...@@ -826,7 +826,8 @@ static void ...@@ -826,7 +826,8 @@ static void
xs_stream_prepare_request(struct rpc_rqst *req) xs_stream_prepare_request(struct rpc_rqst *req)
{ {
xdr_free_bvec(&req->rq_rcv_buf); xdr_free_bvec(&req->rq_rcv_buf);
req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_KERNEL); req->rq_task->tk_status = xdr_alloc_bvec(
&req->rq_rcv_buf, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
} }
/* /*
...@@ -2487,7 +2488,7 @@ static int bc_malloc(struct rpc_task *task) ...@@ -2487,7 +2488,7 @@ static int bc_malloc(struct rpc_task *task)
return -EINVAL; return -EINVAL;
} }
page = alloc_page(GFP_KERNEL); page = alloc_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
if (!page) if (!page)
return -ENOMEM; return -ENOMEM;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment