Commit c742b634 authored by Linus Torvalds

Merge tag 'nfsd-5.8' of git://linux-nfs.org/~bfields/linux

Pull nfsd updates from Bruce Fields:
 "Highlights:

   - Keep nfsd clients from unnecessarily breaking their own
     delegations.

     Note this requires a small kthreadd addition. The result is Tejun
     Heo's suggestion (see link), and he was OK with this going through
     my tree.

   - Patch nfsd/clients/ to display filenames, and to fix byte-order
     when displaying stateid's.

   - fix a module loading/unloading bug, from Neil Brown.

   - A big series from Chuck Lever with RPC/RDMA and tracing
     improvements, laying some groundwork for RPC-over-TLS"

Link: https://lore.kernel.org/r/1588348912-24781-1-git-send-email-bfields@redhat.com

* tag 'nfsd-5.8' of git://linux-nfs.org/~bfields/linux: (49 commits)
  sunrpc: use kmemdup_nul() in gssp_stringify()
  nfsd: safer handling of corrupted c_type
  nfsd4: make drc_slab global, not per-net
  SUNRPC: Remove unreachable error condition in rpcb_getport_async()
  nfsd: Fix svc_xprt refcnt leak when setup callback client failed
  sunrpc: clean up properly in gss_mech_unregister()
  sunrpc: svcauth_gss_register_pseudoflavor must reject duplicate registrations.
  sunrpc: check that domain table is empty at module unload.
  NFSD: Fix improperly-formatted Doxygen comments
  NFSD: Squash an annoying compiler warning
  SUNRPC: Clean up request deferral tracepoints
  NFSD: Add tracepoints for monitoring NFSD callbacks
  NFSD: Add tracepoints to the NFSD state management code
  NFSD: Add tracepoints to NFSD's duplicate reply cache
  SUNRPC: svc_show_status() macro should have enum definitions
  SUNRPC: Restructure svc_udp_recvfrom()
  SUNRPC: Refactor svc_recvfrom()
  SUNRPC: Clean up svc_release_skb() functions
  SUNRPC: Refactor recvfrom path dealing with incomplete TCP receives
  SUNRPC: Replace dprintk() call sites in TCP receive path
  ...
parents b29482fd 1eb2f96d
@@ -429,6 +429,7 @@ prototypes::
int (*lm_grant)(struct file_lock *, struct file_lock *, int);
void (*lm_break)(struct file_lock *); /* break_lease callback */
int (*lm_change)(struct file_lock **, int);
+bool (*lm_breaker_owns_lease)(struct file_lock *);
locking rules:
@@ -439,6 +440,7 @@ lm_notify: yes yes no
lm_grant: no no no
lm_break: yes no no
lm_change yes no no
+lm_breaker_owns_lease: no no no
========== ============= ================= =========
buffer_head
......
@@ -1557,6 +1557,9 @@ static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
{
bool rc;
+if (lease->fl_lmops->lm_breaker_owns_lease
+    && lease->fl_lmops->lm_breaker_owns_lease(lease))
+return false;
if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) {
rc = false;
goto trace;
......
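The hunk above is the consumer side of the new lm_breaker_owns_lease hook: when the lock manager reports that the party breaking a lease is also its owner, leases_conflict() returns false and no break is initiated. A minimal sketch of what a server-side implementation could look like follows; the function name nfsd_breaker_owns_lease() and the delegation/client field names are illustrative assumptions, not a quotation of the merged code.

/* Sketch: is the task breaking this lease the same NFSv4 client that
 * holds the delegation backing it? */
static bool nfsd_breaker_owns_lease(struct file_lock *fl)
{
        struct nfs4_delegation *dp = fl->fl_owner;      /* assumed owner type */
        struct svc_rqst *rqst;

        if (!i_am_nfsd())               /* lease broken by a local opener, not nfsd */
                return false;
        rqst = kthread_data(current);   /* this nfsd thread's request */
        if (!rqst || !rqst->rq_lease_breaker)
                return false;
        /* rq_lease_breaker is pointed at the compound's nfs4_client in
         * nfsd4_proc_compound(); see the nfs4proc.c hunk below. */
        return dp->dl_stid.sc_client ==
                *(struct nfs4_client **)rqst->rq_lease_breaker;
}

With a callback along these lines, a client's own operations no longer force the server to recall that client's delegation.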
@@ -78,6 +78,8 @@ enum {
/* Checksum this amount of the request */
#define RC_CSUMLEN (256U)
+int nfsd_drc_slab_create(void);
+void nfsd_drc_slab_free(void);
int nfsd_reply_cache_init(struct nfsd_net *);
void nfsd_reply_cache_shutdown(struct nfsd_net *);
int nfsd_cache_lookup(struct svc_rqst *);
......
@@ -139,7 +139,6 @@ struct nfsd_net {
* Duplicate reply cache
*/
struct nfsd_drc_bucket *drc_hashtbl;
-struct kmem_cache *drc_slab;
/* max number of entries allowed in the cache */
unsigned int max_drc_entries;
......
@@ -38,6 +38,7 @@
#include "nfsd.h"
#include "state.h"
#include "netns.h"
+#include "trace.h"
#include "xdr4cb.h"
#include "xdr4.h"
@@ -904,16 +905,20 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
if (clp->cl_minorversion == 0) {
if (!clp->cl_cred.cr_principal &&
-(clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5))
+(clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5)) {
+trace_nfsd_cb_setup_err(clp, -EINVAL);
return -EINVAL;
+}
args.client_name = clp->cl_cred.cr_principal;
args.prognumber = conn->cb_prog;
args.protocol = XPRT_TRANSPORT_TCP;
args.authflavor = clp->cl_cred.cr_flavor;
clp->cl_cb_ident = conn->cb_ident;
} else {
-if (!conn->cb_xprt)
+if (!conn->cb_xprt) {
+trace_nfsd_cb_setup_err(clp, -EINVAL);
return -EINVAL;
+}
clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
clp->cl_cb_session = ses;
args.bc_xprt = conn->cb_xprt;
@@ -925,32 +930,27 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
/* Create RPC client */
client = rpc_create(&args);
if (IS_ERR(client)) {
-dprintk("NFSD: couldn't create callback client: %ld\n",
-PTR_ERR(client));
+trace_nfsd_cb_setup_err(clp, PTR_ERR(client));
return PTR_ERR(client);
}
cred = get_backchannel_cred(clp, client, ses);
if (!cred) {
+trace_nfsd_cb_setup_err(clp, -ENOMEM);
rpc_shutdown_client(client);
return -ENOMEM;
}
clp->cl_cb_client = client;
clp->cl_cb_cred = cred;
+trace_nfsd_cb_setup(clp);
return 0;
}
-static void warn_no_callback_path(struct nfs4_client *clp, int reason)
-{
-dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
-(int)clp->cl_name.len, clp->cl_name.data, reason);
-}
static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason)
{
if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
return;
clp->cl_cb_state = NFSD4_CB_DOWN;
-warn_no_callback_path(clp, reason);
+trace_nfsd_cb_state(clp);
}
static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason)
@@ -958,17 +958,20 @@ static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason)
if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
return;
clp->cl_cb_state = NFSD4_CB_FAULT;
-warn_no_callback_path(clp, reason);
+trace_nfsd_cb_state(clp);
}
static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
{
struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
+trace_nfsd_cb_done(clp, task->tk_status);
if (task->tk_status)
nfsd4_mark_cb_down(clp, task->tk_status);
-else
+else {
clp->cl_cb_state = NFSD4_CB_UP;
+trace_nfsd_cb_state(clp);
+}
}
static void nfsd4_cb_probe_release(void *calldata)
@@ -993,6 +996,7 @@ static const struct rpc_call_ops nfsd4_cb_probe_ops = {
void nfsd4_probe_callback(struct nfs4_client *clp)
{
clp->cl_cb_state = NFSD4_CB_UNKNOWN;
+trace_nfsd_cb_state(clp);
set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
nfsd4_run_cb(&clp->cl_cb_null);
}
@@ -1009,6 +1013,7 @@ void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
spin_lock(&clp->cl_lock);
memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
spin_unlock(&clp->cl_lock);
+trace_nfsd_cb_state(clp);
}
/*
@@ -1165,8 +1170,7 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
struct nfsd4_callback *cb = calldata;
struct nfs4_client *clp = cb->cb_clp;
-dprintk("%s: minorversion=%d\n", __func__,
-clp->cl_minorversion);
+trace_nfsd_cb_done(clp, task->tk_status);
if (!nfsd4_cb_sequence_done(task, cb))
return;
@@ -1271,6 +1275,7 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
* kill the old client:
*/
if (clp->cl_cb_client) {
+trace_nfsd_cb_shutdown(clp);
rpc_shutdown_client(clp->cl_cb_client);
clp->cl_cb_client = NULL;
put_cred(clp->cl_cb_cred);
@@ -1301,6 +1306,8 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
err = setup_callback_client(clp, &conn, ses);
if (err) {
nfsd4_mark_cb_down(clp, err);
+if (c)
+svc_xprt_put(c->cn_xprt);
return;
}
}
@@ -1314,6 +1321,8 @@ nfsd4_run_cb_work(struct work_struct *work)
struct rpc_clnt *clnt;
int flags;
+trace_nfsd_cb_work(clp, cb->cb_msg.rpc_proc->p_name);
if (cb->cb_need_restart) {
cb->cb_need_restart = false;
} else {
......
@@ -1155,7 +1155,7 @@ extern void nfs_sb_deactive(struct super_block *sb);
#define NFSD42_INTERSSC_MOUNTOPS "vers=4.2,addr=%s,sec=sys"
-/**
+/*
* Support one copy source server for now.
*/
static __be32
@@ -1245,10 +1245,9 @@ nfsd4_interssc_disconnect(struct vfsmount *ss_mnt)
mntput(ss_mnt);
}
-/**
- * nfsd4_setup_inter_ssc
- *
+/*
* Verify COPY destination stateid.
+ *
* Connect to the source server with NFSv4.1.
* Create the source struct file for nfsd_copy_range.
* Called with COPY cstate:
@@ -2302,6 +2301,8 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
}
check_if_stalefh_allowed(args);
+rqstp->rq_lease_breaker = (void **)&cstate->clp;
trace_nfsd_compound(rqstp, args->opcnt);
while (!status && resp->opcnt < args->opcnt) {
op = &args->ops[resp->opcnt++];
......
@@ -20,8 +20,7 @@
#include "nfsd.h"
#include "cache.h"
+#include "trace.h"
-#define NFSDDBG_FACILITY NFSDDBG_REPCACHE
/*
* We use this value to determine the number of hash buckets from the max
@@ -36,6 +35,8 @@ struct nfsd_drc_bucket {
spinlock_t cache_lock;
};
+static struct kmem_cache *drc_slab;
static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
struct shrink_control *sc);
@@ -95,7 +96,7 @@ nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
{
struct svc_cacherep *rp;
-rp = kmem_cache_alloc(nn->drc_slab, GFP_KERNEL);
+rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
if (rp) {
rp->c_state = RC_UNUSED;
rp->c_type = RC_NOCACHE;
@@ -129,7 +130,7 @@ nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
atomic_dec(&nn->num_drc_entries);
nn->drc_mem_usage -= sizeof(*rp);
}
-kmem_cache_free(nn->drc_slab, rp);
+kmem_cache_free(drc_slab, rp);
}
static void
@@ -141,6 +142,18 @@ nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
spin_unlock(&b->cache_lock);
}
+int nfsd_drc_slab_create(void)
+{
+drc_slab = kmem_cache_create("nfsd_drc",
+sizeof(struct svc_cacherep), 0, 0, NULL);
+return drc_slab ? 0: -ENOMEM;
+}
+
+void nfsd_drc_slab_free(void)
+{
+kmem_cache_destroy(drc_slab);
+}
int nfsd_reply_cache_init(struct nfsd_net *nn)
{
unsigned int hashsize;
@@ -159,18 +172,13 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
if (status)
goto out_nomem;
-nn->drc_slab = kmem_cache_create("nfsd_drc",
-sizeof(struct svc_cacherep), 0, 0, NULL);
-if (!nn->drc_slab)
-goto out_shrinker;
nn->drc_hashtbl = kcalloc(hashsize,
sizeof(*nn->drc_hashtbl), GFP_KERNEL);
if (!nn->drc_hashtbl) {
nn->drc_hashtbl = vzalloc(array_size(hashsize,
sizeof(*nn->drc_hashtbl)));
if (!nn->drc_hashtbl)
-goto out_slab;
+goto out_shrinker;
}
for (i = 0; i < hashsize; i++) {
@@ -180,8 +188,6 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
nn->drc_hashsize = hashsize;
return 0;
-out_slab:
-kmem_cache_destroy(nn->drc_slab);
out_shrinker:
unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
out_nomem:
@@ -209,8 +215,6 @@ void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
nn->drc_hashtbl = NULL;
nn->drc_hashsize = 0;
-kmem_cache_destroy(nn->drc_slab);
-nn->drc_slab = NULL;
}
/*
@@ -323,8 +327,10 @@ nfsd_cache_key_cmp(const struct svc_cacherep *key,
const struct svc_cacherep *rp, struct nfsd_net *nn)
{
if (key->c_key.k_xid == rp->c_key.k_xid &&
-key->c_key.k_csum != rp->c_key.k_csum)
+key->c_key.k_csum != rp->c_key.k_csum) {
++nn->payload_misses;
+trace_nfsd_drc_mismatch(nn, key, rp);
+}
return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}
@@ -377,15 +383,22 @@ nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
return ret;
}
-/*
+/**
+ * nfsd_cache_lookup - Find an entry in the duplicate reply cache
+ * @rqstp: Incoming Call to find
+ *
* Try to find an entry matching the current call in the cache. When none
* is found, we try to grab the oldest expired entry off the LRU list. If
* a suitable one isn't there, then drop the cache_lock and allocate a
* new one, then search again in case one got inserted while this thread
* didn't hold the lock.
+ *
+ * Return values:
+ * %RC_DOIT: Process the request normally
+ * %RC_REPLY: Reply from cache
+ * %RC_DROPIT: Do not process the request further
*/
-int
-nfsd_cache_lookup(struct svc_rqst *rqstp)
+int nfsd_cache_lookup(struct svc_rqst *rqstp)
{
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct svc_cacherep *rp, *found;
@@ -399,7 +412,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
rqstp->rq_cacherep = NULL;
if (type == RC_NOCACHE) {
nfsdstats.rcnocache++;
-return rtn;
+goto out;
}
csum = nfsd_cache_csum(rqstp);
@@ -409,10 +422,8 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
* preallocate an entry.
*/
rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
-if (!rp) {
-dprintk("nfsd: unable to allocate DRC entry!\n");
-return rtn;
-}
+if (!rp)
+goto out;
spin_lock(&b->cache_lock);
found = nfsd_cache_insert(b, rp, nn);
@@ -431,8 +442,10 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
/* go ahead and prune the cache */
prune_bucket(b, nn);
-out:
+out_unlock:
spin_unlock(&b->cache_lock);
+out:
return rtn;
found_entry:
@@ -442,13 +455,13 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
/* Request being processed */
if (rp->c_state == RC_INPROG)
-goto out;
+goto out_trace;
/* From the hall of fame of impractical attacks:
* Is this a user who tries to snoop on the cache? */
rtn = RC_DOIT;
if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
-goto out;
+goto out_trace;
/* Compose RPC reply header */
switch (rp->c_type) {
@@ -460,21 +473,26 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
break;
case RC_REPLBUFF:
if (!nfsd_cache_append(rqstp, &rp->c_replvec))
-goto out; /* should not happen */
+goto out_unlock; /* should not happen */
rtn = RC_REPLY;
break;
default:
-printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
-nfsd_reply_cache_free_locked(b, rp, nn);
+WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
}
-goto out;
+out_trace:
+trace_nfsd_drc_found(nn, rqstp, rtn);
+goto out_unlock;
}
-/*
- * Update a cache entry. This is called from nfsd_dispatch when
- * the procedure has been executed and the complete reply is in
- * rqstp->rq_res.
+/**
+ * nfsd_cache_update - Update an entry in the duplicate reply cache.
+ * @rqstp: svc_rqst with a finished Reply
+ * @cachetype: which cache to update
+ * @statp: Reply's status code
+ *
+ * This is called from nfsd_dispatch when the procedure has been
+ * executed and the complete reply is in rqstp->rq_res.
*
* We're copying around data here rather than swapping buffers because
* the toplevel loop requires max-sized buffers, which would be a waste
@@ -487,8 +505,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
* nfsd failed to encode a reply that otherwise would have been cached.
* In this case, nfsd_cache_update is called with statp == NULL.
*/
-void
-nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
+void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct svc_cacherep *rp = rqstp->rq_cacherep;
......
@@ -238,7 +238,7 @@ static inline struct net *netns(struct file *file)
return file_inode(file)->i_sb->s_fs_info;
}
-/**
+/*
* write_unlock_ip - Release all locks used by a client
*
* Experimental.
@@ -277,7 +277,7 @@ static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size)
return nlmsvc_unlock_all_by_ip(sap);
}
-/**
+/*
* write_unlock_fs - Release all locks on a local file system
*
* Experimental.
@@ -327,7 +327,7 @@ static ssize_t write_unlock_fs(struct file *file, char *buf, size_t size)
return error;
}
-/**
+/*
* write_filehandle - Get a variable-length NFS file handle by path
*
* On input, the buffer contains a '\n'-terminated C string comprised of
@@ -402,7 +402,7 @@ static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
return mesg - buf;
}
-/**
+/*
* write_threads - Start NFSD, or report the current number of running threads
*
* Input:
@@ -452,7 +452,7 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size)
return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%d\n", rv);
}
-/**
+/*
* write_pool_threads - Set or report the current number of threads per pool
*
* Input:
@@ -661,7 +661,7 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
return tlen + len;
}
-/**
+/*
* write_versions - Set or report the available NFS protocol versions
*
* Input:
@@ -811,7 +811,7 @@ static ssize_t __write_ports(struct file *file, char *buf, size_t size,
return -EINVAL;
}
-/**
+/*
* write_ports - Pass a socket file descriptor or transport name to listen on
*
* Input:
@@ -867,7 +867,7 @@ static ssize_t write_ports(struct file *file, char *buf, size_t size)
int nfsd_max_blksize;
-/**
+/*
* write_maxblksize - Set or report the current NFS blksize
*
* Input:
@@ -917,7 +917,7 @@ static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
nfsd_max_blksize);
}
-/**
+/*
* write_maxconn - Set or report the current max number of connections
*
* Input:
@@ -998,7 +998,7 @@ static ssize_t nfsd4_write_time(struct file *file, char *buf, size_t size,
return rv;
}
-/**
+/*
* write_leasetime - Set or report the current NFSv4 lease time
*
* Input:
@@ -1025,7 +1025,7 @@ static ssize_t write_leasetime(struct file *file, char *buf, size_t size)
return nfsd4_write_time(file, buf, size, &nn->nfsd4_lease, nn);
}
-/**
+/*
* write_gracetime - Set or report current NFSv4 grace period time
*
* As above, but sets the time of the NFSv4 grace period.
@@ -1069,7 +1069,7 @@ static ssize_t __write_recoverydir(struct file *file, char *buf, size_t size,
nfs4_recoverydir());
}
-/**
+/*
* write_recoverydir - Set or report the pathname of the recovery directory
*
* Input:
@@ -1101,7 +1101,7 @@ static ssize_t write_recoverydir(struct file *file, char *buf, size_t size)
return rv;
}
-/**
+/*
* write_v4_end_grace - release grace period for nfsd's v4.x lock manager
*
* Input:
@@ -1533,6 +1533,9 @@ static int __init init_nfsd(void)
goto out_free_slabs;
nfsd_fault_inject_init(); /* nfsd fault injection controls */
nfsd_stat_init(); /* Statistics */
+retval = nfsd_drc_slab_create();
+if (retval)
+goto out_free_stat;
nfsd_lockd_init(); /* lockd->nfsd callbacks */
retval = create_proc_exports_entry();
if (retval)
@@ -1546,6 +1549,8 @@ static int __init init_nfsd(void)
remove_proc_entry("fs/nfs", NULL);
out_free_lockd:
nfsd_lockd_shutdown();
+nfsd_drc_slab_free();
+out_free_stat:
nfsd_stat_shutdown();
nfsd_fault_inject_cleanup();
nfsd4_exit_pnfs();
@@ -1560,6 +1565,7 @@ static int __init init_nfsd(void)
static void __exit exit_nfsd(void)
{
+nfsd_drc_slab_free();
remove_proc_entry("fs/nfs/exports", NULL);
remove_proc_entry("fs/nfs", NULL);
nfsd_stat_shutdown();
......
@@ -88,6 +88,8 @@ int nfsd_pool_stats_release(struct inode *, struct file *);
void nfsd_destroy(struct net *net);
+bool i_am_nfsd(void);
struct nfsdfs_client {
struct kref cl_ref;
void (*cl_release)(struct kref *kref);
......
@@ -601,6 +601,11 @@ static const struct svc_serv_ops nfsd_thread_sv_ops = {
.svo_module = THIS_MODULE,
};
+bool i_am_nfsd(void)
+{
+return kthread_func(current) == nfsd;
+}
int nfsd_create_serv(struct net *net)
{
int error;
@@ -1011,6 +1016,7 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
*statp = rpc_garbage_args;
return 1;
}
+rqstp->rq_lease_breaker = NULL;
/*
* Give the xdr decoder a chance to change this if it wants
* (necessary in the NFSv4.0 compound case)
......
@@ -64,13 +64,6 @@ typedef struct {
refcount_t sc_count;
} copy_stateid_t;
-#define STATEID_FMT "(%08x/%08x/%08x/%08x)"
-#define STATEID_VAL(s) \
-(s)->si_opaque.so_clid.cl_boot, \
-(s)->si_opaque.so_clid.cl_id, \
-(s)->si_opaque.so_id, \
-(s)->si_generation
struct nfsd4_callback {
struct nfs4_client *cb_clp;
struct rpc_message cb_msg;
......
@@ -1048,6 +1048,7 @@ struct lock_manager_operations {
bool (*lm_break)(struct file_lock *);
int (*lm_change)(struct file_lock *, int, struct list_head *);
void (*lm_setup)(struct file_lock *, void **);
+bool (*lm_breaker_owns_lease)(struct file_lock *);
};
struct lock_manager {
......
@@ -57,6 +57,7 @@ bool kthread_should_stop(void);
bool kthread_should_park(void);
bool __kthread_should_park(struct task_struct *k);
bool kthread_freezable_should_stop(bool *was_frozen);
+void *kthread_func(struct task_struct *k);
void *kthread_data(struct task_struct *k);
void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
......
@@ -84,6 +84,7 @@ struct pf_desc {
u32 service;
char *name;
char *auth_domain_name;
+struct auth_domain *domain;
bool datatouch;
};
......
@@ -254,6 +254,7 @@ struct svc_rqst {
struct page * *rq_page_end; /* one past the last page */
struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */
+struct bio_vec rq_bvec[RPCSVC_MAXPAGES];
__be32 rq_xid; /* transmission id */
u32 rq_prog; /* program number */
@@ -299,6 +300,7 @@ struct svc_rqst {
struct net *rq_bc_net; /* pointer to backchannel's
* net namespace
*/
+void ** rq_lease_breaker; /* The v4 client breaking a lease */
};
#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
......
@@ -48,7 +48,6 @@
#include <linux/sunrpc/rpc_rdma.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
-#define SVCRDMA_DEBUG
/* Default and maximum inline threshold sizes */
enum {
@@ -160,9 +159,8 @@ struct svc_rdma_send_ctxt {
};
/* svc_rdma_backchannel.c */
-extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
-__be32 *rdma_resp,
-struct xdr_buf *rcvbuf);
+extern void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp,
+struct svc_rdma_recv_ctxt *rctxt);
/* svc_rdma_recvfrom.c */
extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma);
......
@@ -117,6 +117,12 @@ static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u
return 0;
}
+static inline bool svc_xprt_is_dead(const struct svc_xprt *xprt)
+{
+return (test_bit(XPT_DEAD, &xprt->xpt_flags) != 0) ||
+(test_bit(XPT_CLOSE, &xprt->xpt_flags) != 0);
+}
int svc_reg_xprt_class(struct svc_xprt_class *);
void svc_unreg_xprt_class(struct svc_xprt_class *);
void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *,
......
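The new svc_xprt_is_dead() gives transport code a single test for "this transport is already dead or closing". A hypothetical caller, shown only to illustrate the intended usage pattern (the surrounding function is made up):

/* Hypothetical send path: give up early if the transport has been
 * marked XPT_DEAD or XPT_CLOSE. */
static int example_sendto(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;

        if (svc_xprt_is_dead(xprt))
                return -ENOTCONN;
        /* ... encode and transmit rqstp->rq_res here ... */
        return 0;
}

This pairs with the svc_send() change later in this series, which drops the generic XPT_DEAD/XPT_CLOSE check and leaves that decision to the per-transport sendto implementations.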
@@ -20,7 +20,8 @@ int gss_svc_init(void);
void gss_svc_shutdown(void);
int gss_svc_init_net(struct net *net);
void gss_svc_shutdown_net(struct net *net);
-int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
+struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor,
+char *name);
u32 svcauth_gss_flavor(struct auth_domain *dom);
#endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */
@@ -28,7 +28,7 @@ struct svc_sock {
/* private TCP part */
/* On-the-wire fragment header: */
-__be32 sk_reclen;
+__be32 sk_marker;
/* As we receive a record, this includes the length received so
* far (including the fragment header): */
u32 sk_tcplen;
@@ -41,12 +41,12 @@ struct svc_sock {
static inline u32 svc_sock_reclen(struct svc_sock *svsk)
{
-return ntohl(svsk->sk_reclen) & RPC_FRAGMENT_SIZE_MASK;
+return be32_to_cpu(svsk->sk_marker) & RPC_FRAGMENT_SIZE_MASK;
}
static inline u32 svc_sock_final_rec(struct svc_sock *svsk)
{
-return ntohl(svsk->sk_reclen) & RPC_LAST_STREAM_FRAGMENT;
+return be32_to_cpu(svsk->sk_marker) & RPC_LAST_STREAM_FRAGMENT;
}
/*
......
@@ -1279,38 +1279,42 @@ TRACE_EVENT(xprtrdma_leaked_rep,
** Server-side RPC/RDMA events
**/
-DECLARE_EVENT_CLASS(svcrdma_xprt_event,
+DECLARE_EVENT_CLASS(svcrdma_accept_class,
TP_PROTO(
-const struct svc_xprt *xprt
+const struct svcxprt_rdma *rdma,
+long status
),
-TP_ARGS(xprt),
+TP_ARGS(rdma, status),
TP_STRUCT__entry(
-__field(const void *, xprt)
+__field(long, status)
-__string(addr, xprt->xpt_remotebuf)
+__string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
-__entry->xprt = xprt;
+__entry->status = status;
-__assign_str(addr, xprt->xpt_remotebuf);
+__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
-TP_printk("xprt=%p addr=%s",
+TP_printk("addr=%s status=%ld",
-__entry->xprt, __get_str(addr)
+__get_str(addr), __entry->status
)
);
-#define DEFINE_XPRT_EVENT(name) \
+#define DEFINE_ACCEPT_EVENT(name) \
-DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name, \
+DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
TP_PROTO( \
-const struct svc_xprt *xprt \
+const struct svcxprt_rdma *rdma, \
-), \
+long status \
-TP_ARGS(xprt))
+), \
+TP_ARGS(rdma, status))
-DEFINE_XPRT_EVENT(accept);
+DEFINE_ACCEPT_EVENT(pd);
-DEFINE_XPRT_EVENT(fail);
+DEFINE_ACCEPT_EVENT(qp);
-DEFINE_XPRT_EVENT(free);
+DEFINE_ACCEPT_EVENT(fabric);
+DEFINE_ACCEPT_EVENT(initdepth);
+DEFINE_ACCEPT_EVENT(accept);
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
@@ -1355,7 +1359,7 @@ TRACE_EVENT(svcrdma_decode_rqst,
show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
-TRACE_EVENT(svcrdma_decode_short,
+TRACE_EVENT(svcrdma_decode_short_err,
TP_PROTO(
unsigned int hdrlen
),
@@ -1399,7 +1403,8 @@ DECLARE_EVENT_CLASS(svcrdma_badreq_event,
);
#define DEFINE_BADREQ_EVENT(name) \
-DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name,\
+DEFINE_EVENT(svcrdma_badreq_event, \
+svcrdma_decode_##name##_err, \
TP_PROTO( \
__be32 *p \
), \
@@ -1583,28 +1588,117 @@ DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);
-TRACE_EVENT(svcrdma_dma_map_rwctx,
+TRACE_EVENT(svcrdma_dma_map_rw_err,
TP_PROTO(
const struct svcxprt_rdma *rdma,
+unsigned int nents,
int status
),
-TP_ARGS(rdma, status),
+TP_ARGS(rdma, nents, status),
TP_STRUCT__entry(
__field(int, status)
+__field(unsigned int, nents)
__string(device, rdma->sc_cm_id->device->name)
__string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
__entry->status = status;
+__entry->nents = nents;
+__assign_str(device, rdma->sc_cm_id->device->name);
+__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+),
+TP_printk("addr=%s device=%s nents=%u status=%d",
+__get_str(addr), __get_str(device), __entry->nents,
+__entry->status
+)
+);
+TRACE_EVENT(svcrdma_no_rwctx_err,
+TP_PROTO(
+const struct svcxprt_rdma *rdma,
+unsigned int num_sges
+),
+TP_ARGS(rdma, num_sges),
+TP_STRUCT__entry(
+__field(unsigned int, num_sges)
+__string(device, rdma->sc_cm_id->device->name)
+__string(addr, rdma->sc_xprt.xpt_remotebuf)
+),
+TP_fast_assign(
+__entry->num_sges = num_sges;
+__assign_str(device, rdma->sc_cm_id->device->name);
+__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+),
+TP_printk("addr=%s device=%s num_sges=%d",
+__get_str(addr), __get_str(device), __entry->num_sges
+)
+);
+TRACE_EVENT(svcrdma_page_overrun_err,
+TP_PROTO(
+const struct svcxprt_rdma *rdma,
+const struct svc_rqst *rqst,
+unsigned int pageno
+),
+TP_ARGS(rdma, rqst, pageno),
+TP_STRUCT__entry(
+__field(unsigned int, pageno)
+__field(u32, xid)
+__string(device, rdma->sc_cm_id->device->name)
+__string(addr, rdma->sc_xprt.xpt_remotebuf)
+),
+TP_fast_assign(
+__entry->pageno = pageno;
+__entry->xid = __be32_to_cpu(rqst->rq_xid);
+__assign_str(device, rdma->sc_cm_id->device->name);
+__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+),
+TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
+__get_str(device), __entry->xid, __entry->pageno
+)
+);
+TRACE_EVENT(svcrdma_small_wrch_err,
+TP_PROTO(
+const struct svcxprt_rdma *rdma,
+unsigned int remaining,
+unsigned int seg_no,
+unsigned int num_segs
+),
+TP_ARGS(rdma, remaining, seg_no, num_segs),
+TP_STRUCT__entry(
+__field(unsigned int, remaining)
+__field(unsigned int, seg_no)
+__field(unsigned int, num_segs)
+__string(device, rdma->sc_cm_id->device->name)
+__string(addr, rdma->sc_xprt.xpt_remotebuf)
+),
+TP_fast_assign(
+__entry->remaining = remaining;
+__entry->seg_no = seg_no;
+__entry->num_segs = num_segs;
__assign_str(device, rdma->sc_cm_id->device->name);
__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
-TP_printk("addr=%s device=%s status=%d",
+TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
-__get_str(addr), __get_str(device), __entry->status
+__get_str(addr), __get_str(device), __entry->remaining,
+__entry->seg_no, __entry->num_segs
)
);
......
@@ -46,6 +46,7 @@ struct kthread_create_info
struct kthread {
unsigned long flags;
unsigned int cpu;
+int (*threadfn)(void *);
void *data;
struct completion parked;
struct completion exited;
@@ -152,6 +153,20 @@ bool kthread_freezable_should_stop(bool *was_frozen)
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
+/**
+ * kthread_func - return the function specified on kthread creation
+ * @task: kthread task in question
+ *
+ * Returns NULL if the task is not a kthread.
+ */
+void *kthread_func(struct task_struct *task)
+{
+if (task->flags & PF_KTHREAD)
+return to_kthread(task)->threadfn;
+return NULL;
+}
+EXPORT_SYMBOL_GPL(kthread_func);
/**
* kthread_data - return data value specified on kthread creation
* @task: kthread task in question
@@ -164,6 +179,7 @@ void *kthread_data(struct task_struct *task)
{
return to_kthread(task)->data;
}
+EXPORT_SYMBOL_GPL(kthread_data);
/**
* kthread_probe_data - speculative version of kthread_data()
@@ -244,6 +260,7 @@ static int kthread(void *_create)
do_exit(-ENOMEM);
}
+self->threadfn = threadfn;
self->data = data;
init_completion(&self->exited);
init_completion(&self->parked);
......
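kthread_func() exposes the function a kthread was created to run, which is what lets nfsd implement i_am_nfsd() (see the nfssvc.c hunk above) without pushing any nfsd-specific state into the kthread core. A trivial, hypothetical sketch of the same pattern for another subsystem (all names invented):

static int my_worker_fn(void *data);   /* the subsystem's thread function */

static bool current_is_my_worker(void)
{
        /* kthread_func() returns NULL for tasks that are not kthreads,
         * so this is safe to call from any task context. */
        return kthread_func(current) == my_worker_fn;
}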
@@ -37,6 +37,8 @@ gss_mech_free(struct gss_api_mech *gm)
for (i = 0; i < gm->gm_pf_num; i++) {
pf = &gm->gm_pfs[i];
+if (pf->domain)
+auth_domain_put(pf->domain);
kfree(pf->auth_domain_name);
pf->auth_domain_name = NULL;
}
@@ -59,6 +61,7 @@ make_auth_domain_name(char *name)
static int
gss_mech_svc_setup(struct gss_api_mech *gm)
{
+struct auth_domain *dom;
struct pf_desc *pf;
int i, status;
@@ -68,10 +71,13 @@ gss_mech_svc_setup(struct gss_api_mech *gm)
status = -ENOMEM;
if (pf->auth_domain_name == NULL)
goto out;
-status = svcauth_gss_register_pseudoflavor(pf->pseudoflavor,
-pf->auth_domain_name);
-if (status)
+dom = svcauth_gss_register_pseudoflavor(
+pf->pseudoflavor, pf->auth_domain_name);
+if (IS_ERR(dom)) {
+status = PTR_ERR(dom);
goto out;
+}
+pf->domain = dom;
}
return 0;
out:
......
@@ -223,7 +223,7 @@ static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg)
static char *gssp_stringify(struct xdr_netobj *netobj)
{
-return kstrndup(netobj->data, netobj->len, GFP_KERNEL);
+return kmemdup_nul(netobj->data, netobj->len, GFP_KERNEL);
}
static void gssp_hostbased_service(char **principal)
......
@@ -809,7 +809,7 @@ u32 svcauth_gss_flavor(struct auth_domain *dom)
EXPORT_SYMBOL_GPL(svcauth_gss_flavor);
-int
+struct auth_domain *
svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
{
struct gss_domain *new;
@@ -826,21 +826,23 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
new->h.flavour = &svcauthops_gss;
new->pseudoflavor = pseudoflavor;
-stat = 0;
test = auth_domain_lookup(name, &new->h);
-if (test != &new->h) { /* Duplicate registration */
+if (test != &new->h) {
+pr_warn("svc: duplicate registration of gss pseudo flavour %s.\n",
+name);
+stat = -EADDRINUSE;
auth_domain_put(test);
-kfree(new->h.name);
+goto out_free_name;
-goto out_free_dom;
}
-return 0;
+return test;
+out_free_name:
+kfree(new->h.name);
out_free_dom:
kfree(new);
out:
-return stat;
+return ERR_PTR(stat);
}
EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor);
static inline int
......
@@ -795,12 +795,6 @@ void rpcb_getport_async(struct rpc_task *task)
child = rpcb_call_async(rpcb_clnt, map, proc);
rpc_release_client(rpcb_clnt);
-if (IS_ERR(child)) {
-/* rpcb_map_release() has freed the arguments */
-dprintk("RPC: %5u %s: rpc_run_task failed\n",
-task->tk_pid, __func__);
-return;
-}
xprt->stat.bind_count++;
rpc_put_task(child);
......
@@ -52,4 +52,5 @@ static inline int sock_is_loopback(struct sock *sk)
int rpc_clients_notifier_register(void);
void rpc_clients_notifier_unregister(void);
+void auth_domain_cleanup(void);
#endif /* _NET_SUNRPC_SUNRPC_H */
@@ -23,6 +23,7 @@
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/xprtsock.h>
+#include "sunrpc.h"
#include "netns.h"
unsigned int sunrpc_net_id;
@@ -131,6 +132,7 @@ cleanup_sunrpc(void)
unregister_rpc_pipefs();
rpc_destroy_mempool();
unregister_pernet_subsys(&sunrpc_net_ops);
+auth_domain_cleanup();
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
rpc_unregister_sysctl();
#endif
......
@@ -88,15 +88,15 @@ param_get_pool_mode(char *buf, const struct kernel_param *kp)
switch (*ip)
{
case SVC_POOL_AUTO:
-return strlcpy(buf, "auto", 20);
+return strlcpy(buf, "auto\n", 20);
case SVC_POOL_GLOBAL:
-return strlcpy(buf, "global", 20);
+return strlcpy(buf, "global\n", 20);
case SVC_POOL_PERCPU:
-return strlcpy(buf, "percpu", 20);
+return strlcpy(buf, "percpu\n", 20);
case SVC_POOL_PERNODE:
-return strlcpy(buf, "pernode", 20);
+return strlcpy(buf, "pernode\n", 20);
default:
-return sprintf(buf, "%d", *ip);
+return sprintf(buf, "%d\n", *ip);
}
}
@@ -991,6 +991,7 @@ static int __svc_register(struct net *net, const char *progname,
#endif
}
+trace_svc_register(progname, version, protocol, port, family, error);
return error;
}
@@ -1000,11 +1001,6 @@ int svc_rpcbind_set_version(struct net *net,
unsigned short proto,
unsigned short port)
{
-dprintk("svc: svc_register(%sv%d, %s, %u, %u)\n",
-progp->pg_name, version,
-proto == IPPROTO_UDP? "udp" : "tcp",
-port, family);
return __svc_register(net, progp->pg_name, progp->pg_prog,
version, family, proto, port);
@@ -1024,11 +1020,8 @@ int svc_generic_rpcbind_set(struct net *net,
return 0;
if (vers->vs_hidden) {
-dprintk("svc: svc_register(%sv%d, %s, %u, %u)"
-" (but not telling portmap)\n",
-progp->pg_name, version,
-proto == IPPROTO_UDP? "udp" : "tcp",
-port, family);
+trace_svc_noregister(progp->pg_name, version, proto,
+port, family, 0);
return 0;
}
@@ -1106,8 +1099,7 @@ static void __svc_unregister(struct net *net, const u32 program, const u32 versi
if (error == -EPROTONOSUPPORT)
error = rpcb_register(net, program, version, 0, 0);
-dprintk("svc: %s(%sv%u), error %d\n",
-__func__, progname, version, error);
+trace_svc_unregister(progname, version, error);
}
/*
@@ -1132,9 +1124,6 @@ static void svc_unregister(const struct svc_serv *serv, struct net *net)
continue;
if (progp->pg_vers[i]->vs_hidden)
continue;
-dprintk("svc: attempting to unregister %sv%u\n",
-progp->pg_name, i);
__svc_unregister(net, progp->pg_prog, i, progp->pg_name);
}
}
......
@@ -153,6 +153,7 @@ static void svc_xprt_free(struct kref *kref)
xprt_put(xprt->xpt_bc_xprt);
if (xprt->xpt_bc_xps)
xprt_switch_put(xprt->xpt_bc_xps);
+trace_svc_xprt_free(xprt);
xprt->xpt_ops->xpo_free(xprt);
module_put(owner);
}
@@ -206,6 +207,7 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
.sin6_port = htons(port),
};
#endif
+struct svc_xprt *xprt;
struct sockaddr *sap;
size_t len;
@@ -224,7 +226,11 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
return ERR_PTR(-EAFNOSUPPORT);
}
-return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
+xprt = xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
+if (IS_ERR(xprt))
+trace_svc_xprt_create_err(serv->sv_program->pg_name,
+xcl->xcl_name, sap, xprt);
+return xprt;
}
/*
@@ -304,15 +310,11 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
{
int err;
-dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
err = _svc_create_xprt(serv, xprt_name, net, family, port, flags, cred);
if (err == -EPROTONOSUPPORT) {
request_module("svc%s", xprt_name);
err = _svc_create_xprt(serv, xprt_name, net, family, port, flags, cred);
}
-if (err < 0)
-dprintk("svc: transport %s not found, err %d\n",
-xprt_name, -err);
return err;
}
EXPORT_SYMBOL_GPL(svc_create_xprt);
@@ -780,7 +782,6 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
int len = 0;
if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
-dprintk("svc_recv: found XPT_CLOSE\n");
if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags))
xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
svc_delete_xprt(xprt);
@@ -799,6 +800,7 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
if (newxpt) {
newxpt->xpt_cred = get_cred(xprt->xpt_cred);
svc_add_new_temp_xprt(serv, newxpt);
+trace_svc_xprt_accept(newxpt, serv->sv_name);
} else
module_put(xprt->xpt_class->xcl_owner);
} else if (svc_xprt_reserve_slot(rqstp, xprt)) {
@@ -835,14 +837,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
struct svc_serv *serv = rqstp->rq_server;
int len, err;
-dprintk("svc: server %p waiting for data (to = %ld)\n",
-rqstp, timeout);
-if (rqstp->rq_xprt)
-printk(KERN_ERR
-"svc_recv: service %p, transport not NULL!\n",
-rqstp);
err = svc_alloc_arg(rqstp);
if (err)
goto out;
@@ -890,7 +884,6 @@ EXPORT_SYMBOL_GPL(svc_recv);
void svc_drop(struct svc_rqst *rqstp)
{
trace_svc_drop(rqstp);
-dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
svc_xprt_release(rqstp);
}
EXPORT_SYMBOL_GPL(svc_drop);
@@ -914,16 +907,10 @@ int svc_send(struct svc_rqst *rqstp)
xb->page_len +
xb->tail[0].iov_len;
trace_svc_sendto(xb);
-/* Grab mutex to serialize outgoing data. */
-mutex_lock(&xprt->xpt_mutex);
trace_svc_stats_latency(rqstp);
-if (test_bit(XPT_DEAD, &xprt->xpt_flags)
-|| test_bit(XPT_CLOSE, &xprt->xpt_flags))
-len = -ENOTCONN;
-else
-len = xprt->xpt_ops->xpo_sendto(rqstp);
-mutex_unlock(&xprt->xpt_mutex);
+len = xprt->xpt_ops->xpo_sendto(rqstp);
trace_svc_send(rqstp, len);
svc_xprt_release(rqstp);
@@ -1031,11 +1018,10 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
struct svc_serv *serv = xprt->xpt_server;
struct svc_deferred_req *dr;
-/* Only do this once */
if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
-BUG();
+return;
-dprintk("svc: svc_delete_xprt(%p)\n", xprt);
+trace_svc_xprt_detach(xprt);
xprt->xpt_ops->xpo_detach(xprt);
if (xprt->xpt_bc_xprt)
xprt->xpt_bc_xprt->ops->close(xprt->xpt_bc_xprt);
@@ -1056,6 +1042,7 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
void svc_close_xprt(struct svc_xprt *xprt)
{
+trace_svc_xprt_close(xprt);
set_bit(XPT_CLOSE, &xprt->xpt_flags);
if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
/* someone else will have to effect the close */
@@ -1158,16 +1145,15 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
set_bit(XPT_DEFERRED, &xprt->xpt_flags);
if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
spin_unlock(&xprt->xpt_lock);
-dprintk("revisit canceled\n");
+trace_svc_defer_drop(dr);
svc_xprt_put(xprt);
-trace_svc_drop_deferred(dr);
kfree(dr);
return;
}
-dprintk("revisit queued\n");
dr->xprt = NULL;
list_add(&dr->handle.recent, &xprt->xpt_deferred);
spin_unlock(&xprt->xpt_lock);
+trace_svc_defer_queue(dr);
svc_xprt_enqueue(xprt);
svc_xprt_put(xprt); svc_xprt_put(xprt);
} }
...@@ -1213,22 +1199,24 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req) ...@@ -1213,22 +1199,24 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip, memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
dr->argslen << 2); dr->argslen << 2);
} }
trace_svc_defer(rqstp);
svc_xprt_get(rqstp->rq_xprt); svc_xprt_get(rqstp->rq_xprt);
dr->xprt = rqstp->rq_xprt; dr->xprt = rqstp->rq_xprt;
set_bit(RQ_DROPME, &rqstp->rq_flags); set_bit(RQ_DROPME, &rqstp->rq_flags);
dr->handle.revisit = svc_revisit; dr->handle.revisit = svc_revisit;
trace_svc_defer(rqstp);
return &dr->handle; return &dr->handle;
} }
/* /*
* recv data from a deferred request into an active one * recv data from a deferred request into an active one
*/ */
static int svc_deferred_recv(struct svc_rqst *rqstp) static noinline int svc_deferred_recv(struct svc_rqst *rqstp)
{ {
struct svc_deferred_req *dr = rqstp->rq_deferred; struct svc_deferred_req *dr = rqstp->rq_deferred;
trace_svc_defer_recv(dr);
/* setup iov_base past transport header */ /* setup iov_base past transport header */
rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2); rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
/* The iov_len does not include the transport header bytes */ /* The iov_len does not include the transport header bytes */
...@@ -1259,7 +1247,6 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt) ...@@ -1259,7 +1247,6 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
struct svc_deferred_req, struct svc_deferred_req,
handle.recent); handle.recent);
list_del_init(&dr->handle.recent); list_del_init(&dr->handle.recent);
trace_svc_revisit_deferred(dr);
} else } else
clear_bit(XPT_DEFERRED, &xprt->xpt_flags); clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
spin_unlock(&xprt->xpt_lock); spin_unlock(&xprt->xpt_lock);
......
...@@ -21,6 +21,8 @@ ...@@ -21,6 +21,8 @@
#include <trace/events/sunrpc.h> #include <trace/events/sunrpc.h>
#include "sunrpc.h"
#define RPCDBG_FACILITY RPCDBG_AUTH #define RPCDBG_FACILITY RPCDBG_AUTH
...@@ -205,3 +207,26 @@ struct auth_domain *auth_domain_find(char *name) ...@@ -205,3 +207,26 @@ struct auth_domain *auth_domain_find(char *name)
return NULL; return NULL;
} }
EXPORT_SYMBOL_GPL(auth_domain_find); EXPORT_SYMBOL_GPL(auth_domain_find);
/**
* auth_domain_cleanup - check that the auth_domain table is empty
*
* On module unload the auth_domain_table must be empty. To make it
* easier to catch bugs which don't clean up domains properly, we
* warn if anything remains in the table at cleanup time.
*
* Note that we cannot proactively remove the domains at this stage.
* The ->release() function might be in a module that has already been
* unloaded.
*/
void auth_domain_cleanup(void)
{
int h;
struct auth_domain *hp;
for (h = 0; h < DN_HASHMAX; h++)
hlist_for_each_entry(hp, &auth_domain_table[h], hash)
pr_warn("svc: domain %s still present at module unload.\n",
hp->name);
}
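
For context, a minimal sketch of where this check is intended to run: the sunrpc module-exit path, after every transport class, cache, and auth flavor has been torn down. The exit-function name and the ordering comment below are assumptions for illustration, not taken from this diff.

/* Illustrative module-exit ordering (assumed, not part of this patch):
 * every registrant must have released its auth_domain entries before
 * the module text disappears, so the emptiness check runs last.
 */
static void __exit cleanup_sunrpc(void)
{
	/* ... unregister transports, caches and auth flavors first ... */
	auth_domain_cleanup();	/* warn about any domain still registered */
}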
...@@ -332,15 +332,6 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, ...@@ -332,15 +332,6 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
return 0; return 0;
} }
static inline int ip_map_update(struct net *net, struct ip_map *ipm,
struct unix_domain *udom, time64_t expiry)
{
struct sunrpc_net *sn;
sn = net_generic(net, sunrpc_net_id);
return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
}
void svcauth_unix_purge(struct net *net) void svcauth_unix_purge(struct net *net)
{ {
struct sunrpc_net *sn; struct sunrpc_net *sn;
......
...@@ -10,59 +10,34 @@ ...@@ -10,59 +10,34 @@
#include "xprt_rdma.h" #include "xprt_rdma.h"
#include <trace/events/rpcrdma.h> #include <trace/events/rpcrdma.h>
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
#undef SVCRDMA_BACKCHANNEL_DEBUG
/** /**
* svc_rdma_handle_bc_reply - Process incoming backchannel reply * svc_rdma_handle_bc_reply - Process incoming backchannel Reply
* @xprt: controlling backchannel transport * @rqstp: resources for handling the Reply
* @rdma_resp: pointer to incoming transport header * @rctxt: Received message
* @rcvbuf: XDR buffer into which to decode the reply
* *
* Returns:
* %0 if @rcvbuf is filled in, xprt_complete_rqst called,
* %-EAGAIN if server should call ->recvfrom again.
*/ */
int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp, void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp,
struct xdr_buf *rcvbuf) struct svc_rdma_recv_ctxt *rctxt)
{ {
struct svc_xprt *sxprt = rqstp->rq_xprt;
struct rpc_xprt *xprt = sxprt->xpt_bc_xprt;
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
struct xdr_buf *rcvbuf = &rqstp->rq_arg;
struct kvec *dst, *src = &rcvbuf->head[0]; struct kvec *dst, *src = &rcvbuf->head[0];
__be32 *rdma_resp = rctxt->rc_recv_buf;
struct rpc_rqst *req; struct rpc_rqst *req;
u32 credits; u32 credits;
size_t len;
__be32 xid;
__be32 *p;
int ret;
p = (__be32 *)src->iov_base;
len = src->iov_len;
xid = *rdma_resp;
#ifdef SVCRDMA_BACKCHANNEL_DEBUG
pr_info("%s: xid=%08x, length=%zu\n",
__func__, be32_to_cpu(xid), len);
pr_info("%s: RPC/RDMA: %*ph\n",
__func__, (int)RPCRDMA_HDRLEN_MIN, rdma_resp);
pr_info("%s: RPC: %*ph\n",
__func__, (int)len, p);
#endif
ret = -EAGAIN;
if (src->iov_len < 24)
goto out_shortreply;
spin_lock(&xprt->queue_lock); spin_lock(&xprt->queue_lock);
req = xprt_lookup_rqst(xprt, xid); req = xprt_lookup_rqst(xprt, *rdma_resp);
if (!req) if (!req)
goto out_notfound; goto out_unlock;
dst = &req->rq_private_buf.head[0]; dst = &req->rq_private_buf.head[0];
memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf)); memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
if (dst->iov_len < len) if (dst->iov_len < src->iov_len)
goto out_unlock; goto out_unlock;
memcpy(dst->iov_base, p, len); memcpy(dst->iov_base, src->iov_base, src->iov_len);
xprt_pin_rqst(req); xprt_pin_rqst(req);
spin_unlock(&xprt->queue_lock); spin_unlock(&xprt->queue_lock);
...@@ -71,31 +46,17 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp, ...@@ -71,31 +46,17 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
credits = 1; /* don't deadlock */ credits = 1; /* don't deadlock */
else if (credits > r_xprt->rx_buf.rb_bc_max_requests) else if (credits > r_xprt->rx_buf.rb_bc_max_requests)
credits = r_xprt->rx_buf.rb_bc_max_requests; credits = r_xprt->rx_buf.rb_bc_max_requests;
spin_lock(&xprt->transport_lock); spin_lock(&xprt->transport_lock);
xprt->cwnd = credits << RPC_CWNDSHIFT; xprt->cwnd = credits << RPC_CWNDSHIFT;
spin_unlock(&xprt->transport_lock); spin_unlock(&xprt->transport_lock);
spin_lock(&xprt->queue_lock); spin_lock(&xprt->queue_lock);
ret = 0;
xprt_complete_rqst(req->rq_task, rcvbuf->len); xprt_complete_rqst(req->rq_task, rcvbuf->len);
xprt_unpin_rqst(req); xprt_unpin_rqst(req);
rcvbuf->len = 0; rcvbuf->len = 0;
out_unlock: out_unlock:
spin_unlock(&xprt->queue_lock); spin_unlock(&xprt->queue_lock);
out:
return ret;
out_shortreply:
dprintk("svcrdma: short bc reply: xprt=%p, len=%zu\n",
xprt, src->iov_len);
goto out;
out_notfound:
dprintk("svcrdma: unrecognized bc reply: xprt=%p, xid=%08x\n",
xprt, be32_to_cpu(xid));
goto out_unlock;
} }
/* Send a backwards direction RPC call. /* Send a backwards direction RPC call.
...@@ -192,10 +153,6 @@ rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst) ...@@ -192,10 +153,6 @@ rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst)
*p++ = xdr_zero; *p++ = xdr_zero;
*p = xdr_zero; *p = xdr_zero;
#ifdef SVCRDMA_BACKCHANNEL_DEBUG
pr_info("%s: %*ph\n", __func__, 64, rqst->rq_buffer);
#endif
rqst->rq_xtime = ktime_get(); rqst->rq_xtime = ktime_get();
rc = svc_rdma_bc_sendto(rdma, rqst, ctxt); rc = svc_rdma_bc_sendto(rdma, rqst, ctxt);
if (rc) if (rc)
...@@ -206,45 +163,36 @@ rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst) ...@@ -206,45 +163,36 @@ rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst)
svc_rdma_send_ctxt_put(rdma, ctxt); svc_rdma_send_ctxt_put(rdma, ctxt);
drop_connection: drop_connection:
dprintk("svcrdma: failed to send bc call\n");
return -ENOTCONN; return -ENOTCONN;
} }
/* Send an RPC call on the passive end of a transport /**
* connection. * xprt_rdma_bc_send_request - Send a reverse-direction Call
* @rqst: rpc_rqst containing Call message to be sent
*
* Return values:
* %0 if the message was sent successfully
* %ENOTCONN if the message was not sent
*/ */
static int static int xprt_rdma_bc_send_request(struct rpc_rqst *rqst)
xprt_rdma_bc_send_request(struct rpc_rqst *rqst)
{ {
struct svc_xprt *sxprt = rqst->rq_xprt->bc_xprt; struct svc_xprt *sxprt = rqst->rq_xprt->bc_xprt;
struct svcxprt_rdma *rdma; struct svcxprt_rdma *rdma =
container_of(sxprt, struct svcxprt_rdma, sc_xprt);
int ret; int ret;
dprintk("svcrdma: sending bc call with xid: %08x\n", if (test_bit(XPT_DEAD, &sxprt->xpt_flags))
be32_to_cpu(rqst->rq_xid)); return -ENOTCONN;
mutex_lock(&sxprt->xpt_mutex); ret = rpcrdma_bc_send_request(rdma, rqst);
if (ret == -ENOTCONN)
ret = -ENOTCONN; svc_close_xprt(sxprt);
rdma = container_of(sxprt, struct svcxprt_rdma, sc_xprt); return ret;
if (!test_bit(XPT_DEAD, &sxprt->xpt_flags)) {
ret = rpcrdma_bc_send_request(rdma, rqst);
if (ret == -ENOTCONN)
svc_close_xprt(sxprt);
}
mutex_unlock(&sxprt->xpt_mutex);
if (ret < 0)
return ret;
return 0;
} }
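
For readers unfamiliar with how this static handler is invoked: it is reached through the reverse-direction transport's rpc_xprt_ops table. The sketch below shows the general wiring; the ops-table name and the exact set of fields populated are assumptions, only the three handlers are from this file.

/* Assumed wiring of the server-side RDMA backchannel ops (illustrative;
 * field names follow struct rpc_xprt_ops, the table name is a guess).
 */
static const struct rpc_xprt_ops xprt_rdma_bc_procs = {
	.send_request	= xprt_rdma_bc_send_request,	/* documented above */
	.close		= xprt_rdma_bc_close,
	.destroy	= xprt_rdma_bc_put,
};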
static void static void
xprt_rdma_bc_close(struct rpc_xprt *xprt) xprt_rdma_bc_close(struct rpc_xprt *xprt)
{ {
dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
xprt_disconnect_done(xprt); xprt_disconnect_done(xprt);
xprt->cwnd = RPC_CWNDSHIFT; xprt->cwnd = RPC_CWNDSHIFT;
} }
...@@ -252,8 +200,6 @@ xprt_rdma_bc_close(struct rpc_xprt *xprt) ...@@ -252,8 +200,6 @@ xprt_rdma_bc_close(struct rpc_xprt *xprt)
static void static void
xprt_rdma_bc_put(struct rpc_xprt *xprt) xprt_rdma_bc_put(struct rpc_xprt *xprt)
{ {
dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
xprt_rdma_free_addresses(xprt); xprt_rdma_free_addresses(xprt);
xprt_free(xprt); xprt_free(xprt);
} }
...@@ -288,19 +234,14 @@ xprt_setup_rdma_bc(struct xprt_create *args) ...@@ -288,19 +234,14 @@ xprt_setup_rdma_bc(struct xprt_create *args)
struct rpc_xprt *xprt; struct rpc_xprt *xprt;
struct rpcrdma_xprt *new_xprt; struct rpcrdma_xprt *new_xprt;
if (args->addrlen > sizeof(xprt->addr)) { if (args->addrlen > sizeof(xprt->addr))
dprintk("RPC: %s: address too large\n", __func__);
return ERR_PTR(-EBADF); return ERR_PTR(-EBADF);
}
xprt = xprt_alloc(args->net, sizeof(*new_xprt), xprt = xprt_alloc(args->net, sizeof(*new_xprt),
RPCRDMA_MAX_BC_REQUESTS, RPCRDMA_MAX_BC_REQUESTS,
RPCRDMA_MAX_BC_REQUESTS); RPCRDMA_MAX_BC_REQUESTS);
if (!xprt) { if (!xprt)
dprintk("RPC: %s: couldn't allocate rpc_xprt\n",
__func__);
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
}
xprt->timeout = &xprt_rdma_bc_timeout; xprt->timeout = &xprt_rdma_bc_timeout;
xprt_set_bound(xprt); xprt_set_bound(xprt);
......
...@@ -665,23 +665,23 @@ static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg, ...@@ -665,23 +665,23 @@ static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg,
return hdr_len; return hdr_len;
out_short: out_short:
trace_svcrdma_decode_short(rq_arg->len); trace_svcrdma_decode_short_err(rq_arg->len);
return -EINVAL; return -EINVAL;
out_version: out_version:
trace_svcrdma_decode_badvers(rdma_argp); trace_svcrdma_decode_badvers_err(rdma_argp);
return -EPROTONOSUPPORT; return -EPROTONOSUPPORT;
out_drop: out_drop:
trace_svcrdma_decode_drop(rdma_argp); trace_svcrdma_decode_drop_err(rdma_argp);
return 0; return 0;
out_proc: out_proc:
trace_svcrdma_decode_badproc(rdma_argp); trace_svcrdma_decode_badproc_err(rdma_argp);
return -EINVAL; return -EINVAL;
out_inval: out_inval:
trace_svcrdma_decode_parse(rdma_argp); trace_svcrdma_decode_parse_err(rdma_argp);
return -EINVAL; return -EINVAL;
} }
...@@ -878,12 +878,9 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) ...@@ -878,12 +878,9 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
goto out_drop; goto out_drop;
rqstp->rq_xprt_hlen = ret; rqstp->rq_xprt_hlen = ret;
if (svc_rdma_is_backchannel_reply(xprt, p)) { if (svc_rdma_is_backchannel_reply(xprt, p))
ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p, goto out_backchannel;
&rqstp->rq_arg);
svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
return ret;
}
svc_rdma_get_inv_rkey(rdma_xprt, ctxt); svc_rdma_get_inv_rkey(rdma_xprt, ctxt);
p += rpcrdma_fixed_maxsz; p += rpcrdma_fixed_maxsz;
...@@ -913,6 +910,8 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) ...@@ -913,6 +910,8 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
svc_rdma_recv_ctxt_put(rdma_xprt, ctxt); svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
return ret; return ret;
out_backchannel:
svc_rdma_handle_bc_reply(rqstp, ctxt);
out_drop: out_drop:
svc_rdma_recv_ctxt_put(rdma_xprt, ctxt); svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
return 0; return 0;
......
...@@ -9,13 +9,10 @@ ...@@ -9,13 +9,10 @@
#include <linux/sunrpc/rpc_rdma.h> #include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h> #include <linux/sunrpc/svc_rdma.h>
#include <linux/sunrpc/debug.h>
#include "xprt_rdma.h" #include "xprt_rdma.h"
#include <trace/events/rpcrdma.h> #include <trace/events/rpcrdma.h>
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc); static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc); static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
...@@ -39,7 +36,7 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc); ...@@ -39,7 +36,7 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
struct svc_rdma_rw_ctxt { struct svc_rdma_rw_ctxt {
struct list_head rw_list; struct list_head rw_list;
struct rdma_rw_ctx rw_ctx; struct rdma_rw_ctx rw_ctx;
int rw_nents; unsigned int rw_nents;
struct sg_table rw_sg_table; struct sg_table rw_sg_table;
struct scatterlist rw_first_sgl[]; struct scatterlist rw_first_sgl[];
}; };
...@@ -67,19 +64,22 @@ svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges) ...@@ -67,19 +64,22 @@ svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE), ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
GFP_KERNEL); GFP_KERNEL);
if (!ctxt) if (!ctxt)
goto out; goto out_noctx;
INIT_LIST_HEAD(&ctxt->rw_list); INIT_LIST_HEAD(&ctxt->rw_list);
} }
ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl; ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges, if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
ctxt->rw_sg_table.sgl, ctxt->rw_sg_table.sgl,
SG_CHUNK_SIZE)) { SG_CHUNK_SIZE))
kfree(ctxt); goto out_free;
ctxt = NULL;
}
out:
return ctxt; return ctxt;
out_free:
kfree(ctxt);
out_noctx:
trace_svcrdma_no_rwctx_err(rdma, sges);
return NULL;
} }
static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma, static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
...@@ -107,6 +107,34 @@ void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma) ...@@ -107,6 +107,34 @@ void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
} }
} }
/**
* svc_rdma_rw_ctx_init - Prepare a R/W context for I/O
* @rdma: controlling transport instance
* @ctxt: R/W context to prepare
* @offset: RDMA offset
* @handle: RDMA tag/handle
* @direction: I/O direction
*
* Returns the number of WQEs that will be needed on the work queue
* on success, or a negative errno.
*/
static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
struct svc_rdma_rw_ctxt *ctxt,
u64 offset, u32 handle,
enum dma_data_direction direction)
{
int ret;
ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
ctxt->rw_sg_table.sgl, ctxt->rw_nents,
0, offset, handle, direction);
if (unlikely(ret < 0)) {
svc_rdma_put_rw_ctxt(rdma, ctxt);
trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret);
}
return ret;
}
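
A rough usage sketch, mirroring the two call sites further down in this patch: the caller maps its scatterlist into the context, hands it to the helper, and converts any failure into -EIO. Error cleanup (releasing the context and firing the dma_map_rw_err tracepoint) now lives in the helper, which is the point of the refactor. Variable names below are borrowed from the read and write paths later in this hunk.

/* Hedged caller sketch -- not a complete function. */
ctxt->rw_nents = sge_no;		/* SG entries actually populated */
ret = svc_rdma_rw_ctx_init(rdma, ctxt, seg_offset, seg_handle,
			   DMA_TO_DEVICE);
if (ret < 0)
	return -EIO;			/* ctxt already released by helper */
cc->cc_sqecount += ret;			/* reserve the WQEs it reported */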
/* A chunk context tracks all I/O for moving one Read or Write /* A chunk context tracks all I/O for moving one Read or Write
* chunk. This is a set of rdma_rw's that handle data movement * chunk. This is a set of rdma_rw's that handle data movement
* for all segments of one chunk. * for all segments of one chunk.
...@@ -428,15 +456,13 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info, ...@@ -428,15 +456,13 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
ctxt = svc_rdma_get_rw_ctxt(rdma, ctxt = svc_rdma_get_rw_ctxt(rdma,
(write_len >> PAGE_SHIFT) + 2); (write_len >> PAGE_SHIFT) + 2);
if (!ctxt) if (!ctxt)
goto out_noctx; return -ENOMEM;
constructor(info, write_len, ctxt); constructor(info, write_len, ctxt);
ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, ret = svc_rdma_rw_ctx_init(rdma, ctxt, seg_offset, seg_handle,
rdma->sc_port_num, ctxt->rw_sg_table.sgl, DMA_TO_DEVICE);
ctxt->rw_nents, 0, seg_offset,
seg_handle, DMA_TO_DEVICE);
if (ret < 0) if (ret < 0)
goto out_initerr; return -EIO;
trace_svcrdma_send_wseg(seg_handle, write_len, seg_offset); trace_svcrdma_send_wseg(seg_handle, write_len, seg_offset);
...@@ -455,18 +481,9 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info, ...@@ -455,18 +481,9 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
return 0; return 0;
out_overflow: out_overflow:
dprintk("svcrdma: inadequate space in Write chunk (%u)\n", trace_svcrdma_small_wrch_err(rdma, remaining, info->wi_seg_no,
info->wi_nsegs); info->wi_nsegs);
return -E2BIG; return -E2BIG;
out_noctx:
dprintk("svcrdma: no R/W ctxs available\n");
return -ENOMEM;
out_initerr:
svc_rdma_put_rw_ctxt(rdma, ctxt);
trace_svcrdma_dma_map_rwctx(rdma, ret);
return -EIO;
} }
/* Send one of an xdr_buf's kvecs by itself. To send a Reply /* Send one of an xdr_buf's kvecs by itself. To send a Reply
...@@ -616,7 +633,7 @@ static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info, ...@@ -616,7 +633,7 @@ static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT; sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no); ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
if (!ctxt) if (!ctxt)
goto out_noctx; return -ENOMEM;
ctxt->rw_nents = sge_no; ctxt->rw_nents = sge_no;
sg = ctxt->rw_sg_table.sgl; sg = ctxt->rw_sg_table.sgl;
...@@ -646,29 +663,18 @@ static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info, ...@@ -646,29 +663,18 @@ static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
goto out_overrun; goto out_overrun;
} }
ret = rdma_rw_ctx_init(&ctxt->rw_ctx, cc->cc_rdma->sc_qp, ret = svc_rdma_rw_ctx_init(cc->cc_rdma, ctxt, offset, rkey,
cc->cc_rdma->sc_port_num, DMA_FROM_DEVICE);
ctxt->rw_sg_table.sgl, ctxt->rw_nents,
0, offset, rkey, DMA_FROM_DEVICE);
if (ret < 0) if (ret < 0)
goto out_initerr; return -EIO;
list_add(&ctxt->rw_list, &cc->cc_rwctxts); list_add(&ctxt->rw_list, &cc->cc_rwctxts);
cc->cc_sqecount += ret; cc->cc_sqecount += ret;
return 0; return 0;
out_noctx:
dprintk("svcrdma: no R/W ctxs available\n");
return -ENOMEM;
out_overrun: out_overrun:
dprintk("svcrdma: request overruns rq_pages\n"); trace_svcrdma_page_overrun_err(cc->cc_rdma, rqstp, info->ri_pageno);
return -EINVAL; return -EINVAL;
out_initerr:
trace_svcrdma_dma_map_rwctx(cc->cc_rdma, ret);
svc_rdma_put_rw_ctxt(cc->cc_rdma, ctxt);
return -EIO;
} }
/* Walk the segments in the Read chunk starting at @p and construct /* Walk the segments in the Read chunk starting at @p and construct
......
...@@ -868,12 +868,10 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) ...@@ -868,12 +868,10 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
__be32 *p; __be32 *p;
int ret; int ret;
/* Create the RDMA response header. xprt->xpt_mutex, ret = -ENOTCONN;
* acquired in svc_send(), serializes RPC replies. The if (svc_xprt_is_dead(xprt))
* code path below that inserts the credit grant value goto err0;
* into each transport header runs only inside this
* critical section.
*/
ret = -ENOMEM; ret = -ENOMEM;
sctxt = svc_rdma_send_ctxt_get(rdma); sctxt = svc_rdma_send_ctxt_get(rdma);
if (!sctxt) if (!sctxt)
......
...@@ -211,7 +211,12 @@ static void handle_connect_req(struct rdma_cm_id *new_cma_id, ...@@ -211,7 +211,12 @@ static void handle_connect_req(struct rdma_cm_id *new_cma_id,
newxprt->sc_ord = param->initiator_depth; newxprt->sc_ord = param->initiator_depth;
sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr; sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa)); newxprt->sc_xprt.xpt_remotelen = svc_addr_len(sa);
memcpy(&newxprt->sc_xprt.xpt_remote, sa,
newxprt->sc_xprt.xpt_remotelen);
snprintf(newxprt->sc_xprt.xpt_remotebuf,
sizeof(newxprt->sc_xprt.xpt_remotebuf) - 1, "%pISc", sa);
/* The remote port is arbitrary and not under the control of the /* The remote port is arbitrary and not under the control of the
* client ULP. Set it to a fixed value so that the DRC continues * client ULP. Set it to a fixed value so that the DRC continues
* to be effective after a reconnect. * to be effective after a reconnect.
...@@ -309,11 +314,8 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, ...@@ -309,11 +314,8 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
struct svcxprt_rdma *cma_xprt; struct svcxprt_rdma *cma_xprt;
int ret; int ret;
dprintk("svcrdma: Creating RDMA listener\n"); if (sa->sa_family != AF_INET && sa->sa_family != AF_INET6)
if ((sa->sa_family != AF_INET) && (sa->sa_family != AF_INET6)) {
dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
return ERR_PTR(-EAFNOSUPPORT); return ERR_PTR(-EAFNOSUPPORT);
}
cma_xprt = svc_rdma_create_xprt(serv, net); cma_xprt = svc_rdma_create_xprt(serv, net);
if (!cma_xprt) if (!cma_xprt)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
...@@ -324,7 +326,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, ...@@ -324,7 +326,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
RDMA_PS_TCP, IB_QPT_RC); RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(listen_id)) { if (IS_ERR(listen_id)) {
ret = PTR_ERR(listen_id); ret = PTR_ERR(listen_id);
dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
goto err0; goto err0;
} }
...@@ -333,23 +334,17 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, ...@@ -333,23 +334,17 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
*/ */
#if IS_ENABLED(CONFIG_IPV6) #if IS_ENABLED(CONFIG_IPV6)
ret = rdma_set_afonly(listen_id, 1); ret = rdma_set_afonly(listen_id, 1);
if (ret) { if (ret)
dprintk("svcrdma: rdma_set_afonly failed = %d\n", ret);
goto err1; goto err1;
}
#endif #endif
ret = rdma_bind_addr(listen_id, sa); ret = rdma_bind_addr(listen_id, sa);
if (ret) { if (ret)
dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
goto err1; goto err1;
}
cma_xprt->sc_cm_id = listen_id; cma_xprt->sc_cm_id = listen_id;
ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG); ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
if (ret) { if (ret)
dprintk("svcrdma: rdma_listen failed = %d\n", ret);
goto err1; goto err1;
}
/* /*
* We need to use the address from the cm_id in case the * We need to use the address from the cm_id in case the
...@@ -405,9 +400,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) ...@@ -405,9 +400,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
if (!newxprt) if (!newxprt)
return NULL; return NULL;
dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
newxprt, newxprt->sc_cm_id);
dev = newxprt->sc_cm_id->device; dev = newxprt->sc_cm_id->device;
newxprt->sc_port_num = newxprt->sc_cm_id->port_num; newxprt->sc_port_num = newxprt->sc_cm_id->port_num;
...@@ -443,21 +435,17 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) ...@@ -443,21 +435,17 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
newxprt->sc_pd = ib_alloc_pd(dev, 0); newxprt->sc_pd = ib_alloc_pd(dev, 0);
if (IS_ERR(newxprt->sc_pd)) { if (IS_ERR(newxprt->sc_pd)) {
dprintk("svcrdma: error creating PD for connect request\n"); trace_svcrdma_pd_err(newxprt, PTR_ERR(newxprt->sc_pd));
goto errout; goto errout;
} }
newxprt->sc_sq_cq = ib_alloc_cq_any(dev, newxprt, newxprt->sc_sq_depth, newxprt->sc_sq_cq = ib_alloc_cq_any(dev, newxprt, newxprt->sc_sq_depth,
IB_POLL_WORKQUEUE); IB_POLL_WORKQUEUE);
if (IS_ERR(newxprt->sc_sq_cq)) { if (IS_ERR(newxprt->sc_sq_cq))
dprintk("svcrdma: error creating SQ CQ for connect request\n");
goto errout; goto errout;
}
newxprt->sc_rq_cq = newxprt->sc_rq_cq =
ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE); ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE);
if (IS_ERR(newxprt->sc_rq_cq)) { if (IS_ERR(newxprt->sc_rq_cq))
dprintk("svcrdma: error creating RQ CQ for connect request\n");
goto errout; goto errout;
}
memset(&qp_attr, 0, sizeof qp_attr); memset(&qp_attr, 0, sizeof qp_attr);
qp_attr.event_handler = qp_event_handler; qp_attr.event_handler = qp_event_handler;
...@@ -481,7 +469,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) ...@@ -481,7 +469,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr); ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
if (ret) { if (ret) {
dprintk("svcrdma: failed to create QP, ret=%d\n", ret); trace_svcrdma_qp_err(newxprt, ret);
goto errout; goto errout;
} }
newxprt->sc_qp = newxprt->sc_cm_id->qp; newxprt->sc_qp = newxprt->sc_cm_id->qp;
...@@ -489,8 +477,10 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) ...@@ -489,8 +477,10 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
if (!(dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) if (!(dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
newxprt->sc_snd_w_inv = false; newxprt->sc_snd_w_inv = false;
if (!rdma_protocol_iwarp(dev, newxprt->sc_port_num) && if (!rdma_protocol_iwarp(dev, newxprt->sc_port_num) &&
!rdma_ib_or_roce(dev, newxprt->sc_port_num)) !rdma_ib_or_roce(dev, newxprt->sc_port_num)) {
trace_svcrdma_fabric_err(newxprt, -EINVAL);
goto errout; goto errout;
}
if (!svc_rdma_post_recvs(newxprt)) if (!svc_rdma_post_recvs(newxprt))
goto errout; goto errout;
...@@ -512,15 +502,17 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) ...@@ -512,15 +502,17 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
conn_param.initiator_depth = min_t(int, newxprt->sc_ord, conn_param.initiator_depth = min_t(int, newxprt->sc_ord,
dev->attrs.max_qp_init_rd_atom); dev->attrs.max_qp_init_rd_atom);
if (!conn_param.initiator_depth) { if (!conn_param.initiator_depth) {
dprintk("svcrdma: invalid ORD setting\n");
ret = -EINVAL; ret = -EINVAL;
trace_svcrdma_initdepth_err(newxprt, ret);
goto errout; goto errout;
} }
conn_param.private_data = &pmsg; conn_param.private_data = &pmsg;
conn_param.private_data_len = sizeof(pmsg); conn_param.private_data_len = sizeof(pmsg);
ret = rdma_accept(newxprt->sc_cm_id, &conn_param); ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
if (ret) if (ret) {
trace_svcrdma_accept_err(newxprt, ret);
goto errout; goto errout;
}
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
dprintk("svcrdma: new connection %p accepted:\n", newxprt); dprintk("svcrdma: new connection %p accepted:\n", newxprt);
...@@ -535,12 +527,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) ...@@ -535,12 +527,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
dprintk(" ord : %d\n", conn_param.initiator_depth); dprintk(" ord : %d\n", conn_param.initiator_depth);
#endif #endif
trace_svcrdma_xprt_accept(&newxprt->sc_xprt);
return &newxprt->sc_xprt; return &newxprt->sc_xprt;
errout: errout:
dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
trace_svcrdma_xprt_fail(&newxprt->sc_xprt);
/* Take a reference in case the DTO handler runs */ /* Take a reference in case the DTO handler runs */
svc_xprt_get(&newxprt->sc_xprt); svc_xprt_get(&newxprt->sc_xprt);
if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp)) if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
...@@ -578,8 +567,6 @@ static void __svc_rdma_free(struct work_struct *work) ...@@ -578,8 +567,6 @@ static void __svc_rdma_free(struct work_struct *work)
container_of(work, struct svcxprt_rdma, sc_work); container_of(work, struct svcxprt_rdma, sc_work);
struct svc_xprt *xprt = &rdma->sc_xprt; struct svc_xprt *xprt = &rdma->sc_xprt;
trace_svcrdma_xprt_free(xprt);
if (rdma->sc_qp && !IS_ERR(rdma->sc_qp)) if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
ib_drain_qp(rdma->sc_qp); ib_drain_qp(rdma->sc_qp);
......
...@@ -2528,8 +2528,16 @@ static int bc_sendto(struct rpc_rqst *req) ...@@ -2528,8 +2528,16 @@ static int bc_sendto(struct rpc_rqst *req)
return sent; return sent;
} }
/* /**
* The send routine. Borrows from svc_send * bc_send_request - Send a backchannel Call on a TCP socket
* @req: rpc_rqst containing Call message to be sent
*
* xpt_mutex ensures @req's whole message is written to the socket
* without interruption.
*
* Return values:
* %0 if the message was sent successfully
* %ENOTCONN if the message was not sent
*/ */
static int bc_send_request(struct rpc_rqst *req) static int bc_send_request(struct rpc_rqst *req)
{ {
......
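
To make the xpt_mutex comment above concrete, here is a minimal sketch of the serialization pattern it describes. This is the shape of the send path, not a verbatim copy of the function body: the mutex is held across the whole socket write so a partial backchannel Call cannot interleave with other traffic on the shared connection.

/* Sketch of the xpt_mutex pattern described above (illustrative only). */
static int bc_send_request_sketch(struct rpc_rqst *req)
{
	struct svc_xprt *xprt = req->rq_xprt->bc_xprt;
	int len;

	mutex_lock(&xprt->xpt_mutex);
	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
		len = -ENOTCONN;
	else
		len = bc_sendto(req);	/* writes the entire Call or fails */
	mutex_unlock(&xprt->xpt_mutex);

	return len < 0 ? len : 0;
}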