Commit 76ecec21 authored by Trond Myklebust, committed by J. Bruce Fields

knfsd: Simplify NFS duplicate replay cache

Simplify the duplicate replay cache by initialising the preallocated
cache entry, so that we can use it as a key for the cache lookup.

Note that the 99.999% case we want to optimise for is still the one
where the lookup fails and we have to add this entry to the cache,
so pre-initialising should not cause a performance penalty. (A
standalone sketch of the resulting insert-or-lookup pattern follows
the commit metadata below.)
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent 3e87da51
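
The essence of the change, as a standalone sketch: the preallocated entry is filled in completely before the bucket is searched, so the same object serves first as the lookup key and then, on a miss, as the inserted entry. The C below is a minimal illustration only; struct entry and cache_insert() are hypothetical stand-ins, not the kernel's svc_cacherep or nfsd_cache_insert(), and a singly linked list stands in for the per-bucket LRU list.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical, simplified entry -- not the kernel's struct svc_cacherep. */
struct entry {
	struct entry *next;
	uint32_t xid;	/* request identity, filled in before the lookup */
	uint32_t csum;
};

/*
 * Return the matching entry if one exists; otherwise link @key itself
 * into the bucket and return it. Because @key is fully initialised,
 * one walk of the list serves as both the lookup and the insert.
 */
static struct entry *cache_insert(struct entry **head, struct entry *key)
{
	struct entry *e;

	for (e = *head; e != NULL; e = e->next)
		if (e->xid == key->xid && e->csum == key->csum)
			return e;	/* hit: caller frees the unused key */

	key->next = *head;		/* miss: the key becomes the entry */
	*head = key;
	return key;
}

In the real patch, nfsd_cache_insert() additionally moves the returned entry, hit or miss, to the tail of the bucket's LRU list via lru_put_end().
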
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -121,7 +121,7 @@ nfsd_cache_hash(__be32 xid)
 }
 
 static struct svc_cacherep *
-nfsd_reply_cache_alloc(void)
+nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum)
 {
 	struct svc_cacherep *rp;
 
@@ -130,6 +130,16 @@ nfsd_reply_cache_alloc(void)
 		rp->c_state = RC_UNUSED;
 		rp->c_type = RC_NOCACHE;
 		INIT_LIST_HEAD(&rp->c_lru);
+
+		rp->c_xid = rqstp->rq_xid;
+		rp->c_proc = rqstp->rq_proc;
+		memset(&rp->c_addr, 0, sizeof(rp->c_addr));
+		rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
+		rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
+		rp->c_prot = rqstp->rq_prot;
+		rp->c_vers = rqstp->rq_vers;
+		rp->c_len = rqstp->rq_arg.len;
+		rp->c_csum = csum;
 	}
 	return rp;
 }
@@ -141,9 +151,11 @@ nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
 		drc_mem_usage -= rp->c_replvec.iov_len;
 		kfree(rp->c_replvec.iov_base);
 	}
-	list_del(&rp->c_lru);
-	atomic_dec(&num_drc_entries);
-	drc_mem_usage -= sizeof(*rp);
+	if (rp->c_state != RC_UNUSED) {
+		list_del(&rp->c_lru);
+		atomic_dec(&num_drc_entries);
+		drc_mem_usage -= sizeof(*rp);
+	}
 	kmem_cache_free(drc_slab, rp);
 }
@@ -319,24 +331,23 @@ nfsd_cache_csum(struct svc_rqst *rqstp)
 }
 
 static bool
-nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
+nfsd_cache_match(const struct svc_cacherep *key, const struct svc_cacherep *rp)
 {
 	/* Check RPC XID first */
-	if (rqstp->rq_xid != rp->c_xid)
+	if (key->c_xid != rp->c_xid)
 		return false;
 	/* compare checksum of NFS data */
-	if (csum != rp->c_csum) {
+	if (key->c_csum != rp->c_csum) {
 		++payload_misses;
 		return false;
 	}
 
 	/* Other discriminators */
-	if (rqstp->rq_proc != rp->c_proc ||
-	    rqstp->rq_prot != rp->c_prot ||
-	    rqstp->rq_vers != rp->c_vers ||
-	    rqstp->rq_arg.len != rp->c_len ||
-	    !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
-	    rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
+	if (key->c_proc != rp->c_proc ||
+	    key->c_prot != rp->c_prot ||
+	    key->c_vers != rp->c_vers ||
+	    key->c_len != rp->c_len ||
+	    memcmp(&key->c_addr, &rp->c_addr, sizeof(key->c_addr)) != 0)
 		return false;
 
 	return true;
@@ -345,19 +356,18 @@ nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
 /*
  * Search the request hash for an entry that matches the given rqstp.
  * Must be called with cache_lock held. Returns the found entry or
- * NULL on failure.
+ * inserts an empty key on failure.
  */
 static struct svc_cacherep *
-nfsd_cache_search(struct nfsd_drc_bucket *b, struct svc_rqst *rqstp,
-		__wsum csum)
+nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key)
 {
-	struct svc_cacherep *rp, *ret = NULL;
+	struct svc_cacherep *rp, *ret = key;
 	struct list_head *rh = &b->lru_head;
 	unsigned int entries = 0;
 
 	list_for_each_entry(rp, rh, c_lru) {
 		++entries;
-		if (nfsd_cache_match(rqstp, csum, rp)) {
+		if (nfsd_cache_match(key, rp)) {
 			ret = rp;
 			break;
 		}
@@ -374,6 +384,7 @@ nfsd_cache_search(struct nfsd_drc_bucket *b, struct svc_rqst *rqstp,
 				atomic_read(&num_drc_entries));
 	}
 
+	lru_put_end(b, ret);
 	return ret;
 }
@@ -389,9 +400,6 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 {
 	struct svc_cacherep *rp, *found;
 	__be32 xid = rqstp->rq_xid;
-	u32 proto = rqstp->rq_prot,
-		vers = rqstp->rq_vers,
-		proc = rqstp->rq_proc;
 	__wsum csum;
 	u32 hash = nfsd_cache_hash(xid);
 	struct nfsd_drc_bucket *b = &drc_hashtbl[hash];
@@ -410,52 +418,38 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 	 * Since the common case is a cache miss followed by an insert,
 	 * preallocate an entry.
 	 */
-	rp = nfsd_reply_cache_alloc();
-	spin_lock(&b->cache_lock);
-	if (likely(rp)) {
-		atomic_inc(&num_drc_entries);
-		drc_mem_usage += sizeof(*rp);
+	rp = nfsd_reply_cache_alloc(rqstp, csum);
+	if (!rp) {
+		dprintk("nfsd: unable to allocate DRC entry!\n");
+		return rtn;
 	}
 
-	/* go ahead and prune the cache */
-	prune_bucket(b);
-
-	found = nfsd_cache_search(b, rqstp, csum);
-	if (found) {
-		if (likely(rp))
-			nfsd_reply_cache_free_locked(rp);
+	spin_lock(&b->cache_lock);
+	found = nfsd_cache_insert(b, rp);
+	if (found != rp) {
+		nfsd_reply_cache_free_locked(rp);
 		rp = found;
 		goto found_entry;
 	}
 
-	if (!rp) {
-		dprintk("nfsd: unable to allocate DRC entry!\n");
-		goto out;
-	}
-
 	nfsdstats.rcmisses++;
 	rqstp->rq_cacherep = rp;
 	rp->c_state = RC_INPROG;
-	rp->c_xid = xid;
-	rp->c_proc = proc;
-	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
-	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
-	rp->c_prot = proto;
-	rp->c_vers = vers;
-	rp->c_len = rqstp->rq_arg.len;
-	rp->c_csum = csum;
 
-	lru_put_end(b, rp);
+	atomic_inc(&num_drc_entries);
+	drc_mem_usage += sizeof(*rp);
+
+	/* go ahead and prune the cache */
+	prune_bucket(b);
+
 out:
 	spin_unlock(&b->cache_lock);
 	return rtn;
 
 found_entry:
-	nfsdstats.rchits++;
 	/* We found a matching entry which is either in progress or done. */
-	lru_put_end(b, rp);
+	nfsdstats.rchits++;
 	rtn = RC_DROPIT;
 	/* Request being processed */
 	if (rp->c_state == RC_INPROG)
		goto out;
...
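
Two consequences of the restructuring shown above are worth noting. First, accounting (num_drc_entries, drc_mem_usage) and pruning now happen only after a successful insert, so nfsd_reply_cache_free_locked() must skip the LRU unlink and the counter updates for an entry that is still RC_UNUSED, i.e. a preallocated key that lost to an existing match and was never linked in. Second, the caller collapses to allocate, insert-or-find, free-on-hit. Continuing the hypothetical sketch after the commit metadata (malloc/free stand in for the kernel's slab cache; this is illustrative, not the kernel source):

#include <stdlib.h>

/* Hypothetical caller, mirroring the shape of the new nfsd_cache_lookup(). */
struct entry *lookup_or_insert(struct entry **bucket,
			       uint32_t xid, uint32_t csum)
{
	struct entry *key = malloc(sizeof(*key));
	struct entry *found;

	if (key == NULL)
		return NULL;
	key->next = NULL;	/* fully initialise the key up front */
	key->xid = xid;
	key->csum = csum;

	found = cache_insert(bucket, key);
	if (found != key)
		free(key);	/* hit: the preallocated key was never linked in */
	return found;
}

As in the patch, the preallocated key is discarded only on the rare hit path; on the common miss path it is used as-is, which is why the up-front initialisation costs nothing extra.
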