Commit 839049a8 authored by NeilBrown, committed by J. Bruce Fields

nfsd/idmap: drop special request deferral in favour of improved default.

The idmap code manages request deferral by waiting for a reply from
userspace rather than putting the NFS request on a queue to be retried
from the start.
Now that the common deferral code does this, there is no need for the
special code in idmap.
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent 8ff30fa4
@@ -482,109 +482,26 @@ nfsd_idmap_shutdown(void)
 	cache_unregister(&nametoid_cache);
 }
 
-/*
- * Deferred request handling
- */
-
-struct idmap_defer_req {
-	struct cache_req		req;
-	struct cache_deferred_req	deferred_req;
-	wait_queue_head_t		waitq;
-	atomic_t			count;
-};
-
-static inline void
-put_mdr(struct idmap_defer_req *mdr)
-{
-	if (atomic_dec_and_test(&mdr->count))
-		kfree(mdr);
-}
-
-static inline void
-get_mdr(struct idmap_defer_req *mdr)
-{
-	atomic_inc(&mdr->count);
-}
-
-static void
-idmap_revisit(struct cache_deferred_req *dreq, int toomany)
-{
-	struct idmap_defer_req *mdr =
-		container_of(dreq, struct idmap_defer_req, deferred_req);
-
-	wake_up(&mdr->waitq);
-	put_mdr(mdr);
-}
-
-static struct cache_deferred_req *
-idmap_defer(struct cache_req *req)
-{
-	struct idmap_defer_req *mdr =
-		container_of(req, struct idmap_defer_req, req);
-
-	mdr->deferred_req.revisit = idmap_revisit;
-	get_mdr(mdr);
-	return (&mdr->deferred_req);
-}
-
-static inline int
-do_idmap_lookup(struct ent *(*lookup_fn)(struct ent *), struct ent *key,
-		struct cache_detail *detail, struct ent **item,
-		struct idmap_defer_req *mdr)
-{
-	*item = lookup_fn(key);
-	if (!*item)
-		return -ENOMEM;
-	return cache_check(detail, &(*item)->h, &mdr->req);
-}
-
-static inline int
-do_idmap_lookup_nowait(struct ent *(*lookup_fn)(struct ent *),
-			struct ent *key, struct cache_detail *detail,
-			struct ent **item)
-{
-	int ret = -ENOMEM;
-
-	*item = lookup_fn(key);
-	if (!*item)
-		goto out_err;
-	ret = -ETIMEDOUT;
-	if (!test_bit(CACHE_VALID, &(*item)->h.flags)
-			|| (*item)->h.expiry_time < seconds_since_boot()
-			|| detail->flush_time > (*item)->h.last_refresh)
-		goto out_put;
-	ret = -ENOENT;
-	if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags))
-		goto out_put;
-	return 0;
-out_put:
-	cache_put(&(*item)->h, detail);
-out_err:
-	*item = NULL;
-	return ret;
-}
-
 static int
 idmap_lookup(struct svc_rqst *rqstp,
 		struct ent *(*lookup_fn)(struct ent *), struct ent *key,
 		struct cache_detail *detail, struct ent **item)
 {
-	struct idmap_defer_req *mdr;
 	int ret;
 
-	mdr = kzalloc(sizeof(*mdr), GFP_KERNEL);
-	if (!mdr)
+	*item = lookup_fn(key);
+	if (!*item)
 		return -ENOMEM;
-	atomic_set(&mdr->count, 1);
-	init_waitqueue_head(&mdr->waitq);
-	mdr->req.defer = idmap_defer;
-	ret = do_idmap_lookup(lookup_fn, key, detail, item, mdr);
-	if (ret == -EAGAIN) {
-		wait_event_interruptible_timeout(mdr->waitq,
-			test_bit(CACHE_VALID, &(*item)->h.flags), 1 * HZ);
-		ret = do_idmap_lookup_nowait(lookup_fn, key, detail, item);
+retry:
+	ret = cache_check(detail, &(*item)->h, &rqstp->rq_chandle);
+
+	if (ret == -ETIMEDOUT) {
+		struct ent *prev_item = *item;
+
+		*item = lookup_fn(key);
+		if (*item != prev_item)
+			goto retry;
+		cache_put(&(*item)->h, detail);
 	}
-	put_mdr(mdr);
 	return ret;
 }