Commit 74001bcd authored by Neil Brown, committed by Linus Torvalds

[PATCH] kNFSd: Assorted fixes for NFS export cache

The most significant fix is cleaning up properly when the NFS service is stopped.

Also fix some refcounting problems and other little bits.
parent e927119b
@@ -153,9 +153,13 @@ int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
 		goto out;
 	dprintk("Path seems to be <%s>\n", buf);
 	err = 0;
-	if (len == 0)
+	if (len == 0) {
+		struct svc_expkey *ek;
 		set_bit(CACHE_NEGATIVE, &key.h.flags);
-	else {
+		ek = svc_expkey_lookup(&key, 2);
+		if (ek)
+			expkey_put(&ek->h, &svc_expkey_cache);
+	} else {
 		struct nameidata nd;
 		struct svc_expkey *ek;
 		struct svc_export *exp;
......
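Worth noting about the hunk above: the negative branch no longer just flags the on-stack copy; it pushes the negative key into the cache via svc_expkey_lookup(&key, 2) (a forced update) and immediately drops the reference the lookup returns. Below is a compilable userspace sketch of that get/put balance; the bodies and signatures are simplified stand-ins, and only the overall shape comes from the patch.

#include <stdio.h>

struct cache_head { int refcnt; };
struct svc_expkey { struct cache_head h; };

/* stand-in lookup: hands back a referenced entry; in the patch,
 * set == 2 forces the cached entry to be updated even if valid */
static struct svc_expkey *lookup_sketch(struct svc_expkey *key, int set)
{
        (void)set;
        key->h.refcnt++;
        return key;
}

/* stand-in put: drop the reference the lookup took */
static void put_sketch(struct cache_head *h)
{
        h->refcnt--;
}

int main(void)
{
        struct svc_expkey key = { { 0 } };

        /* the fixed path: update the cache entry, then release it so
         * the parsing code does not pin it forever */
        struct svc_expkey *ek = lookup_sketch(&key, 2);
        if (ek)
                put_sketch(&ek->h);

        printf("refcnt balanced at %d\n", key.h.refcnt);
        return 0;
}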
@@ -591,6 +591,7 @@ nfserrno (int errno)
 	{ nfserr_dquot, -EDQUOT },
 #endif
 	{ nfserr_stale, -ESTALE },
 	{ nfserr_dropit, -EAGAIN },
+	{ nfserr_dropit, -ENOMEM },
 	{ -1, -EIO }
 };
......
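The table above maps host errnos to NFS status codes, stopping at the -1 sentinel; the added row makes -ENOMEM translate to nfserr_dropit, so an allocation failure drops the request for the client to retry rather than surfacing a hard error. A compilable sketch of the same table-walk shape follows; the numeric status values are illustrative placeholders, not the on-the-wire constants.

#include <stdio.h>
#include <errno.h>

enum { nfserr_io = 5, nfserr_stale = 70, nfserr_dropit = 30000 };

static struct { int nfserr; int syserr; } nfs_errtbl[] = {
        { nfserr_stale,  -ESTALE },
        { nfserr_dropit, -EAGAIN },
        { nfserr_dropit, -ENOMEM },   /* the row this patch adds */
        { -1,            -EIO },      /* sentinel */
};

static int nfserrno(int errno_val)
{
        int i;
        for (i = 0; nfs_errtbl[i].nfserr != -1; i++)
                if (nfs_errtbl[i].syserr == errno_val)
                        return nfs_errtbl[i].nfserr;
        return nfserr_io;             /* unknown errno: report EIO */
}

int main(void)
{
        printf("-ENOMEM -> %d (dropit)\n", nfserrno(-ENOMEM));
        return 0;
}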
@@ -167,7 +167,7 @@ nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
 			dput(mounts);
 			dput(dentry);
 			mntput(mnt);
-			goto out;
+			goto out_nfserr;
 		}
 		if (exp2 &&
 		    ((exp->ex_flags & NFSEXP_CROSSMNT)
......
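The one-line fix above matters because err must hold an NFS status when control reaches out; the mount-crossing failure path still holds a host errno, so it has to leave through out_nfserr, which translates first. A compilable sketch of that label convention, with a stand-in nfserrno() (everything here is illustrative except the label names):

#include <errno.h>
#include <stdio.h>

static int nfserrno(int syserr) { return syserr == -ENOENT ? 2 : 5; }

static int lookup_sketch(int fail)
{
        int err = 0;

        if (fail) {
                err = -ENOENT;       /* host errno from a helper */
                goto out_nfserr;     /* the fix: translate before returning */
        }
out:
        return err;                  /* always an NFS status here */
out_nfserr:
        err = nfserrno(err);
        goto out;
}

int main(void)
{
        printf("status=%d\n", lookup_sketch(1));
        return 0;
}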
@@ -115,6 +115,8 @@ struct cache_deferred_req {
 	struct list_head	recent;	/* on fifo */
 	struct cache_head	*item;	/* cache item we wait on */
 	time_t			recv_time;
+	void			*owner;	/* we might need to discard all deferred
+					 * requests owned by someone */
 	void			(*revisit)(struct cache_deferred_req *req,
 					   int too_many);
 };
@@ -168,12 +170,14 @@ RTN *FNAME ARGS \
 		tmp = container_of(*hp, RTN, MEMBER); \
 		if (TEST) { /* found a match */ \
 			\
+			if (set == 1 && test_bit(CACHE_VALID, &tmp->MEMBER.flags) && !new) \
+				break; \
+			\
 			atomic_inc(&tmp->MEMBER.refcnt); \
 			if (set) { \
-				if (set!= 2 && test_bit(CACHE_VALID, &tmp->MEMBER.flags))\
+				if (set == 1 && test_bit(CACHE_VALID, &tmp->MEMBER.flags))\
 				{ /* need to swap in new */ \
 					RTN *t2; \
-					if (!new) break; \
 					\
 					new->MEMBER.next = tmp->MEMBER.next; \
 					*hp = &new->MEMBER; \
@@ -242,6 +246,7 @@ RTN *FNAME ARGS \
 
 extern void cache_defer_req(struct cache_req *req, struct cache_head *item);
 extern void cache_revisit_request(struct cache_head *item);
+extern void cache_clean_deferred(void *owner);
 
 static inline struct cache_head *cache_get(struct cache_head *h)
 {
......
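One plausible reading of the macro hunk above, consistent with the "refcounting problems" in the commit message: the old code took a reference with atomic_inc() and only then discovered it had nothing to swap in ("if (!new) break;"), leaking that reference on the early exit; the new test bails out before the reference is taken. A compilable simplification of that ordering (CACHE_VALID handling and hashing omitted; all names and bodies here are illustrative):

#include <stdio.h>

struct cache_head { int refcnt; int valid; };

/* fixed ordering: decide whether to bail out *before* taking the
 * reference, so nothing leaks on the early-exit path */
static struct cache_head *lookup_fixed(struct cache_head *tmp,
                                       int set, struct cache_head *new)
{
        if (set == 1 && tmp->valid && !new)
                return NULL;    /* the added early exit: no ref taken */

        tmp->refcnt++;          /* reference taken only when tmp is kept */
        return tmp;
}

int main(void)
{
        struct cache_head h = { 0, 1 };

        if (!lookup_fixed(&h, 1, NULL))
                printf("bailed out cleanly, refcnt still %d\n", h.refcnt);
        return 0;
}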
@@ -218,7 +218,6 @@ static inline void svc_free_allpages(struct svc_rqst *rqstp)
 }
 
 struct svc_deferred_req {
-	struct svc_serv		*serv;
 	u32			prot;	/* protocol (UDP or TCP) */
 	struct sockaddr_in	addr;
 	struct svc_sock		*svsk;	/* where reply must go */
......
@@ -310,14 +310,17 @@ int cache_clean(void)
 		cp = & current_detail->hash_table[current_index];
 		ch = *cp;
 		for (; ch; cp= & ch->next, ch= *cp) {
-			if (atomic_read(&ch->refcnt))
-				continue;
-			if (ch->expiry_time < get_seconds()
-			    || ch->last_refresh < current_detail->flush_time
-			    )
-				break;
 			if (current_detail->nextcheck > ch->expiry_time)
 				current_detail->nextcheck = ch->expiry_time+1;
+			if (ch->expiry_time >= get_seconds()
+			    && ch->last_refresh >= current_detail->flush_time
+			    )
+				continue;
+			if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
+				queue_loose(current_detail, ch);
+			if (atomic_read(&ch->refcnt))
+				continue;
+			break;
 		}
 		if (ch) {
 			cache_get(ch);
@@ -467,6 +470,31 @@ void cache_revisit_request(struct cache_head *item)
 	}
 }
 
+void cache_clean_deferred(void *owner)
+{
+	struct cache_deferred_req *dreq, *tmp;
+	struct list_head pending;
+
+	INIT_LIST_HEAD(&pending);
+
+	spin_lock(&cache_defer_lock);
+
+	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
+		if (dreq->owner == owner) {
+			list_del(&dreq->hash);
+			list_move(&dreq->recent, &pending);
+			cache_defer_cnt--;
+		}
+	}
+	spin_unlock(&cache_defer_lock);
+
+	while (!list_empty(&pending)) {
+		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
+		list_del_init(&dreq->recent);
+		dreq->revisit(dreq, 1);
+	}
+}
+
 /*
  * communicate with user-space
  *
......
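cache_clean_deferred() above uses a common kernel idiom: while holding cache_defer_lock, move every matching request onto a private list, then run the revisit callbacks only after the lock is dropped, since a callback may sleep or re-enter the cache code. A compilable userspace sketch of the same shape, with a pthread mutex standing in for the spinlock and a hand-rolled singly linked list for list_head (the too_many=1 convention is the patch's; everything else is illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct dreq {
        struct dreq *next;
        void *owner;
        void (*revisit)(struct dreq *d, int too_many);
};

static struct dreq *defer_list;
static pthread_mutex_t defer_lock = PTHREAD_MUTEX_INITIALIZER;

static void clean_deferred(void *owner)
{
        struct dreq *pending = NULL, **pp = &defer_list;

        pthread_mutex_lock(&defer_lock);
        while (*pp) {                     /* "safe" walk: unlink as we go */
                struct dreq *d = *pp;
                if (d->owner == owner) {
                        *pp = d->next;
                        d->next = pending;    /* park on the private list */
                        pending = d;
                } else {
                        pp = &d->next;
                }
        }
        pthread_mutex_unlock(&defer_lock);

        while (pending) {                 /* callbacks run without the lock */
                struct dreq *d = pending;
                pending = d->next;
                d->revisit(d, 1);         /* too_many=1: drop, don't replay */
        }
}

static void drop(struct dreq *d, int too_many)
{
        printf("dropped %p (too_many=%d)\n", (void *)d, too_many);
        free(d);
}

int main(void)
{
        int owner;                        /* stands in for a svc_serv */
        struct dreq *d = calloc(1, sizeof(*d));

        d->owner = &owner;
        d->revisit = drop;
        d->next = NULL;
        defer_list = d;

        clean_deferred(&owner);           /* what svc_destroy() now does */
        return 0;
}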
@@ -99,6 +99,8 @@ svc_destroy(struct svc_serv *serv)
 		svc_delete_socket(svsk);
 	}
 
+	cache_clean_deferred(serv);
+
 	/* Unregister service with the portmapper */
 	svc_register(serv, 0, 0);
 	kfree(serv);
......
@@ -195,12 +195,12 @@ static int ip_map_parse(struct cache_detail *cd,
 	ipm.m_addr.s_addr =
 		htonl((((((b1<<8)|b2)<<8)|b3)<<8)|b4);
 	ipm.h.flags = 0;
-	if (dom)
+	if (dom) {
 		ipm.m_client = container_of(dom, struct unix_domain, h);
-	else
+		ipm.m_add_change = ipm.m_client->addr_changes;
+	} else
 		set_bit(CACHE_NEGATIVE, &ipm.h.flags);
 	ipm.h.expiry_time = expiry;
-	ipm.m_add_change = ipm.m_client->addr_changes;
 
 	ipmp = ip_map_lookup(&ipm, 1);
 	if (ipmp)
......
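The ip_map_parse() change fixes an uninitialized-pointer read: the old code evaluated ipm.m_client->addr_changes on every path, but on the negative (dom == NULL) path m_client was never assigned. The fix moves the read inside the if (dom) branch. A compilable sketch of the corrected shape, with simplified types (the field names follow the hunk; the rest is illustrative):

#include <stdio.h>

struct unix_domain { int addr_changes; };

struct ip_map {
        struct unix_domain *m_client;
        int m_add_change;
        int negative;
};

static void parse_sketch(struct ip_map *ipm, struct unix_domain *dom)
{
        if (dom) {
                ipm->m_client = dom;
                /* fixed: only dereference m_client once it is assigned */
                ipm->m_add_change = ipm->m_client->addr_changes;
        } else {
                ipm->negative = 1;  /* stands in for set_bit(CACHE_NEGATIVE, ...) */
        }
}

int main(void)
{
        struct ip_map ipm = { 0 };

        parse_sketch(&ipm, NULL);   /* the old code read garbage here */
        printf("negative=%d\n", ipm.negative);
        return 0;
}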
@@ -1443,7 +1443,7 @@ svc_makesock(struct svc_serv *serv, int protocol, unsigned short port)
 static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
 {
 	struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
-	struct svc_serv *serv = dr->serv;
+	struct svc_serv *serv = dreq->owner;
 	struct svc_sock *svsk;
 
 	if (too_many) {
@@ -1481,7 +1481,7 @@ svc_defer(struct cache_req *req)
 	if (dr == NULL)
 		return NULL;
 
-	dr->serv = rqstp->rq_server;
+	dr->handle.owner = rqstp->rq_server;
 	dr->prot = rqstp->rq_prot;
 	dr->addr = rqstp->rq_addr;
 	dr->argslen = rqstp->rq_arg.len >> 2;
......
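The last two hunks complete the owner plumbing: svc_defer() stamps the request with rqstp->rq_server through the generic handle.owner field, and svc_revisit() recovers the server from dreq->owner instead of the now-removed dr->serv, still using container_of() to get back to the enclosing svc_deferred_req. A compilable sketch of that recovery, with simplified types (the kernel-style container_of is reproduced for the example; field values are illustrative):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct cache_deferred_req { void *owner; };
struct svc_serv { const char *name; };
struct svc_deferred_req {
        int prot;                          /* note: no serv field any more */
        struct cache_deferred_req handle;
};

static void svc_revisit_sketch(struct cache_deferred_req *dreq)
{
        struct svc_deferred_req *dr =
                container_of(dreq, struct svc_deferred_req, handle);
        struct svc_serv *serv = dreq->owner;   /* was: dr->serv */

        printf("revisit for server %s, prot %d\n", serv->name, dr->prot);
}

int main(void)
{
        struct svc_serv serv = { "nfsd" };
        struct svc_deferred_req dr = { 17, { &serv } };

        svc_revisit_sketch(&dr.handle);
        return 0;
}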