Commit 277e650d authored by Pavel Emelyanov, committed by David S. Miller

[INET]: Consolidate the xxx_frag_kill

Since all the xxx_frag_kill functions now work
with the generic inet_frag_queue data type, this
code can be moved into a common place.

The xxx_unlink() code is moved as well.
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 04128f23
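
For context, the generic types this series builds on look roughly as follows. This is an approximate sketch based on the surrounding commits in the series, not the verbatim contents of include/net/inet_frag.h:

	struct inet_frag_queue {
		struct hlist_node	list;		/* hash chain linkage */
		struct list_head	lru_list;	/* linkage in the shared LRU */
		atomic_t		refcnt;		/* refs: hash table, pending timer, users */
		struct timer_list	timer;		/* expiry timer */
		int			last_in;	/* state flags, e.g. COMPLETE */
	};

	struct inet_frags {
		struct list_head	lru_list;	/* LRU of all queues of this family */
		rwlock_t		lock;		/* protects hash, LRU and nqueues */
		int			nqueues;	/* number of live queues */
	};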
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -41,4 +41,6 @@ struct inet_frags {
 void inet_frags_init(struct inet_frags *);
 void inet_frags_fini(struct inet_frags *);
 
+void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
+
 #endif
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -42,3 +42,26 @@ void inet_frags_fini(struct inet_frags *f)
 {
 }
 EXPORT_SYMBOL(inet_frags_fini);
+
+static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
+{
+	write_lock(&f->lock);
+	hlist_del(&fq->list);
+	list_del(&fq->lru_list);
+	f->nqueues--;
+	write_unlock(&f->lock);
+}
+
+void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
+{
+	if (del_timer(&fq->timer))
+		atomic_dec(&fq->refcnt);
+
+	if (!(fq->last_in & COMPLETE)) {
+		fq_unlink(fq, f);
+		atomic_dec(&fq->refcnt);
+		fq->last_in |= COMPLETE;
+	}
+}
+
+EXPORT_SYMBOL(inet_frag_kill);
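
With the generic helper in place, each per-protocol kill routine below collapses to a one-line wrapper. The shape, using hypothetical my_frag_queue/my_frags names purely for illustration:

	static void my_frag_kill(struct my_frag_queue *fq)
	{
		/* Drop the timer's reference if the timer was still pending,
		 * then unlink from hash/LRU (dropping the table's reference)
		 * and mark the queue COMPLETE so it cannot be killed twice. */
		inet_frag_kill(&fq->q, &my_frags);
	}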
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -108,20 +108,6 @@ int ip_frag_mem(void)
 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 			 struct net_device *dev);
 
-static __inline__ void __ipq_unlink(struct ipq *qp)
-{
-	hlist_del(&qp->q.list);
-	list_del(&qp->q.lru_list);
-	ip4_frags.nqueues--;
-}
-
-static __inline__ void ipq_unlink(struct ipq *ipq)
-{
-	write_lock(&ip4_frags.lock);
-	__ipq_unlink(ipq);
-	write_unlock(&ip4_frags.lock);
-}
-
 static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
 {
 	return jhash_3words((__force u32)id << 16 | prot,
@@ -222,14 +208,7 @@ static __inline__ void ipq_put(struct ipq *ipq, int *work)
  */
 static void ipq_kill(struct ipq *ipq)
 {
-	if (del_timer(&ipq->q.timer))
-		atomic_dec(&ipq->q.refcnt);
-
-	if (!(ipq->q.last_in & COMPLETE)) {
-		ipq_unlink(ipq);
-		atomic_dec(&ipq->q.refcnt);
-		ipq->q.last_in |= COMPLETE;
-	}
+	inet_frag_kill(&ipq->q, &ip4_frags);
 }
 
 /* Memory limiting on fragments. Evictor trashes the oldest
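
The locking discipline is unchanged by the move: the removed ipq_unlink took ip4_frags.lock for writing, and the generic fq_unlink takes the same per-family rwlock through f->lock. Paths that only walk the shared hash or LRU list take it on the read side. A minimal sketch of such a reader, an assumed shape for illustration only (the real evictors in these files do more):

	read_lock(&f->lock);
	if (!list_empty(&f->lru_list))
		q = list_entry(f->lru_list.next,
			       struct inet_frag_queue, lru_list);
	read_unlock(&f->lock);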
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -79,20 +79,6 @@ struct inet_frags_ctl nf_frags_ctl __read_mostly = {
 
 static struct inet_frags nf_frags;
 
-static __inline__ void __fq_unlink(struct nf_ct_frag6_queue *fq)
-{
-	hlist_del(&fq->q.list);
-	list_del(&fq->q.lru_list);
-	nf_frags.nqueues--;
-}
-
-static __inline__ void fq_unlink(struct nf_ct_frag6_queue *fq)
-{
-	write_lock(&nf_frags.lock);
-	__fq_unlink(fq);
-	write_unlock(&nf_frags.lock);
-}
-
 static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
 			       struct in6_addr *daddr)
 {
@@ -213,14 +199,7 @@ static __inline__ void fq_put(struct nf_ct_frag6_queue *fq, unsigned int *work)
  */
 static __inline__ void fq_kill(struct nf_ct_frag6_queue *fq)
 {
-	if (del_timer(&fq->q.timer))
-		atomic_dec(&fq->q.refcnt);
-
-	if (!(fq->q.last_in & COMPLETE)) {
-		fq_unlink(fq);
-		atomic_dec(&fq->q.refcnt);
-		fq->q.last_in |= COMPLETE;
-	}
+	inet_frag_kill(&fq->q, &nf_frags);
 }
 
 static void nf_ct_frag6_evictor(void)
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -104,20 +104,6 @@ int ip6_frag_mem(void)
 static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 			  struct net_device *dev);
 
-static __inline__ void __fq_unlink(struct frag_queue *fq)
-{
-	hlist_del(&fq->q.list);
-	list_del(&fq->q.lru_list);
-	ip6_frags.nqueues--;
-}
-
-static __inline__ void fq_unlink(struct frag_queue *fq)
-{
-	write_lock(&ip6_frags.lock);
-	__fq_unlink(fq);
-	write_unlock(&ip6_frags.lock);
-}
-
 /*
  * callers should be careful not to use the hash value outside the ipfrag_lock
  * as doing so could race with ipfrag_hash_rnd being recalculated.
@@ -240,14 +226,7 @@ static __inline__ void fq_put(struct frag_queue *fq, int *work)
  */
 static __inline__ void fq_kill(struct frag_queue *fq)
 {
-	if (del_timer(&fq->q.timer))
-		atomic_dec(&fq->q.refcnt);
-
-	if (!(fq->q.last_in & COMPLETE)) {
-		fq_unlink(fq);
-		atomic_dec(&fq->q.refcnt);
-		fq->q.last_in |= COMPLETE;
-	}
+	inet_frag_kill(&fq->q, &ip6_frags);
 }
 
 static void ip6_evictor(struct inet6_dev *idev)