Commit 7e3a28dc authored by Herbert Xu, committed by Hideaki Yoshifuji

[IPSEC]: Zap killed policies from the flow cache properly.

parent 1df64a85
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -87,6 +87,7 @@ typedef void (*flow_resolve_t)(struct flowi *key, u16 family, u8 dir,
 extern void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
 			       flow_resolve_t resolver);
+extern void flow_cache_flush(void *object);
 extern atomic_t flow_cache_genid;
 
 #endif
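The new entry point is the heart of the fix: it gives policy teardown a way to synchronously purge every per-CPU flow-cache entry that still points at a dying object. A minimal usage sketch, matching the shape of the call made from xfrm_policy_gc_kill() further down (not a new API contract):

	/* Only flush if someone besides us still holds a reference;
	 * flow_cache_flush() blocks until every CPU has dropped its
	 * cached pointers to 'policy'. */
	if (atomic_read(&policy->refcnt) > 1)
		flow_cache_flush(policy);
	xfrm_pol_put(policy);	/* now safe: no CPU can hand it out again */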
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -266,6 +266,7 @@ struct xfrm_tmpl
 struct xfrm_policy
 {
 	struct xfrm_policy	*next;
+	struct list_head	list;
 
 	/* This lock only affects elements except for entry. */
 	rwlock_t		lock;
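The new list member exists solely so a dead policy can be threaded onto the garbage-collection list with no extra allocation; both ends of that round trip appear in the xfrm_policy.c hunks below:

	/* producer, in xfrm_policy_kill(): */
	list_add(&policy->list, &xfrm_policy_gc_list);

	/* consumer, in xfrm_policy_gc_task(): */
	policy = list_entry(entry, struct xfrm_policy, list);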
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -12,8 +12,12 @@
 #include <linux/random.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/completion.h>
 #include <net/flow.h>
 #include <asm/atomic.h>
+#include <asm/semaphore.h>
 
 struct flow_cache_entry {
 	struct flow_cache_entry	*next;
@@ -49,6 +53,14 @@ static struct timer_list flow_hash_rnd_timer;
 #define FLOW_HASH_RND_PERIOD	(10 * 60 * HZ)
 
+struct flow_flush_info {
+	void			*object;
+	atomic_t		cpuleft;
+	struct completion	completion;
+};
+static struct tasklet_struct flow_flush_tasklets[NR_CPUS];
+
+static DECLARE_MUTEX(flow_flush_sem);
 
 static void flow_cache_new_hashrnd(unsigned long arg)
 {
 	int i;
@@ -170,6 +182,22 @@ void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
 		}
 	}
 
+	if (!fle) {
+		if (flow_count(cpu) > flow_hwm)
+			flow_cache_shrink(cpu);
+
+		fle = kmem_cache_alloc(flow_cachep, SLAB_ATOMIC);
+		if (fle) {
+			fle->next = *head;
+			*head = fle;
+			fle->family = family;
+			fle->dir = dir;
+			memcpy(&fle->key, key, sizeof(*key));
+			fle->object = NULL;
+			flow_count(cpu)++;
+		}
+	}
+
 	{
 		void *obj;
 		atomic_t *obj_ref;
@@ -186,25 +214,6 @@ void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
 			fle->object_ref = obj_ref;
 			if (obj)
 				atomic_inc(fle->object_ref);
-		} else {
-			if (flow_count(cpu) > flow_hwm)
-				flow_cache_shrink(cpu);
-
-			fle = kmem_cache_alloc(flow_cachep, SLAB_ATOMIC);
-			if (fle) {
-				fle->next = *head;
-				*head = fle;
-				fle->family = family;
-				fle->dir = dir;
-				memcpy(&fle->key, key, sizeof(*key));
-				fle->genid = atomic_read(&flow_cache_genid);
-				fle->object = obj;
-				fle->object_ref = obj_ref;
-				if (obj)
-					atomic_inc(fle->object_ref);
-				flow_count(cpu)++;
-			}
 		}
 
 		local_bh_enable();
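Note what these two hunks do together: entry allocation moves ahead of resolution, and a fresh entry starts with fle->object = NULL and no valid genid, so it falls through the same resolver path as a stale entry. The duplicated install-the-object code in the old else branch collapses into a single shared tail, which (simplified, with locking and error paths elided) now reads:

	resolver(key, family, dir, &obj, &obj_ref);
	if (fle) {
		fle->genid = atomic_read(&flow_cache_genid);
		fle->object = obj;
		fle->object_ref = obj_ref;
		if (obj)
			atomic_inc(fle->object_ref);
	}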
@@ -212,6 +221,62 @@ void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
 	}
 }
 
+static void flow_cache_flush_tasklet(unsigned long data)
+{
+	struct flow_flush_info *info = (void *)data;
+	void *object = info->object;
+	int i;
+	int cpu;
+
+	cpu = smp_processor_id();
+	for (i = 0; i < flow_hash_size; i++) {
+		struct flow_cache_entry *fle, **flp;
+
+		flp = &flow_table[(cpu << flow_hash_shift) + i];
+		for (; (fle = *flp) != NULL; flp = &fle->next) {
+			if (fle->object != object)
+				continue;
+			fle->object = NULL;
+			atomic_dec(fle->object_ref);
+		}
+	}
+
+	if (atomic_dec_and_test(&info->cpuleft))
+		complete(&info->completion);
+}
+
+static void flow_cache_flush_per_cpu(void *data)
+{
+	struct flow_flush_info *info = data;
+	int cpu;
+	struct tasklet_struct *tasklet;
+
+	cpu = smp_processor_id();
+	tasklet = &flow_flush_tasklets[cpu];
+	tasklet_init(tasklet, flow_cache_flush_tasklet, (unsigned long)info);
+	tasklet_schedule(tasklet);
+}
+
+void flow_cache_flush(void *object)
+{
+	struct flow_flush_info info;
+
+	info.object = object;
+	atomic_set(&info.cpuleft, num_online_cpus());
+	init_completion(&info.completion);
+
+	down(&flow_flush_sem);
+
+	smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0);
+
+	local_bh_disable();
+	flow_cache_flush_per_cpu(&info);
+	local_bh_enable();
+
+	wait_for_completion(&info.completion);
+
+	up(&flow_flush_sem);
+}
+
 static int __init flow_cache_init(void)
 {
 	unsigned long order;
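How the flush works: flow_flush_sem serializes concurrent flushers, smp_call_function() (2.6-era signature: func, info, nonatomic, wait) points every other CPU at flow_cache_flush_per_cpu(), the calling CPU runs it directly under local_bh_disable(), and each CPU then scrubs its own slice of the hash table from a tasklet. Running in a tasklet matters: lookups also run in BH context on their own CPU, so the per-CPU tables need no locking. The rendezvous reduces to a shared countdown plus a completion:

	/* flusher side: one countdown shared by every CPU */
	atomic_set(&info.cpuleft, num_online_cpus());
	init_completion(&info.completion);
	wait_for_completion(&info.completion);

	/* tasklet side, after scrubbing this CPU's hash slice */
	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);	/* last CPU wakes the flusher */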
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2016,7 +2016,6 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg
 out:
 	if (xp) {
 		xfrm_policy_kill(xp);
-		xfrm_pol_put(xp);
 	}
 	return err;
 }
@@ -2060,7 +2059,8 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
 	if (xp) {
 		if (hdr->sadb_msg_type == SADB_X_SPDDELETE2)
 			xfrm_policy_kill(xp);
-		xfrm_pol_put(xp);
+		else
+			xfrm_pol_put(xp);
 	}
 	return err;
 }
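The af_key changes, and the matching ones in xfrm_policy.c and xfrm_user.c below, all follow from one ownership rule introduced by this patch: xfrm_policy_kill() now hands the caller's reference to the GC worker, which drops it in xfrm_policy_gc_kill(). In sketch form:

	/* before: the caller owned the reference across the kill */
	xfrm_policy_kill(xp);
	xfrm_pol_put(xp);

	/* after: kill queues the policy for gc and keeps the reference;
	 * the final xfrm_pol_put() happens in xfrm_policy_gc_kill() */
	xfrm_policy_kill(xp);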
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -16,6 +16,9 @@
 #include <linux/config.h>
 #include <linux/slab.h>
 #include <linux/kmod.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
@@ -30,6 +33,11 @@ static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
 kmem_cache_t *xfrm_dst_cache;
 
+static struct work_struct xfrm_policy_gc_work;
+static struct list_head xfrm_policy_gc_list =
+	LIST_HEAD_INIT(xfrm_policy_gc_list);
+static spinlock_t xfrm_policy_gc_lock = SPIN_LOCK_UNLOCKED;
+
 int xfrm_register_type(struct xfrm_type *type, unsigned short family)
 {
 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
@@ -162,7 +170,6 @@ static void xfrm_policy_timer(unsigned long data)
 	xp = xfrm_policy_byid(0, index, 1);
 	if (xp) {
 		xfrm_policy_kill(xp);
-		xfrm_pol_put(xp);
 	}
 }
@@ -204,27 +211,56 @@ void __xfrm_policy_destroy(struct xfrm_policy *policy)
 	kfree(policy);
 }
 
+static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
+{
+	struct dst_entry *dst;
+
+	while ((dst = policy->bundles) != NULL) {
+		policy->bundles = dst->next;
+		dst_free(dst);
+	}
+
+	if (del_timer(&policy->timer))
+		atomic_dec(&policy->refcnt);
+
+	if (atomic_read(&policy->refcnt) > 1)
+		flow_cache_flush(policy);
+
+	xfrm_pol_put(policy);
+}
+
+static void xfrm_policy_gc_task(void *data)
+{
+	struct xfrm_policy *policy;
+	struct list_head *entry, *tmp;
+	struct list_head gc_list = LIST_HEAD_INIT(gc_list);
+
+	spin_lock_bh(&xfrm_policy_gc_lock);
+	list_splice_init(&xfrm_policy_gc_list, &gc_list);
+	spin_unlock_bh(&xfrm_policy_gc_lock);
+
+	list_for_each_safe(entry, tmp, &gc_list) {
+		policy = list_entry(entry, struct xfrm_policy, list);
+		xfrm_policy_gc_kill(policy);
+	}
+}
+
 /* Rule must be locked. Release descentant resources, announce
  * entry dead. The rule must be unlinked from lists to the moment.
  */
 void xfrm_policy_kill(struct xfrm_policy *policy)
 {
-	struct dst_entry *dst;
-
 	write_lock_bh(&policy->lock);
 	if (policy->dead)
 		goto out;
 
 	policy->dead = 1;
 
-	while ((dst = policy->bundles) != NULL) {
-		policy->bundles = dst->next;
-		dst_free(dst);
-	}
-
-	if (del_timer(&policy->timer))
-		atomic_dec(&policy->refcnt);
+	spin_lock(&xfrm_policy_gc_lock);
+	list_add(&policy->list, &xfrm_policy_gc_list);
+	spin_unlock(&xfrm_policy_gc_lock);
+	schedule_work(&xfrm_policy_gc_work);
 
 out:
 	write_unlock_bh(&policy->lock);
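Two details in this hunk are worth calling out. xfrm_policy_kill() runs under write_lock_bh(&policy->lock), so it must not sleep; list_add() and schedule_work() are both safe there, plain spin_lock() suffices because bottom halves are already disabled, and schedule_work() coalesces if the work is already pending. The sleeping part, flow_cache_flush(), is deferred to the worker, which drains the shared list with the standard splice-under-lock idiom so no lock is held while it runs:

	/* drain the shared list in O(1) under the lock ... */
	spin_lock_bh(&xfrm_policy_gc_lock);
	list_splice_init(&xfrm_policy_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	/* ... then walk the private copy lock-free; _safe because
	 * xfrm_policy_gc_kill() frees the entry we stand on */
	list_for_each_safe(entry, tmp, &gc_list) {
		policy = list_entry(entry, struct xfrm_policy, list);
		xfrm_policy_gc_kill(policy);	/* may sleep in flow_cache_flush() */
	}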
@@ -292,7 +328,6 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
 	if (delpol) {
 		xfrm_policy_kill(delpol);
-		xfrm_pol_put(delpol);
 	}
 	return 0;
 }
@@ -354,7 +389,6 @@ void xfrm_policy_flush(void)
 		write_unlock_bh(&xfrm_policy_lock);
 
 		xfrm_policy_kill(xp);
-		xfrm_pol_put(xp);
 
 		write_lock_bh(&xfrm_policy_lock);
 	}
@@ -398,8 +432,8 @@ int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*),
 /* Find policy to apply to this flow. */
-void xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
-			void **objp, atomic_t **obj_refp)
+static void xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
+			       void **objp, atomic_t **obj_refp)
 {
 	struct xfrm_policy *pol;
@@ -478,7 +512,6 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
 	if (old_pol) {
 		xfrm_policy_kill(old_pol);
-		xfrm_pol_put(old_pol);
 	}
 	return 0;
 }
@@ -524,7 +557,6 @@ void __xfrm_sk_free_policy(struct xfrm_policy *pol, int dir)
 	write_unlock_bh(&xfrm_policy_lock);
 	xfrm_policy_kill(pol);
-	xfrm_pol_put(pol);
 }
 
 /* Resolve list of templates for the flow, given policy. */
@@ -1143,6 +1175,8 @@ void __init xfrm_policy_init(void)
 			NULL, NULL);
 	if (!xfrm_dst_cache)
 		panic("XFRM: failed to allocate xfrm_dst_cache\n");
+
+	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
 }
 
 void __init xfrm_init(void)
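For reference, this is the 2.6-era workqueue API, where INIT_WORK() took the handler plus an opaque data pointer (unused here, since the gc list is a global); the minimal lifecycle is:

	static struct work_struct xfrm_policy_gc_work;

	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);	/* once, at init */
	schedule_work(&xfrm_policy_gc_work);	/* from xfrm_policy_kill(); runs in keventd */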
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -784,10 +784,9 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfr
 				      NETLINK_CB(skb).pid,
 				      MSG_DONTWAIT);
 		}
-		xfrm_pol_put(xp);
 	}
+	xfrm_pol_put(xp);
 	return err;
 }