Commit 29e5375d authored by Eric Dumazet, committed by David S. Miller

ipv4: add (struct uncached_list)->quarantine list

This is an optimization to keep the per-cpu lists as short as possible:

Whenever rt_flush_dev() changes one rtable dst.dev
matching the disappearing device, it can transfer the object
to a quarantine list, waiting for a final rt_del_uncached_list().
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ba55ef81
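For readers who want to see the pattern in isolation, below is a minimal userspace sketch of the same idea (plain C, not the kernel sources): entries whose device is going away are moved from the scanned list onto a quarantine list, so later scans stay short, and the final removal works whichever list the entry sits on. The list primitives mirror the kernel's list_move()/list_del_init() semantics; the names entry, flush_dev() and del_uncached() are made-up stand-ins for rtable, rt_flush_dev() and rt_del_uncached_list().

/* Userspace sketch of the quarantine-list idea (illustrative only). */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);	/* node becomes "empty": safe to test or delete again */
}

static void list_move(struct list_head *n, struct list_head *h)
{
	list_del_init(n);
	list_add_tail(n, h);
}

static int list_empty(const struct list_head *h) { return h->next == h; }

static int list_count(const struct list_head *h)
{
	const struct list_head *p;
	int n = 0;

	for (p = h->next; p != h; p = p->next)
		n++;
	return n;
}

struct entry {
	int dev;			/* stands in for rt->dst.dev */
	struct list_head node;		/* stands in for rt->rt_uncached */
};

/* single list pair instead of one per CPU */
static struct list_head head, quarantine;

/* like rt_flush_dev(): move entries bound to @dev off the scanned list */
static void flush_dev(int dev)
{
	struct list_head *p = head.next, *nxt;

	for (; p != &head; p = nxt) {
		struct entry *e = (struct entry *)((char *)p - offsetof(struct entry, node));

		nxt = p->next;		/* "safe" walk: p may be moved below */
		if (e->dev == dev)
			list_move(&e->node, &quarantine);
	}
}

/* like rt_del_uncached_list(): final removal, whichever list the entry is on */
static void del_uncached(struct entry *e)
{
	if (!list_empty(&e->node))
		list_del_init(&e->node);
}

int main(void)
{
	struct entry a = { .dev = 1 }, b = { .dev = 2 };

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&quarantine);
	INIT_LIST_HEAD(&a.node);
	INIT_LIST_HEAD(&b.node);
	list_add_tail(&a.node, &head);
	list_add_tail(&b.node, &head);

	flush_dev(1);	/* device 1 disappears: a is quarantined, b stays */
	printf("head: %d entries, quarantine: %d entries\n",
	       list_count(&head), list_count(&quarantine));

	del_uncached(&a);
	del_uncached(&b);
	printf("head: %d entries, quarantine: %d entries\n",
	       list_count(&head), list_count(&quarantine));
	return 0;
}

The sketch keeps list_del_init() semantics (a removed node is re-initialized to point at itself), so list_empty() on the node stays a reliable "is it still linked?" test, mirroring the list_del() to list_del_init() change in the diff below.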
net/ipv4/route.c

@@ -1485,6 +1485,7 @@ static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
 struct uncached_list {
 	spinlock_t		lock;
 	struct list_head	head;
+	struct list_head	quarantine;
 };
 
 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
@@ -1506,7 +1507,7 @@ void rt_del_uncached_list(struct rtable *rt)
 		struct uncached_list *ul = rt->rt_uncached_list;
 
 		spin_lock_bh(&ul->lock);
-		list_del(&rt->rt_uncached);
+		list_del_init(&rt->rt_uncached);
 		spin_unlock_bh(&ul->lock);
 	}
 }
@@ -1521,20 +1522,24 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
 
 void rt_flush_dev(struct net_device *dev)
 {
-	struct rtable *rt;
+	struct rtable *rt, *safe;
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
 		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
 
+		if (list_empty(&ul->head))
+			continue;
+
 		spin_lock_bh(&ul->lock);
-		list_for_each_entry(rt, &ul->head, rt_uncached) {
+		list_for_each_entry_safe(rt, safe, &ul->head, rt_uncached) {
 			if (rt->dst.dev != dev)
 				continue;
 			rt->dst.dev = blackhole_netdev;
 			dev_replace_track(dev, blackhole_netdev,
 					  &rt->dst.dev_tracker,
 					  GFP_ATOMIC);
+			list_move(&rt->rt_uncached, &ul->quarantine);
 		}
 		spin_unlock_bh(&ul->lock);
 	}
@@ -3706,6 +3711,7 @@ int __init ip_rt_init(void)
 		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
 
 		INIT_LIST_HEAD(&ul->head);
+		INIT_LIST_HEAD(&ul->quarantine);
 		spin_lock_init(&ul->lock);
 	}
 #ifdef CONFIG_IP_ROUTE_CLASSID