Commit ba55ef81 authored by Eric Dumazet, committed by David S. Miller

ipv6: add (struct uncached_list)->quarantine list

This is an optimization to keep the per-cpu lists as short as possible:

Whenever rt6_uncached_list_flush_dev() changes one rt6_info
matching the disappearing device, it can transfer the object
to a quarantine list, waiting for a final rt6_uncached_list_del().
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e5f80fcf
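
For illustration, here is a minimal user-space sketch of the quarantine-list idea described in the commit message. It is not the kernel code: it uses a toy intrusive list in place of <linux/list.h>, drops the per-cpu structure and the spinlock, and every name in it (struct entry, flush_dev(), del_entry()) is made up for the example.

/*
 * Minimal user-space sketch of the quarantine-list idea (illustration only;
 * toy list helpers instead of <linux/list.h>, no per-cpu data, no locking).
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);			/* node can be deleted again harmlessly */
}

static void list_move(struct list_head *n, struct list_head *h)
{
	list_del_init(n);
	list_add(n, h);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct uncached_list {			/* stand-in for the per-cpu struct */
	struct list_head head;		/* live uncached entries */
	struct list_head quarantine;	/* entries whose device is gone */
};

struct entry {				/* stand-in for rt6_info */
	int dev;			/* stand-in for rt->dst.dev */
	struct list_head node;
};

/* On device removal, move matching entries to ->quarantine so that future
 * walks of ->head stay short; owners still do a final del_entry() later. */
static void flush_dev(struct uncached_list *ul, int dev)
{
	struct list_head *pos = ul->head.next, *tmp = pos->next;

	for (; pos != &ul->head; pos = tmp, tmp = pos->next) {
		struct entry *e = container_of(pos, struct entry, node);

		if (e->dev == dev)
			list_move(&e->node, &ul->quarantine);
	}
}

/* Final removal: works whether the entry sits on ->head or ->quarantine. */
static void del_entry(struct entry *e)
{
	list_del_init(&e->node);
}

int main(void)
{
	struct uncached_list ul;
	struct entry a = { .dev = 1 }, b = { .dev = 2 };

	list_init(&ul.head);
	list_init(&ul.quarantine);
	list_init(&a.node);
	list_init(&b.node);
	list_add(&a.node, &ul.head);
	list_add(&b.node, &ul.head);

	flush_dev(&ul, 1);	/* device 1 disappears: a moves to quarantine */
	printf("a quarantined: %d\n", a.node.prev == &ul.quarantine);

	del_entry(&a);		/* final removal, taken off the quarantine list */
	del_entry(&b);
	printf("quarantine empty: %d\n", ul.quarantine.next == &ul.quarantine);
	return 0;
}

The shape mirrors the patch below: flush_dev() walks the live list once and moves entries for the dead device onto quarantine, so later walks do not have to skip over them, while the eventual del_entry() (the analogue of rt6_uncached_list_del()) still works because list_del_init() leaves the node safe against a second delete.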
@@ -130,6 +130,7 @@ static struct fib6_info *rt6_get_route_info(struct net *net,
 struct uncached_list {
 	spinlock_t		lock;
 	struct list_head	head;
+	struct list_head	quarantine;
 };
 
 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
@@ -151,7 +152,7 @@ void rt6_uncached_list_del(struct rt6_info *rt)
 		struct uncached_list *ul = rt->rt6i_uncached_list;
 
 		spin_lock_bh(&ul->lock);
-		list_del(&rt->rt6i_uncached);
+		list_del_init(&rt->rt6i_uncached);
 		spin_unlock_bh(&ul->lock);
 	}
 }
@@ -162,16 +163,21 @@ static void rt6_uncached_list_flush_dev(struct net_device *dev)
 
 	for_each_possible_cpu(cpu) {
 		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
-		struct rt6_info *rt;
+		struct rt6_info *rt, *safe;
+
+		if (list_empty(&ul->head))
+			continue;
 
 		spin_lock_bh(&ul->lock);
-		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
+		list_for_each_entry_safe(rt, safe, &ul->head, rt6i_uncached) {
 			struct inet6_dev *rt_idev = rt->rt6i_idev;
 			struct net_device *rt_dev = rt->dst.dev;
+			bool handled = false;
 
 			if (rt_idev->dev == dev) {
 				rt->rt6i_idev = in6_dev_get(blackhole_netdev);
 				in6_dev_put(rt_idev);
+				handled = true;
 			}
 
 			if (rt_dev == dev) {
@@ -179,7 +185,11 @@ static void rt6_uncached_list_flush_dev(struct net_device *dev)
 				dev_replace_track(rt_dev, blackhole_netdev,
 						  &rt->dst.dev_tracker,
 						  GFP_ATOMIC);
+				handled = true;
 			}
+			if (handled)
+				list_move(&rt->rt6i_uncached,
+					  &ul->quarantine);
 		}
 		spin_unlock_bh(&ul->lock);
 	}
@@ -6727,6 +6737,7 @@ int __init ip6_route_init(void)
 		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
 
 		INIT_LIST_HEAD(&ul->head);
+		INIT_LIST_HEAD(&ul->quarantine);
 		spin_lock_init(&ul->lock);
 	}