Commit 98aa546a authored by Eric Dumazet, committed by Paolo Abeni

inet: remove (struct uncached_list)->quarantine

This list is used to transfer dsts that are handled by
rt_flush_dev() and rt6_uncached_list_flush_dev() out
of the per-cpu lists.

But the quarantine list is never used afterwards.

If we simply use list_del_init(&rt->dst.rt_uncached),
this also removes the dst from the per-cpu list.

This patch also makes future calls to rt_del_uncached_list()
and rt6_uncached_list_del() faster, because no spinlock
acquisition is needed anymore.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://lore.kernel.org/r/20240604165150.726382-1-edumazet@google.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parent b4cb4a13
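To make the reasoning in the commit message concrete, here is a minimal user-space sketch. It is not kernel code: the list helpers are simplified re-implementations of the <linux/list.h> primitives, a pthread mutex stands in for the per-cpu spinlock, and the flush_uncached()/del_uncached() names are made up for illustration. It demonstrates the property the patch relies on: list_del_init() both unlinks an entry from the per-cpu list and re-links it to itself, so a later delete can test list_empty() on the entry and skip the lock entirely.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's list helpers (illustration only). */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static int list_empty(const struct list_head *h) { return h->next == h; }

/* Unlink @entry and point it back at itself, mirroring list_del_init(). */
static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);
}

/* One "uncached list"; a mutex stands in for the per-cpu spinlock. */
static struct list_head uncached_head;
static pthread_mutex_t uncached_lock = PTHREAD_MUTEX_INITIALIZER;

/* Flush path (rt_flush_dev()-like): detach every entry under the lock. */
static void flush_uncached(void)
{
	pthread_mutex_lock(&uncached_lock);
	while (!list_empty(&uncached_head))
		list_del_init(uncached_head.next);
	pthread_mutex_unlock(&uncached_lock);
}

/* Deletion path (rt_del_uncached_list()-like): an already-detached entry
 * is self-linked, so the lock can be skipped entirely. */
static void del_uncached(struct list_head *entry)
{
	if (list_empty(entry))
		return;			/* fast path: no lock acquisition */
	pthread_mutex_lock(&uncached_lock);
	list_del_init(entry);
	pthread_mutex_unlock(&uncached_lock);
}

int main(void)
{
	struct list_head dst_node;

	INIT_LIST_HEAD(&uncached_head);
	INIT_LIST_HEAD(&dst_node);

	list_add(&dst_node, &uncached_head);	/* add to the "per-cpu" list */
	flush_uncached();			/* detaches dst_node */
	assert(list_empty(&uncached_head));

	del_uncached(&dst_node);		/* no-op, lock never taken */
	printf("entry detached by flush; later delete skipped the lock\n");
	return 0;
}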
@@ -1481,7 +1481,6 @@ static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
 struct uncached_list {
 	spinlock_t		lock;
 	struct list_head	head;
-	struct list_head	quarantine;
 };
 
 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
@@ -1532,7 +1531,7 @@ void rt_flush_dev(struct net_device *dev)
 			rt->dst.dev = blackhole_netdev;
 			netdev_ref_replace(dev, blackhole_netdev,
 					   &rt->dst.dev_tracker, GFP_ATOMIC);
-			list_move(&rt->dst.rt_uncached, &ul->quarantine);
+			list_del_init(&rt->dst.rt_uncached);
 		}
 		spin_unlock_bh(&ul->lock);
 	}
@@ -3661,7 +3660,6 @@ int __init ip_rt_init(void)
 		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
 
 		INIT_LIST_HEAD(&ul->head);
-		INIT_LIST_HEAD(&ul->quarantine);
 		spin_lock_init(&ul->lock);
 	}
 #ifdef CONFIG_IP_ROUTE_CLASSID
@@ -131,7 +131,6 @@ static struct fib6_info *rt6_get_route_info(struct net *net,
 struct uncached_list {
 	spinlock_t		lock;
 	struct list_head	head;
-	struct list_head	quarantine;
 };
 
 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
@@ -189,8 +188,7 @@ static void rt6_uncached_list_flush_dev(struct net_device *dev)
 				handled = true;
 			}
 			if (handled)
-				list_move(&rt->dst.rt_uncached,
-					  &ul->quarantine);
+				list_del_init(&rt->dst.rt_uncached);
 		}
 		spin_unlock_bh(&ul->lock);
 	}
@@ -6755,7 +6753,6 @@ int __init ip6_route_init(void)
 		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
 
 		INIT_LIST_HEAD(&ul->head);
-		INIT_LIST_HEAD(&ul->quarantine);
 		spin_lock_init(&ul->lock);
 	}