Commit 3f9fe37d authored by Sebastian Andrzej Siewior, committed by Jakub Kicinski

net: Move per-CPU flush-lists to bpf_net_context on PREEMPT_RT.

The per-CPU flush lists are accessed from within the NAPI callback
(xdp_do_flush() for instance). They are therefore subject to the same
problem as struct bpf_redirect_info.

Add the per-CPU lists cpu_map_flush_list, dev_map_flush_list and
xskmap_map_flush_list to struct bpf_net_context. Add wrappers for
accessing them. The lists are initialized on first use (similar to
bpf_net_ctx_get_ri()).

Cc: "Björn Töpel" <bjorn@kernel.org>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Eduard Zingerman <eddyz87@gmail.com>
Cc: Hao Luo <haoluo@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: Jonathan Lemon <jonathan.lemon@gmail.com>
Cc: KP Singh <kpsingh@kernel.org>
Cc: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Cc: Magnus Karlsson <magnus.karlsson@intel.com>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: Song Liu <song@kernel.org>
Cc: Stanislav Fomichev <sdf@google.com>
Cc: Yonghong Song <yonghong.song@linux.dev>
Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://patch.msgid.link/20240620132727.660738-16-bigeasy@linutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 401cb7da
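
The new accessors added in this patch all follow the same lazy-init pattern: a flag bit in the per-task context records whether a given flush list has been set up, so the first getter call initializes it and later calls simply return it. The following standalone C sketch illustrates that control flow. The struct, flag and function names mirror the diff below, while the minimal list helpers, the static ctx variable and main() are stand-ins for this illustration only; in the kernel the context is reached through bpf_net_ctx_get() and <linux/list.h> provides the real list primitives.

/* Minimal userspace sketch of the lazy-init pattern used by the new getters. */
#include <assert.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

#define BPF_RI_F_CPU_MAP_INIT	(1u << 2)	/* mirrors BIT(2) in the diff */

struct bpf_net_context {
	unsigned int kern_flags;		/* stand-in for ri.kern_flags */
	struct list_head cpu_map_flush_list;
};

/* Stand-in for the per-task context the kernel reaches via bpf_net_ctx_get(). */
static struct bpf_net_context ctx;

static struct list_head *bpf_net_ctx_get_cpu_map_flush_list(void)
{
	/* First caller initializes the list; the flag makes it a one-time cost. */
	if (!(ctx.kern_flags & BPF_RI_F_CPU_MAP_INIT)) {
		INIT_LIST_HEAD(&ctx.cpu_map_flush_list);
		ctx.kern_flags |= BPF_RI_F_CPU_MAP_INIT;
	}
	return &ctx.cpu_map_flush_list;
}

int main(void)
{
	struct list_head *l = bpf_net_ctx_get_cpu_map_flush_list();

	assert(list_empty(l));					/* initialized and empty */
	assert(l == bpf_net_ctx_get_cpu_map_flush_list());	/* reused, not re-initialized */
	printf("cpu_map flush list lazily initialized\n");
	return 0;
}
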
@@ -736,6 +736,9 @@ struct bpf_nh_params {
 /* flags for bpf_redirect_info kern_flags */
 #define BPF_RI_F_RF_NO_DIRECT	BIT(0)	/* no napi_direct on return_frame */
 #define BPF_RI_F_RI_INIT	BIT(1)
+#define BPF_RI_F_CPU_MAP_INIT	BIT(2)
+#define BPF_RI_F_DEV_MAP_INIT	BIT(3)
+#define BPF_RI_F_XSK_MAP_INIT	BIT(4)
 
 struct bpf_redirect_info {
 	u64 tgt_index;
@@ -750,6 +753,9 @@ struct bpf_redirect_info {
 
 struct bpf_net_context {
 	struct bpf_redirect_info ri;
+	struct list_head cpu_map_flush_list;
+	struct list_head dev_map_flush_list;
+	struct list_head xskmap_map_flush_list;
 };
 
 static inline struct bpf_net_context *bpf_net_ctx_set(struct bpf_net_context *bpf_net_ctx)
@@ -787,6 +793,42 @@ static inline struct bpf_redirect_info *bpf_net_ctx_get_ri(void)
 	return &bpf_net_ctx->ri;
 }
 
+static inline struct list_head *bpf_net_ctx_get_cpu_map_flush_list(void)
+{
+	struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+
+	if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_CPU_MAP_INIT)) {
+		INIT_LIST_HEAD(&bpf_net_ctx->cpu_map_flush_list);
+		bpf_net_ctx->ri.kern_flags |= BPF_RI_F_CPU_MAP_INIT;
+	}
+
+	return &bpf_net_ctx->cpu_map_flush_list;
+}
+
+static inline struct list_head *bpf_net_ctx_get_dev_flush_list(void)
+{
+	struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+
+	if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_DEV_MAP_INIT)) {
+		INIT_LIST_HEAD(&bpf_net_ctx->dev_map_flush_list);
+		bpf_net_ctx->ri.kern_flags |= BPF_RI_F_DEV_MAP_INIT;
+	}
+
+	return &bpf_net_ctx->dev_map_flush_list;
+}
+
+static inline struct list_head *bpf_net_ctx_get_xskmap_flush_list(void)
+{
+	struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+
+	if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_XSK_MAP_INIT)) {
+		INIT_LIST_HEAD(&bpf_net_ctx->xskmap_map_flush_list);
+		bpf_net_ctx->ri.kern_flags |= BPF_RI_F_XSK_MAP_INIT;
+	}
+
+	return &bpf_net_ctx->xskmap_map_flush_list;
+}
+
 /* Compute the linear packet data range [data, data_end) which
  * will be accessed by various program types (cls_bpf, act_bpf,
  * lwt, ...). Subsystems allowing direct data access must (!)
...
@@ -79,8 +79,6 @@ struct bpf_cpu_map {
 	struct bpf_cpu_map_entry __rcu **cpu_map;
 };
 
-static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);
-
 static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 {
 	u32 value_size = attr->value_size;
@@ -709,7 +707,7 @@ static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
  */
 static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
 {
-	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
+	struct list_head *flush_list = bpf_net_ctx_get_cpu_map_flush_list();
 	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
 
 	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
@@ -761,7 +759,7 @@ int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
 
 void __cpu_map_flush(void)
 {
-	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
+	struct list_head *flush_list = bpf_net_ctx_get_cpu_map_flush_list();
 	struct xdp_bulk_queue *bq, *tmp;
 
 	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
@@ -775,20 +773,9 @@ void __cpu_map_flush(void)
 #ifdef CONFIG_DEBUG_NET
 bool cpu_map_check_flush(void)
 {
-	if (list_empty(this_cpu_ptr(&cpu_map_flush_list)))
+	if (list_empty(bpf_net_ctx_get_cpu_map_flush_list()))
 		return false;
 	__cpu_map_flush();
 	return true;
 }
 #endif
-
-static int __init cpu_map_init(void)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu)
-		INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
-	return 0;
-}
-
-subsys_initcall(cpu_map_init);
@@ -83,7 +83,6 @@ struct bpf_dtab {
 	u32 n_buckets;
 };
 
-static DEFINE_PER_CPU(struct list_head, dev_flush_list);
 static DEFINE_SPINLOCK(dev_map_lock);
 static LIST_HEAD(dev_map_list);
 
@@ -415,7 +414,7 @@ static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
  */
 void __dev_flush(void)
 {
-	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
+	struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list();
 	struct xdp_dev_bulk_queue *bq, *tmp;
 
 	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
@@ -429,7 +428,7 @@ void __dev_flush(void)
 #ifdef CONFIG_DEBUG_NET
 bool dev_check_flush(void)
 {
-	if (list_empty(this_cpu_ptr(&dev_flush_list)))
+	if (list_empty(bpf_net_ctx_get_dev_flush_list()))
 		return false;
 	__dev_flush();
 	return true;
@@ -460,7 +459,7 @@ static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
 static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
 		       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
 {
-	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
+	struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list();
 	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
 
 	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
@@ -1160,15 +1159,11 @@ static struct notifier_block dev_map_notifier = {
 
 static int __init dev_map_init(void)
 {
-	int cpu;
-
 	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
 	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
 		     offsetof(struct _bpf_dtab_netdev, dev));
 	register_netdevice_notifier(&dev_map_notifier);
 
-	for_each_possible_cpu(cpu)
-		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
 	return 0;
 }
...
@@ -35,8 +35,6 @@
 #define TX_BATCH_SIZE 32
 #define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE)
 
-static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
-
 void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
 {
 	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
@@ -372,7 +370,7 @@ static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 
 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
-	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
+	struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
 	int err;
 
 	err = xsk_rcv(xs, xdp);
@@ -387,7 +385,7 @@ int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
 
 void __xsk_map_flush(void)
 {
-	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
+	struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
 	struct xdp_sock *xs, *tmp;
 
 	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
@@ -399,7 +397,7 @@ void __xsk_map_flush(void)
 #ifdef CONFIG_DEBUG_NET
 bool xsk_map_check_flush(void)
 {
-	if (list_empty(this_cpu_ptr(&xskmap_flush_list)))
+	if (list_empty(bpf_net_ctx_get_xskmap_flush_list()))
 		return false;
 	__xsk_map_flush();
 	return true;
@@ -1772,7 +1770,7 @@ static struct pernet_operations xsk_net_ops = {
 
 static int __init xsk_init(void)
 {
-	int err, cpu;
+	int err;
 
 	err = proto_register(&xsk_proto, 0 /* no slab */);
 	if (err)
@@ -1790,8 +1788,6 @@ static int __init xsk_init(void)
 	if (err)
 		goto out_pernet;
 
-	for_each_possible_cpu(cpu)
-		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
 	return 0;
 
 out_pernet:
...