Commit e312b9e7 authored by Björn Töpel, committed by Alexei Starovoitov

xsk: Make xskmap flush_list common for all map instances

The xskmap flush list is used to track entries that need to be flushed
via the xdp_do_flush_map() function. This list used to be
per-map, but there is really no reason for that. Instead make the
flush list global for all xskmaps, which simplifies __xsk_map_flush()
and xsk_map_alloc().
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/bpf/20191219061006.21980-5-bjorn.topel@gmail.com
parent fb5aacdf
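The idea can be sketched outside the kernel. Below is a minimal userspace C sketch of the pattern the patch switches to: one global set of per-CPU flush lists shared by every map instance, instead of a flush list allocated per map. All names here (fake_xsk_sock, redirect(), flush(), the NR_CPUS array standing in for real per-CPU data) are made up for illustration; the actual kernel code uses DEFINE_PER_CPU, struct list_head and list_for_each_entry_safe, as shown in the diff that follows.

/*
 * Userspace sketch (not kernel code) of a global per-CPU flush list.
 * "Per-CPU" is simulated with an array indexed by a fake cpu id.
 */
#include <stdio.h>
#include <stddef.h>

#define NR_CPUS 4

struct list_node {
	struct list_node *next;
};

/* One flush list per "CPU", shared by all map instances. */
static struct list_node *flush_list[NR_CPUS];

struct fake_xsk_sock {
	int id;
	struct list_node flush_node;
	int queued;	/* mimics the "already on the flush list" check */
};

/* Queue a socket for flushing on the given CPU (cf. __xsk_map_redirect). */
static void redirect(struct fake_xsk_sock *xs, int cpu)
{
	if (xs->queued)
		return;
	xs->flush_node.next = flush_list[cpu];
	flush_list[cpu] = &xs->flush_node;
	xs->queued = 1;
}

/* Drain the given CPU's flush list (cf. __xsk_map_flush). */
static void flush(int cpu)
{
	struct list_node *node = flush_list[cpu];

	flush_list[cpu] = NULL;
	while (node) {
		struct fake_xsk_sock *xs = (void *)((char *)node -
			offsetof(struct fake_xsk_sock, flush_node));

		node = node->next;
		xs->queued = 0;
		printf("flushed socket %d on cpu %d\n", xs->id, cpu);
	}
}

int main(void)
{
	struct fake_xsk_sock a = { .id = 1 }, b = { .id = 2 };

	redirect(&a, 0);
	redirect(&b, 0);
	redirect(&a, 0);	/* duplicate, ignored */
	flush(0);		/* drains both, with no map reference needed */
	return 0;
}

Because the list is global, neither the redirect path nor the flush path needs a pointer to the map any more, which is exactly why the map argument disappears from __xsk_map_redirect() and __xsk_map_flush() in the diff.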
@@ -72,7 +72,6 @@ struct xdp_umem {
 struct xsk_map {
 	struct bpf_map map;
-	struct list_head __percpu *flush_list;
 	spinlock_t lock; /* Synchronize map updates */
 	struct xdp_sock *xsk_map[];
 };
@@ -139,9 +138,8 @@ void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
 			     struct xdp_sock **map_entry);
 int xsk_map_inc(struct xsk_map *map);
 void xsk_map_put(struct xsk_map *map);
-int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
-		       struct xdp_sock *xs);
-void __xsk_map_flush(struct bpf_map *map);
+int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
+void __xsk_map_flush(void);
 
 static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
 						     u32 key)
@@ -369,13 +367,12 @@ static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
 	return 0;
 }
 
-static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
-				     struct xdp_sock *xs)
+static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
 	return -EOPNOTSUPP;
 }
 
-static inline void __xsk_map_flush(struct bpf_map *map)
+static inline void __xsk_map_flush(void)
 {
 }
@@ -72,9 +72,9 @@ static void xsk_map_sock_delete(struct xdp_sock *xs,
 static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 {
 	struct bpf_map_memory mem;
-	int cpu, err, numa_node;
+	int err, numa_node;
 	struct xsk_map *m;
-	u64 cost, size;
+	u64 size;
 
 	if (!capable(CAP_NET_ADMIN))
 		return ERR_PTR(-EPERM);
@@ -86,9 +86,8 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 	numa_node = bpf_map_attr_numa_node(attr);
 	size = struct_size(m, xsk_map, attr->max_entries);
-	cost = size + array_size(sizeof(*m->flush_list), num_possible_cpus());
 
-	err = bpf_map_charge_init(&mem, cost);
+	err = bpf_map_charge_init(&mem, size);
 	if (err < 0)
 		return ERR_PTR(err);
@@ -102,16 +101,6 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 	bpf_map_charge_move(&m->map.memory, &mem);
 	spin_lock_init(&m->lock);
 
-	m->flush_list = alloc_percpu(struct list_head);
-	if (!m->flush_list) {
-		bpf_map_charge_finish(&m->map.memory);
-		bpf_map_area_free(m);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	for_each_possible_cpu(cpu)
-		INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu));
-
 	return &m->map;
 }
@@ -121,7 +110,6 @@ static void xsk_map_free(struct bpf_map *map)
 	bpf_clear_redirect_map(map);
 	synchronize_net();
-	free_percpu(m->flush_list);
 	bpf_map_area_free(m);
 }
@@ -3511,8 +3511,7 @@ xdp_do_redirect_slow(struct net_device *dev, struct xdp_buff *xdp,
 static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
 			    struct bpf_map *map,
-			    struct xdp_buff *xdp,
-			    u32 index)
+			    struct xdp_buff *xdp)
 {
 	int err;
@@ -3537,7 +3536,7 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
 	case BPF_MAP_TYPE_XSKMAP: {
 		struct xdp_sock *xs = fwd;
 
-		err = __xsk_map_redirect(map, xdp, xs);
+		err = __xsk_map_redirect(xs, xdp);
 		return err;
 	}
 	default:
@@ -3562,7 +3561,7 @@ void xdp_do_flush_map(void)
 		__cpu_map_flush(map);
 		break;
 	case BPF_MAP_TYPE_XSKMAP:
-		__xsk_map_flush(map);
+		__xsk_map_flush();
 		break;
 	default:
 		break;
@@ -3619,7 +3618,7 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
 	if (ri->map_to_flush && unlikely(ri->map_to_flush != map))
 		xdp_do_flush_map();
 
-	err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);
+	err = __bpf_tx_xdp_map(dev, fwd, map, xdp);
 	if (unlikely(err))
 		goto err;
@@ -31,6 +31,8 @@
 #define TX_BATCH_SIZE 16
 
+static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
+
 bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
 {
 	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
@@ -264,11 +266,9 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 	return err;
 }
 
-int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
-		       struct xdp_sock *xs)
+int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
-	struct xsk_map *m = container_of(map, struct xsk_map, map);
-	struct list_head *flush_list = this_cpu_ptr(m->flush_list);
+	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
 	int err;
 
 	err = xsk_rcv(xs, xdp);
@@ -281,10 +281,9 @@ int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
 	return 0;
 }
 
-void __xsk_map_flush(struct bpf_map *map)
+void __xsk_map_flush(void)
 {
-	struct xsk_map *m = container_of(map, struct xsk_map, map);
-	struct list_head *flush_list = this_cpu_ptr(m->flush_list);
+	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
 	struct xdp_sock *xs, *tmp;
 
 	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
@@ -1177,7 +1176,7 @@ static struct pernet_operations xsk_net_ops = {
 static int __init xsk_init(void)
 {
-	int err;
+	int err, cpu;
 
 	err = proto_register(&xsk_proto, 0 /* no slab */);
 	if (err)
@@ -1195,6 +1194,8 @@ static int __init xsk_init(void)
 	if (err)
 		goto out_pernet;
 
+	for_each_possible_cpu(cpu)
+		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
+
 	return 0;
 
 out_pernet: