Commit c92bbaa0 authored by Alexei Starovoitov

Merge branch 'simplify-do_redirect'

Björn Töpel says:

====================
This series aims to simplify the XDP maps and
xdp_do_redirect_map()/xdp_do_flush_map(), and to crank out some more
performance from XDP_REDIRECT scenarios.

The first part of the series simplifies all XDP_REDIRECT capable maps,
so that the flush functions (__dev_map_flush() and friends) no longer
require a map parameter, by moving the flush list from each map to
global per-CPU scope.

As a result, the map_to_flush member, and the logic built around it,
can be removed from struct bpf_redirect_info.

The result is simpler code and better performance, since the per-packet
checks and bookkeeping move out of the fast path and into the flush
step.
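
Boiled down, the new scheme looks roughly like the sketch below (a
minimal, single-threaded user-space illustration only, not the kernel
code; in the kernel the list head is per-CPU and the queued entries are
bulk queues or sockets rather than single packets):

  /* Simplified model: one global flush list instead of one per map. */
  #include <stdio.h>

  struct pkt { int id; struct pkt *next; };

  /* In the kernel this is a per-CPU list head (DEFINE_PER_CPU). */
  static struct pkt *flush_list;

  /* Queue a packet for later transmission; no map pointer needed. */
  static void enqueue(struct pkt *p)
  {
          p->next = flush_list;
          flush_list = p;
  }

  /* Flush everything queued so far, regardless of originating map. */
  static void flush(void)
  {
          while (flush_list) {
                  struct pkt *p = flush_list;

                  flush_list = p->next;
                  printf("xmit pkt %d\n", p->id);
          }
  }

  int main(void)
  {
          struct pkt a = { .id = 1 }, b = { .id = 2 };

          enqueue(&a);
          enqueue(&b);
          flush();        /* like xdp_do_flush_map() after a poll */
          return 0;
  }

Since there is only one flush list per CPU, xdp_do_flush_map() can call
the three map-type flush functions unconditionally instead of tracking
which map was used last.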

Pre-series performance:
  $ sudo taskset -c 22 ./xdpsock -i enp134s0f0 -q 20 -n 1 -r -z

   sock0@enp134s0f0:20 rxdrop xdp-drv
                  pps         pkts        1.00
  rx              20,797,350  230,942,399
  tx              0           0

  $ sudo ./xdp_redirect_cpu --dev enp134s0f0 --cpu 22 xdp_cpu_map0

  Running XDP/eBPF prog_name:xdp_cpu_map5_lb_hash_ip_pairs
  XDP-cpumap      CPU:to  pps            drop-pps    extra-info
  XDP-RX          20      7723038        0           0
  XDP-RX          total   7723038        0
  cpumap_kthread  total   0              0           0
  redirect_err    total   0              0
  xdp_exception   total   0              0

Post-series performance:
  $ sudo taskset -c 22 ./xdpsock -i enp134s0f0 -q 20 -n 1 -r -z

   sock0@enp134s0f0:20 rxdrop xdp-drv
                  pps         pkts        1.00
  rx              21,524,979  86,835,327
  tx              0           0

  $ sudo ./xdp_redirect_cpu --dev enp134s0f0 --cpu 22 xdp_cpu_map0

  Running XDP/eBPF prog_name:xdp_cpu_map5_lb_hash_ip_pairs
  XDP-cpumap      CPU:to  pps            drop-pps    extra-info
  XDP-RX          20      7840124        0           0
  XDP-RX          total   7840124        0
  cpumap_kthread  total   0              0           0
  redirect_err    total   0              0
  xdp_exception   total   0              0

Results: +3.5% (xdpsock rxdrop) and +1.5% (xdp_redirect_cpu) for the
two microbenchmarks.

v1->v2 [1]:
  * Removed 'unused-variable' compiler warning (Jakub)

[1] https://lore.kernel.org/bpf/20191218105400.2895-1-bjorn.topel@gmail.com/
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 5bf2fc1f 1170beaa
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -959,14 +959,14 @@ struct sk_buff;
 struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
 struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
-void __dev_map_flush(struct bpf_map *map);
+void __dev_map_flush(void);
 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
                     struct net_device *dev_rx);
 int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
                              struct bpf_prog *xdp_prog);
 
 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
-void __cpu_map_flush(struct bpf_map *map);
+void __cpu_map_flush(void);
 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
                     struct net_device *dev_rx);
@@ -1068,7 +1068,7 @@ static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map
         return NULL;
 }
 
-static inline void __dev_map_flush(struct bpf_map *map)
+static inline void __dev_map_flush(void)
 {
 }
@@ -1097,7 +1097,7 @@ struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
         return NULL;
 }
 
-static inline void __cpu_map_flush(struct bpf_map *map)
+static inline void __cpu_map_flush(void)
 {
 }
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -592,7 +592,6 @@ struct bpf_redirect_info {
         u32 tgt_index;
         void *tgt_value;
         struct bpf_map *map;
-        struct bpf_map *map_to_flush;
         u32 kern_flags;
 };
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -72,7 +72,6 @@ struct xdp_umem {
 struct xsk_map {
         struct bpf_map map;
-        struct list_head __percpu *flush_list;
         spinlock_t lock; /* Synchronize map updates */
         struct xdp_sock *xsk_map[];
 };
@@ -139,9 +138,8 @@ void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
                              struct xdp_sock **map_entry);
 int xsk_map_inc(struct xsk_map *map);
 void xsk_map_put(struct xsk_map *map);
-int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
-                       struct xdp_sock *xs);
-void __xsk_map_flush(struct bpf_map *map);
+int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
+void __xsk_map_flush(void);
 
 static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
                                                      u32 key)
@@ -369,13 +367,12 @@ static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
         return 0;
 }
 
-static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
-                                     struct xdp_sock *xs)
+static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
         return -EOPNOTSUPP;
 }
 
-static inline void __xsk_map_flush(struct bpf_map *map)
+static inline void __xsk_map_flush(void)
 {
 }
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -72,17 +72,18 @@ struct bpf_cpu_map {
         struct bpf_map map;
         /* Below members specific for map type */
         struct bpf_cpu_map_entry **cpu_map;
-        struct list_head __percpu *flush_list;
 };
 
-static int bq_flush_to_queue(struct xdp_bulk_queue *bq, bool in_napi_ctx);
+static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);
+
+static int bq_flush_to_queue(struct xdp_bulk_queue *bq);
 
 static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 {
         struct bpf_cpu_map *cmap;
         int err = -ENOMEM;
-        int ret, cpu;
         u64 cost;
+        int ret;
 
         if (!capable(CAP_SYS_ADMIN))
                 return ERR_PTR(-EPERM);
@@ -106,7 +107,6 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
         /* make sure page count doesn't overflow */
         cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *);
-        cost += sizeof(struct list_head) * num_possible_cpus();
 
         /* Notice returns -EPERM on if map size is larger than memlock limit */
         ret = bpf_map_charge_init(&cmap->map.memory, cost);
@@ -115,23 +115,14 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
                 goto free_cmap;
         }
 
-        cmap->flush_list = alloc_percpu(struct list_head);
-        if (!cmap->flush_list)
-                goto free_charge;
-
-        for_each_possible_cpu(cpu)
-                INIT_LIST_HEAD(per_cpu_ptr(cmap->flush_list, cpu));
-
         /* Alloc array for possible remote "destination" CPUs */
         cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
                                            sizeof(struct bpf_cpu_map_entry *),
                                            cmap->map.numa_node);
         if (!cmap->cpu_map)
-                goto free_percpu;
+                goto free_charge;
 
         return &cmap->map;
-free_percpu:
-        free_percpu(cmap->flush_list);
 free_charge:
         bpf_map_charge_finish(&cmap->map.memory);
 free_cmap:
@@ -399,22 +390,14 @@ static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
 static void __cpu_map_entry_free(struct rcu_head *rcu)
 {
         struct bpf_cpu_map_entry *rcpu;
-        int cpu;
 
         /* This cpu_map_entry have been disconnected from map and one
-         * RCU graze-period have elapsed. Thus, XDP cannot queue any
+         * RCU grace-period have elapsed. Thus, XDP cannot queue any
          * new packets and cannot change/set flush_needed that can
          * find this entry.
          */
         rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu);
 
-        /* Flush remaining packets in percpu bulkq */
-        for_each_online_cpu(cpu) {
-                struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu);
-
-                /* No concurrent bq_enqueue can run at this point */
-                bq_flush_to_queue(bq, false);
-        }
         free_percpu(rcpu->bulkq);
         /* Cannot kthread_stop() here, last put free rcpu resources */
         put_cpu_map_entry(rcpu);
@@ -436,7 +419,7 @@ static void __cpu_map_entry_free(struct rcu_head *rcu)
  * percpu bulkq to queue. Due to caller map_delete_elem() disable
  * preemption, cannot call kthread_stop() to make sure queue is empty.
  * Instead a work_queue is started for stopping kthread,
- * cpu_map_kthread_stop, which waits for an RCU graze period before
+ * cpu_map_kthread_stop, which waits for an RCU grace period before
  * stopping kthread, emptying the queue.
  */
 static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
@@ -507,7 +490,6 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
 static void cpu_map_free(struct bpf_map *map)
 {
         struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
-        int cpu;
         u32 i;
 
         /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
@@ -522,18 +504,6 @@ static void cpu_map_free(struct bpf_map *map)
         bpf_clear_redirect_map(map);
         synchronize_rcu();
 
-        /* To ensure all pending flush operations have completed wait for flush
-         * list be empty on _all_ cpus. Because the above synchronize_rcu()
-         * ensures the map is disconnected from the program we can assume no new
-         * items will be added to the list.
-         */
-        for_each_online_cpu(cpu) {
-                struct list_head *flush_list = per_cpu_ptr(cmap->flush_list, cpu);
-
-                while (!list_empty(flush_list))
-                        cond_resched();
-        }
-
         /* For cpu_map the remote CPUs can still be using the entries
          * (struct bpf_cpu_map_entry).
          */
@@ -544,10 +514,9 @@ static void cpu_map_free(struct bpf_map *map)
                 if (!rcpu)
                         continue;
 
-                /* bq flush and cleanup happens after RCU graze-period */
+                /* bq flush and cleanup happens after RCU grace-period */
                 __cpu_map_entry_replace(cmap, i, NULL); /* call_rcu */
         }
-        free_percpu(cmap->flush_list);
         bpf_map_area_free(cmap->cpu_map);
         kfree(cmap);
 }
@@ -599,7 +568,7 @@ const struct bpf_map_ops cpu_map_ops = {
         .map_check_btf = map_check_no_btf,
 };
 
-static int bq_flush_to_queue(struct xdp_bulk_queue *bq, bool in_napi_ctx)
+static int bq_flush_to_queue(struct xdp_bulk_queue *bq)
 {
         struct bpf_cpu_map_entry *rcpu = bq->obj;
         unsigned int processed = 0, drops = 0;
@@ -620,10 +589,7 @@ static int bq_flush_to_queue(struct xdp_bulk_queue *bq, bool in_napi_ctx)
                 err = __ptr_ring_produce(q, xdpf);
                 if (err) {
                         drops++;
-                        if (likely(in_napi_ctx))
-                                xdp_return_frame_rx_napi(xdpf);
-                        else
-                                xdp_return_frame(xdpf);
+                        xdp_return_frame_rx_napi(xdpf);
                 }
                 processed++;
         }
@@ -642,11 +608,11 @@ static int bq_flush_to_queue(struct xdp_bulk_queue *bq, bool in_napi_ctx)
  */
 static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
 {
-        struct list_head *flush_list = this_cpu_ptr(rcpu->cmap->flush_list);
+        struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
         struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
 
         if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
-                bq_flush_to_queue(bq, true);
+                bq_flush_to_queue(bq);
 
         /* Notice, xdp_buff/page MUST be queued here, long enough for
          * driver to code invoking us to finished, due to driver
@@ -681,16 +647,26 @@ int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
         return 0;
 }
 
-void __cpu_map_flush(struct bpf_map *map)
+void __cpu_map_flush(void)
 {
-        struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
-        struct list_head *flush_list = this_cpu_ptr(cmap->flush_list);
+        struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
         struct xdp_bulk_queue *bq, *tmp;
 
         list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
-                bq_flush_to_queue(bq, true);
+                bq_flush_to_queue(bq);
 
                 /* If already running, costs spin_lock_irqsave + smb_mb */
                 wake_up_process(bq->obj->kthread);
         }
 }
+
+static int __init cpu_map_init(void)
+{
+        int cpu;
+
+        for_each_possible_cpu(cpu)
+                INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
+        return 0;
+}
+
+subsys_initcall(cpu_map_init);
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -75,7 +75,6 @@ struct bpf_dtab_netdev {
 struct bpf_dtab {
         struct bpf_map map;
         struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
-        struct list_head __percpu *flush_list;
         struct list_head list;
 
         /* these are only used for DEVMAP_HASH type maps */
@@ -85,6 +84,7 @@ struct bpf_dtab {
         u32 n_buckets;
 };
 
+static DEFINE_PER_CPU(struct list_head, dev_map_flush_list);
 static DEFINE_SPINLOCK(dev_map_lock);
 static LIST_HEAD(dev_map_list);
 
@@ -109,8 +109,8 @@ static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
 static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
 {
-        int err, cpu;
-        u64 cost;
+        u64 cost = 0;
+        int err;
 
         /* check sanity of attributes */
         if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -125,9 +125,6 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
         bpf_map_init_from_attr(&dtab->map, attr);
 
-        /* make sure page count doesn't overflow */
-        cost = (u64) sizeof(struct list_head) * num_possible_cpus();
-
         if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
                 dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
@@ -143,17 +140,10 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
         if (err)
                 return -EINVAL;
 
-        dtab->flush_list = alloc_percpu(struct list_head);
-        if (!dtab->flush_list)
-                goto free_charge;
-
-        for_each_possible_cpu(cpu)
-                INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));
-
         if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
                 dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
                 if (!dtab->dev_index_head)
-                        goto free_percpu;
+                        goto free_charge;
 
                 spin_lock_init(&dtab->index_lock);
         } else {
@@ -161,13 +151,11 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
                                                       sizeof(struct bpf_dtab_netdev *),
                                                       dtab->map.numa_node);
                 if (!dtab->netdev_map)
-                        goto free_percpu;
+                        goto free_charge;
         }
 
         return 0;
 
-free_percpu:
-        free_percpu(dtab->flush_list);
 free_charge:
         bpf_map_charge_finish(&dtab->map.memory);
         return -ENOMEM;
@@ -201,7 +189,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 static void dev_map_free(struct bpf_map *map)
 {
         struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
-        int i, cpu;
+        int i;
 
         /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
          * so the programs (can be more than one that used this map) were
@@ -221,18 +209,6 @@ static void dev_map_free(struct bpf_map *map)
         /* Make sure prior __dev_map_entry_free() have completed. */
         rcu_barrier();
 
-        /* To ensure all pending flush operations have completed wait for flush
-         * list to empty on _all_ cpus.
-         * Because the above synchronize_rcu() ensures the map is disconnected
-         * from the program we can assume no new items will be added.
-         */
-        for_each_online_cpu(cpu) {
-                struct list_head *flush_list = per_cpu_ptr(dtab->flush_list, cpu);
-
-                while (!list_empty(flush_list))
-                        cond_resched();
-        }
-
         if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
                 for (i = 0; i < dtab->n_buckets; i++) {
                         struct bpf_dtab_netdev *dev;
@@ -266,7 +242,6 @@ static void dev_map_free(struct bpf_map *map)
                 bpf_map_area_free(dtab->netdev_map);
         }
 
-        free_percpu(dtab->flush_list);
         kfree(dtab);
 }
 
@@ -345,8 +320,7 @@ static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
         return -ENOENT;
 }
 
-static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags,
-                       bool in_napi_ctx)
+static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags)
 {
         struct bpf_dtab_netdev *obj = bq->obj;
         struct net_device *dev = obj->dev;
@@ -384,11 +358,7 @@ static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags,
         for (i = 0; i < bq->count; i++) {
                 struct xdp_frame *xdpf = bq->q[i];
 
-                /* RX path under NAPI protection, can return frames faster */
-                if (likely(in_napi_ctx))
-                        xdp_return_frame_rx_napi(xdpf);
-                else
-                        xdp_return_frame(xdpf);
+                xdp_return_frame_rx_napi(xdpf);
                 drops++;
         }
         goto out;
@@ -401,15 +371,14 @@ static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags,
  * net device can be torn down. On devmap tear down we ensure the flush list
  * is empty before completing to ensure all flush operations have completed.
  */
-void __dev_map_flush(struct bpf_map *map)
+void __dev_map_flush(void)
 {
-        struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
-        struct list_head *flush_list = this_cpu_ptr(dtab->flush_list);
+        struct list_head *flush_list = this_cpu_ptr(&dev_map_flush_list);
         struct xdp_bulk_queue *bq, *tmp;
 
         rcu_read_lock();
         list_for_each_entry_safe(bq, tmp, flush_list, flush_node)
-                bq_xmit_all(bq, XDP_XMIT_FLUSH, true);
+                bq_xmit_all(bq, XDP_XMIT_FLUSH);
         rcu_read_unlock();
 }
 
@@ -436,11 +405,11 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
                       struct net_device *dev_rx)
 {
-        struct list_head *flush_list = this_cpu_ptr(obj->dtab->flush_list);
+        struct list_head *flush_list = this_cpu_ptr(&dev_map_flush_list);
         struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
 
         if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
-                bq_xmit_all(bq, 0, true);
+                bq_xmit_all(bq, 0);
 
         /* Ingress dev_rx will be the same for all xdp_frame's in
          * bulk_queue, because bq stored per-CPU and must be flushed
@@ -509,27 +478,11 @@ static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
         return dev ? &dev->ifindex : NULL;
 }
 
-static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
-{
-        if (dev->dev->netdev_ops->ndo_xdp_xmit) {
-                struct xdp_bulk_queue *bq;
-                int cpu;
-
-                rcu_read_lock();
-                for_each_online_cpu(cpu) {
-                        bq = per_cpu_ptr(dev->bulkq, cpu);
-                        bq_xmit_all(bq, XDP_XMIT_FLUSH, false);
-                }
-                rcu_read_unlock();
-        }
-}
-
 static void __dev_map_entry_free(struct rcu_head *rcu)
 {
         struct bpf_dtab_netdev *dev;
 
         dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
-        dev_map_flush_old(dev);
         free_percpu(dev->bulkq);
         dev_put(dev->dev);
         kfree(dev);
@@ -810,10 +763,15 @@ static struct notifier_block dev_map_notifier = {
 
 static int __init dev_map_init(void)
 {
+        int cpu;
+
         /* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
         BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
                      offsetof(struct _bpf_dtab_netdev, dev));
         register_netdevice_notifier(&dev_map_notifier);
+
+        for_each_possible_cpu(cpu)
+                INIT_LIST_HEAD(&per_cpu(dev_map_flush_list, cpu));
         return 0;
 }
--- a/kernel/bpf/xskmap.c
+++ b/kernel/bpf/xskmap.c
@@ -72,9 +72,9 @@ static void xsk_map_sock_delete(struct xdp_sock *xs,
 static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 {
         struct bpf_map_memory mem;
-        int cpu, err, numa_node;
+        int err, numa_node;
         struct xsk_map *m;
-        u64 cost, size;
+        u64 size;
 
         if (!capable(CAP_NET_ADMIN))
                 return ERR_PTR(-EPERM);
@@ -86,9 +86,8 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
         numa_node = bpf_map_attr_numa_node(attr);
         size = struct_size(m, xsk_map, attr->max_entries);
-        cost = size + array_size(sizeof(*m->flush_list), num_possible_cpus());
 
-        err = bpf_map_charge_init(&mem, cost);
+        err = bpf_map_charge_init(&mem, size);
         if (err < 0)
                 return ERR_PTR(err);
@@ -102,16 +101,6 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
         bpf_map_charge_move(&m->map.memory, &mem);
         spin_lock_init(&m->lock);
 
-        m->flush_list = alloc_percpu(struct list_head);
-        if (!m->flush_list) {
-                bpf_map_charge_finish(&m->map.memory);
-                bpf_map_area_free(m);
-                return ERR_PTR(-ENOMEM);
-        }
-
-        for_each_possible_cpu(cpu)
-                INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu));
-
         return &m->map;
 }
 
@@ -121,7 +110,6 @@ static void xsk_map_free(struct bpf_map *map)
         bpf_clear_redirect_map(map);
         synchronize_net();
-        free_percpu(m->flush_list);
         bpf_map_area_free(m);
 }
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3510,36 +3510,16 @@ xdp_do_redirect_slow(struct net_device *dev, struct xdp_buff *xdp,
 }
 
 static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
-                            struct bpf_map *map,
-                            struct xdp_buff *xdp,
-                            u32 index)
+                            struct bpf_map *map, struct xdp_buff *xdp)
 {
-        int err;
-
         switch (map->map_type) {
         case BPF_MAP_TYPE_DEVMAP:
-        case BPF_MAP_TYPE_DEVMAP_HASH: {
-                struct bpf_dtab_netdev *dst = fwd;
-
-                err = dev_map_enqueue(dst, xdp, dev_rx);
-                if (unlikely(err))
-                        return err;
-                break;
-        }
-        case BPF_MAP_TYPE_CPUMAP: {
-                struct bpf_cpu_map_entry *rcpu = fwd;
-
-                err = cpu_map_enqueue(rcpu, xdp, dev_rx);
-                if (unlikely(err))
-                        return err;
-                break;
-        }
-        case BPF_MAP_TYPE_XSKMAP: {
-                struct xdp_sock *xs = fwd;
-
-                err = __xsk_map_redirect(map, xdp, xs);
-                return err;
-        }
+        case BPF_MAP_TYPE_DEVMAP_HASH:
+                return dev_map_enqueue(fwd, xdp, dev_rx);
+        case BPF_MAP_TYPE_CPUMAP:
+                return cpu_map_enqueue(fwd, xdp, dev_rx);
+        case BPF_MAP_TYPE_XSKMAP:
+                return __xsk_map_redirect(fwd, xdp);
         default:
                 break;
         }
@@ -3548,26 +3528,9 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
 
 void xdp_do_flush_map(void)
 {
-        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
-        struct bpf_map *map = ri->map_to_flush;
-
-        ri->map_to_flush = NULL;
-        if (map) {
-                switch (map->map_type) {
-                case BPF_MAP_TYPE_DEVMAP:
-                case BPF_MAP_TYPE_DEVMAP_HASH:
-                        __dev_map_flush(map);
-                        break;
-                case BPF_MAP_TYPE_CPUMAP:
-                        __cpu_map_flush(map);
-                        break;
-                case BPF_MAP_TYPE_XSKMAP:
-                        __xsk_map_flush(map);
-                        break;
-                default:
-                        break;
-                }
-        }
+        __dev_map_flush();
+        __cpu_map_flush();
+        __xsk_map_flush();
 }
 EXPORT_SYMBOL_GPL(xdp_do_flush_map);
 
@@ -3616,14 +3579,10 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
         ri->tgt_value = NULL;
         WRITE_ONCE(ri->map, NULL);
 
-        if (ri->map_to_flush && unlikely(ri->map_to_flush != map))
-                xdp_do_flush_map();
-
-        err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);
+        err = __bpf_tx_xdp_map(dev, fwd, map, xdp);
         if (unlikely(err))
                 goto err;
 
-        ri->map_to_flush = map;
         _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
         return 0;
 err:
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -31,6 +31,8 @@
 
 #define TX_BATCH_SIZE 16
 
+static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
+
 bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
 {
         return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
@@ -264,11 +266,9 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
         return err;
 }
 
-int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
-                       struct xdp_sock *xs)
+int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
-        struct xsk_map *m = container_of(map, struct xsk_map, map);
-        struct list_head *flush_list = this_cpu_ptr(m->flush_list);
+        struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
         int err;
 
         err = xsk_rcv(xs, xdp);
@@ -281,10 +281,9 @@ int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
         return 0;
 }
 
-void __xsk_map_flush(struct bpf_map *map)
+void __xsk_map_flush(void)
 {
-        struct xsk_map *m = container_of(map, struct xsk_map, map);
-        struct list_head *flush_list = this_cpu_ptr(m->flush_list);
+        struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
         struct xdp_sock *xs, *tmp;
 
         list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
@@ -1177,7 +1176,7 @@ static struct pernet_operations xsk_net_ops = {
 
 static int __init xsk_init(void)
 {
-        int err;
+        int err, cpu;
 
         err = proto_register(&xsk_proto, 0 /* no slab */);
         if (err)
@@ -1195,6 +1194,8 @@ static int __init xsk_init(void)
         if (err)
                 goto out_pernet;
 
+        for_each_possible_cpu(cpu)
+                INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
         return 0;
 
 out_pernet: