Commit 630a4d38 authored by Jakub Kicinski's avatar Jakub Kicinski Committed by Daniel Borkmann

nfp: bpf: record offload neutral maps in the driver

For asynchronous events originating from the device, like perf event
output, we need to be able to make sure that objects being referred
to by the FW message are valid on the host.  FW events can get queued
and reordered.  Even if we had a FW message "barrier" we should still
protect ourselves from bogus FW output.

Add a reverse-mapping hash table and record in it all raw map pointers
FW may refer to.  Only record neutral maps, i.e. perf event arrays.
These are currently the only objects FW can refer to.  Use RCU protection
on the read side, update side is under RTNL.

Since program vs map destruction order is slightly painful for offload
simply take an extra reference on all the recorded maps to make sure
they don't disappear.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent 0cd3cbed
/* /*
* Copyright (C) 2017 Netronome Systems, Inc. * Copyright (C) 2017-2018 Netronome Systems, Inc.
* *
* This software is dual licensed under the GNU General License Version 2, * This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this * June 1991 as shown in the file COPYING in the top-level directory of this
...@@ -43,6 +43,14 @@ ...@@ -43,6 +43,14 @@
#include "fw.h" #include "fw.h"
#include "main.h" #include "main.h"
/* Parameters for the offload-neutral map hash table.  Entries are
 * struct nfp_bpf_neutral_map records keyed on the raw host-side
 * struct bpf_map pointer (@ptr), so pointers arriving in FW messages
 * can be validated against it.
 */
const struct rhashtable_params nfp_bpf_maps_neutral_params = {
	.nelem_hint = 4,
	.key_len = FIELD_SIZEOF(struct nfp_bpf_neutral_map, ptr),
	.key_offset = offsetof(struct nfp_bpf_neutral_map, ptr),
	.head_offset = offsetof(struct nfp_bpf_neutral_map, l),
	.automatic_shrinking = true,
};
static bool nfp_net_ebpf_capable(struct nfp_net *nn) static bool nfp_net_ebpf_capable(struct nfp_net *nn)
{ {
#ifdef __LITTLE_ENDIAN #ifdef __LITTLE_ENDIAN
...@@ -401,17 +409,28 @@ static int nfp_bpf_init(struct nfp_app *app) ...@@ -401,17 +409,28 @@ static int nfp_bpf_init(struct nfp_app *app)
init_waitqueue_head(&bpf->cmsg_wq); init_waitqueue_head(&bpf->cmsg_wq);
INIT_LIST_HEAD(&bpf->map_list); INIT_LIST_HEAD(&bpf->map_list);
err = nfp_bpf_parse_capabilities(app); err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params);
if (err) if (err)
goto err_free_bpf; goto err_free_bpf;
err = nfp_bpf_parse_capabilities(app);
if (err)
goto err_free_neutral_maps;
return 0; return 0;
err_free_neutral_maps:
rhashtable_destroy(&bpf->maps_neutral);
err_free_bpf: err_free_bpf:
kfree(bpf); kfree(bpf);
return err; return err;
} }
/* Callback passed to rhashtable_free_and_destroy() on app clean-up.
 * The neutral map table must already be empty by then (each offloaded
 * program drops its records on destroy), so being called at all means
 * a record leaked.
 */
static void nfp_check_rhashtable_empty(void *ptr, void *arg)
{
	WARN_ON_ONCE(1);
}
static void nfp_bpf_clean(struct nfp_app *app) static void nfp_bpf_clean(struct nfp_app *app)
{ {
struct nfp_app_bpf *bpf = app->priv; struct nfp_app_bpf *bpf = app->priv;
...@@ -419,6 +438,8 @@ static void nfp_bpf_clean(struct nfp_app *app) ...@@ -419,6 +438,8 @@ static void nfp_bpf_clean(struct nfp_app *app)
WARN_ON(!skb_queue_empty(&bpf->cmsg_replies)); WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
WARN_ON(!list_empty(&bpf->map_list)); WARN_ON(!list_empty(&bpf->map_list));
WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use); WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
rhashtable_free_and_destroy(&bpf->maps_neutral,
nfp_check_rhashtable_empty, NULL);
kfree(bpf); kfree(bpf);
} }
......
/* /*
* Copyright (C) 2016-2017 Netronome Systems, Inc. * Copyright (C) 2016-2018 Netronome Systems, Inc.
* *
* This software is dual licensed under the GNU General License Version 2, * This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this * June 1991 as shown in the file COPYING in the top-level directory of this
...@@ -39,6 +39,7 @@ ...@@ -39,6 +39,7 @@
#include <linux/bpf_verifier.h> #include <linux/bpf_verifier.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/wait.h> #include <linux/wait.h>
...@@ -114,6 +115,8 @@ enum pkt_vec { ...@@ -114,6 +115,8 @@ enum pkt_vec {
* @maps_in_use: number of currently offloaded maps * @maps_in_use: number of currently offloaded maps
* @map_elems_in_use: number of elements allocated to offloaded maps * @map_elems_in_use: number of elements allocated to offloaded maps
* *
* @maps_neutral: hash table of offload-neutral maps (on pointer)
*
* @adjust_head: adjust head capability * @adjust_head: adjust head capability
* @adjust_head.flags: extra flags for adjust head * @adjust_head.flags: extra flags for adjust head
* @adjust_head.off_min: minimal packet offset within buffer required * @adjust_head.off_min: minimal packet offset within buffer required
...@@ -150,6 +153,8 @@ struct nfp_app_bpf { ...@@ -150,6 +153,8 @@ struct nfp_app_bpf {
unsigned int maps_in_use; unsigned int maps_in_use;
unsigned int map_elems_in_use; unsigned int map_elems_in_use;
struct rhashtable maps_neutral;
struct nfp_bpf_cap_adjust_head { struct nfp_bpf_cap_adjust_head {
u32 flags; u32 flags;
int off_min; int off_min;
...@@ -199,6 +204,14 @@ struct nfp_bpf_map { ...@@ -199,6 +204,14 @@ struct nfp_bpf_map {
enum nfp_bpf_map_use use_map[]; enum nfp_bpf_map_use use_map[];
}; };
/**
 * struct nfp_bpf_neutral_map - record of an offload-neutral map in use
 * @l:		hash table linkage (lives in bpf->maps_neutral)
 * @ptr:	raw pointer to the host struct bpf_map, used as the hash key;
 *		a map reference is held for the lifetime of the record
 * @count:	number of offloaded programs currently referring to this map
 */
struct nfp_bpf_neutral_map {
	struct rhash_head l;
	struct bpf_map *ptr;
	u32 count;
};

extern const struct rhashtable_params nfp_bpf_maps_neutral_params;
struct nfp_prog; struct nfp_prog;
struct nfp_insn_meta; struct nfp_insn_meta;
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *); typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
...@@ -367,6 +380,8 @@ static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta) ...@@ -367,6 +380,8 @@ static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
* @error: error code if something went wrong * @error: error code if something went wrong
* @stack_depth: max stack depth from the verifier * @stack_depth: max stack depth from the verifier
* @adjust_head_location: if program has single adjust head call - the insn no. * @adjust_head_location: if program has single adjust head call - the insn no.
* @map_records_cnt: the number of map pointers recorded for this prog
* @map_records: the map record pointers from bpf->maps_neutral
* @insns: list of BPF instruction wrappers (struct nfp_insn_meta) * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
*/ */
struct nfp_prog { struct nfp_prog {
...@@ -390,6 +405,9 @@ struct nfp_prog { ...@@ -390,6 +405,9 @@ struct nfp_prog {
unsigned int stack_depth; unsigned int stack_depth;
unsigned int adjust_head_location; unsigned int adjust_head_location;
unsigned int map_records_cnt;
struct nfp_bpf_neutral_map **map_records;
struct list_head insns; struct list_head insns;
}; };
......
/* /*
* Copyright (C) 2016-2017 Netronome Systems, Inc. * Copyright (C) 2016-2018 Netronome Systems, Inc.
* *
* This software is dual licensed under the GNU General License Version 2, * This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this * June 1991 as shown in the file COPYING in the top-level directory of this
...@@ -56,6 +56,126 @@ ...@@ -56,6 +56,126 @@
#include "../nfp_net_ctrl.h" #include "../nfp_net_ctrl.h"
#include "../nfp_net.h" #include "../nfp_net.h"
/* nfp_map_ptr_record() - track @map in the offload-neutral map hash table
 * so that raw pointers arriving in asynchronous FW messages can be
 * validated on the host.  A single refcounted record is shared between
 * all offloaded programs using the same map.  The caller must have
 * room left in nfp_prog->map_records.  Returns 0 or negative errno.
 */
static int
nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		   struct bpf_map *map)
{
	struct nfp_bpf_neutral_map *record;
	int err;

	/* Map record paths are entered via ndo, update side is protected. */
	ASSERT_RTNL();

	/* Reuse path - other offloaded program is already tracking this map. */
	record = rhashtable_lookup_fast(&bpf->maps_neutral, &map,
					nfp_bpf_maps_neutral_params);
	if (record) {
		nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
		record->count++;
		return 0;
	}

	/* Grab a single ref to the map for our record. The prog destroy ndo
	 * happens after free_used_maps().
	 */
	map = bpf_map_inc(map, false);
	if (IS_ERR(map))
		return PTR_ERR(map);

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record) {
		err = -ENOMEM;
		goto err_map_put;
	}

	record->ptr = map;
	record->count = 1;

	/* Publish the record; readers look it up under RCU on the FW
	 * message path.
	 */
	err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l,
				     nfp_bpf_maps_neutral_params);
	if (err)
		goto err_free_rec;

	nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;

	return 0;

err_free_rec:
	kfree(record);
err_map_put:
	bpf_map_put(map);
	return err;
}
/* nfp_map_ptrs_forget() - drop this program's references to its recorded
 * neutral maps.  Records whose use count reaches zero are unlinked from
 * the hash table; freeing and dropping the map reference are deferred
 * past an RCU grace period, since the commit documents RCU protection
 * on the read side.  Always releases and resets the record array.
 */
static void
nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog)
{
	bool freed = false;
	int i;

	ASSERT_RTNL();

	for (i = 0; i < nfp_prog->map_records_cnt; i++) {
		/* Map still used by another program - keep the record but
		 * NULL our slot so it is skipped in the free pass below.
		 */
		if (--nfp_prog->map_records[i]->count) {
			nfp_prog->map_records[i] = NULL;
			continue;
		}
		WARN_ON(rhashtable_remove_fast(&bpf->maps_neutral,
					       &nfp_prog->map_records[i]->l,
					       nfp_bpf_maps_neutral_params));
		freed = true;
	}

	if (freed) {
		/* Wait out concurrent RCU readers of the table before the
		 * unlinked records and their map references go away.
		 */
		synchronize_rcu();

		for (i = 0; i < nfp_prog->map_records_cnt; i++)
			if (nfp_prog->map_records[i]) {
				bpf_map_put(nfp_prog->map_records[i]->ptr);
				kfree(nfp_prog->map_records[i]);
			}
	}

	kfree(nfp_prog->map_records);
	nfp_prog->map_records = NULL;
	nfp_prog->map_records_cnt = 0;
}
/* nfp_map_ptrs_record() - allocate the per-program record array and
 * register every offload-neutral map used by @prog via
 * nfp_map_ptr_record().  Programs using no neutral maps keep a NULL
 * record array.  Returns 0 or negative errno; on error any records
 * made so far are forgotten again.
 */
static int
nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		    struct bpf_prog *prog)
{
	unsigned int idx, neutral_cnt = 0;
	int err;

	/* First pass - figure out how many records will be needed. */
	for (idx = 0; idx < prog->aux->used_map_cnt; idx++)
		if (bpf_map_offload_neutral(prog->aux->used_maps[idx]))
			neutral_cnt++;

	if (!neutral_cnt)
		return 0;

	nfp_prog->map_records = kmalloc_array(neutral_cnt,
					      sizeof(nfp_prog->map_records[0]),
					      GFP_KERNEL);
	if (!nfp_prog->map_records)
		return -ENOMEM;

	/* Second pass - record each neutral map, unwinding on failure. */
	for (idx = 0; idx < prog->aux->used_map_cnt; idx++) {
		if (!bpf_map_offload_neutral(prog->aux->used_maps[idx]))
			continue;

		err = nfp_map_ptr_record(bpf, nfp_prog,
					 prog->aux->used_maps[idx]);
		if (err) {
			nfp_map_ptrs_forget(bpf, nfp_prog);
			return err;
		}
	}

	WARN_ON(neutral_cnt != nfp_prog->map_records_cnt);

	return 0;
}
static int static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
unsigned int cnt) unsigned int cnt)
...@@ -151,7 +271,7 @@ static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog) ...@@ -151,7 +271,7 @@ static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64); prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
prog->aux->offload->jited_image = nfp_prog->prog; prog->aux->offload->jited_image = nfp_prog->prog;
return 0; return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog);
} }
static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog) static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
...@@ -159,6 +279,7 @@ static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog) ...@@ -159,6 +279,7 @@ static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
kvfree(nfp_prog->prog); kvfree(nfp_prog->prog);
nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog);
nfp_prog_free(nfp_prog); nfp_prog_free(nfp_prog);
return 0; return 0;
......
/* /*
* Copyright (C) 2017 Netronome Systems, Inc. * Copyright (C) 2017-2018 Netronome Systems, Inc.
* *
* This software is dual licensed under the GNU General License Version 2, * This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this * June 1991 as shown in the file COPYING in the top-level directory of this
......
...@@ -282,6 +282,7 @@ void bpf_map_put(struct bpf_map *map) ...@@ -282,6 +282,7 @@ void bpf_map_put(struct bpf_map *map)
{ {
__bpf_map_put(map, true); __bpf_map_put(map, true);
} }
EXPORT_SYMBOL_GPL(bpf_map_put);
void bpf_map_put_with_uref(struct bpf_map *map) void bpf_map_put_with_uref(struct bpf_map *map)
{ {
...@@ -543,6 +544,7 @@ struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref) ...@@ -543,6 +544,7 @@ struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
atomic_inc(&map->usercnt); atomic_inc(&map->usercnt);
return map; return map;
} }
EXPORT_SYMBOL_GPL(bpf_map_inc);
struct bpf_map *bpf_map_get_with_uref(u32 ufd) struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment