Commit 0157edc8 authored by Alexei Starovoitov's avatar Alexei Starovoitov

Merge branch 'device-ops-as-cb'

Quentin Monnet says:

====================
For passing device functions for offloaded eBPF programs, there used to
be no place to store the pointer without making the non-offloaded
programs pay a memory price.

As a consequence, three functions were called with ndo_bpf() through
specific commands. Now that we have struct bpf_offload_dev, and since none
of those operations rely on RTNL, we can turn these three commands into
hooks inside the struct bpf_prog_offload_ops, and pass them as part of
bpf_offload_dev_create().

This patch set changes the offload architecture to do so, and brings the
relevant changes to the nfp and netdevsim drivers.
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents c8123ead 16a8cb5c
...@@ -465,7 +465,7 @@ static int nfp_bpf_init(struct nfp_app *app) ...@@ -465,7 +465,7 @@ static int nfp_bpf_init(struct nfp_app *app)
app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf); app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf);
} }
bpf->bpf_dev = bpf_offload_dev_create(); bpf->bpf_dev = bpf_offload_dev_create(&nfp_bpf_dev_ops);
err = PTR_ERR_OR_ZERO(bpf->bpf_dev); err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
if (err) if (err)
goto err_free_neutral_maps; goto err_free_neutral_maps;
......
...@@ -509,7 +509,11 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt); ...@@ -509,7 +509,11 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
int nfp_bpf_jit(struct nfp_prog *prog); int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code); bool nfp_bpf_supported_opcode(u8 code);
extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops; int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
int prev_insn_idx);
int nfp_bpf_finalize(struct bpf_verifier_env *env);
extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops;
struct netdev_bpf; struct netdev_bpf;
struct nfp_app; struct nfp_app;
......
...@@ -33,9 +33,6 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog, ...@@ -33,9 +33,6 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
struct nfp_bpf_neutral_map *record; struct nfp_bpf_neutral_map *record;
int err; int err;
/* Map record paths are entered via ndo, update side is protected. */
ASSERT_RTNL();
/* Reuse path - other offloaded program is already tracking this map. */ /* Reuse path - other offloaded program is already tracking this map. */
record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id, record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
nfp_bpf_maps_neutral_params); nfp_bpf_maps_neutral_params);
...@@ -84,8 +81,6 @@ nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog) ...@@ -84,8 +81,6 @@ nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog)
bool freed = false; bool freed = false;
int i; int i;
ASSERT_RTNL();
for (i = 0; i < nfp_prog->map_records_cnt; i++) { for (i = 0; i < nfp_prog->map_records_cnt; i++) {
if (--nfp_prog->map_records[i]->count) { if (--nfp_prog->map_records[i]->count) {
nfp_prog->map_records[i] = NULL; nfp_prog->map_records[i] = NULL;
...@@ -187,11 +182,10 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog) ...@@ -187,11 +182,10 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog)
kfree(nfp_prog); kfree(nfp_prog);
} }
static int static int nfp_bpf_verifier_prep(struct bpf_prog *prog)
nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
struct netdev_bpf *bpf)
{ {
struct bpf_prog *prog = bpf->verifier.prog; struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
struct nfp_app *app = nn->app;
struct nfp_prog *nfp_prog; struct nfp_prog *nfp_prog;
int ret; int ret;
...@@ -209,7 +203,6 @@ nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn, ...@@ -209,7 +203,6 @@ nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
goto err_free; goto err_free;
nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog); nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
bpf->verifier.ops = &nfp_bpf_analyzer_ops;
return 0; return 0;
...@@ -219,8 +212,9 @@ nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn, ...@@ -219,8 +212,9 @@ nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
return ret; return ret;
} }
static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog) static int nfp_bpf_translate(struct bpf_prog *prog)
{ {
struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int max_instr; unsigned int max_instr;
int err; int err;
...@@ -242,15 +236,13 @@ static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog) ...@@ -242,15 +236,13 @@ static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog); return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog);
} }
static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog) static void nfp_bpf_destroy(struct bpf_prog *prog)
{ {
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
kvfree(nfp_prog->prog); kvfree(nfp_prog->prog);
nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog); nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog);
nfp_prog_free(nfp_prog); nfp_prog_free(nfp_prog);
return 0;
} }
/* Atomic engine requires values to be in big endian, we need to byte swap /* Atomic engine requires values to be in big endian, we need to byte swap
...@@ -422,12 +414,6 @@ nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap) ...@@ -422,12 +414,6 @@ nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf) int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
{ {
switch (bpf->command) { switch (bpf->command) {
case BPF_OFFLOAD_VERIFIER_PREP:
return nfp_bpf_verifier_prep(app, nn, bpf);
case BPF_OFFLOAD_TRANSLATE:
return nfp_bpf_translate(nn, bpf->offload.prog);
case BPF_OFFLOAD_DESTROY:
return nfp_bpf_destroy(nn, bpf->offload.prog);
case BPF_OFFLOAD_MAP_ALLOC: case BPF_OFFLOAD_MAP_ALLOC:
return nfp_bpf_map_alloc(app->priv, bpf->offmap); return nfp_bpf_map_alloc(app->priv, bpf->offmap);
case BPF_OFFLOAD_MAP_FREE: case BPF_OFFLOAD_MAP_FREE:
...@@ -601,3 +587,11 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, ...@@ -601,3 +587,11 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
return 0; return 0;
} }
const struct bpf_prog_offload_ops nfp_bpf_dev_ops = {
.insn_hook = nfp_verify_insn,
.finalize = nfp_bpf_finalize,
.prepare = nfp_bpf_verifier_prep,
.translate = nfp_bpf_translate,
.destroy = nfp_bpf_destroy,
};
...@@ -623,8 +623,8 @@ nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, ...@@ -623,8 +623,8 @@ nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
return 0; return 0;
} }
static int int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) int prev_insn_idx)
{ {
struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv; struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
struct nfp_insn_meta *meta = nfp_prog->verifier_meta; struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
...@@ -745,7 +745,7 @@ nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog, unsigned int cnt) ...@@ -745,7 +745,7 @@ nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog, unsigned int cnt)
goto continue_subprog; goto continue_subprog;
} }
static int nfp_bpf_finalize(struct bpf_verifier_env *env) int nfp_bpf_finalize(struct bpf_verifier_env *env)
{ {
struct bpf_subprog_info *info; struct bpf_subprog_info *info;
struct nfp_prog *nfp_prog; struct nfp_prog *nfp_prog;
...@@ -788,8 +788,3 @@ static int nfp_bpf_finalize(struct bpf_verifier_env *env) ...@@ -788,8 +788,3 @@ static int nfp_bpf_finalize(struct bpf_verifier_env *env)
return 0; return 0;
} }
const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
.insn_hook = nfp_verify_insn,
.finalize = nfp_bpf_finalize,
};
...@@ -91,11 +91,6 @@ static int nsim_bpf_finalize(struct bpf_verifier_env *env) ...@@ -91,11 +91,6 @@ static int nsim_bpf_finalize(struct bpf_verifier_env *env)
return 0; return 0;
} }
static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = {
.insn_hook = nsim_bpf_verify_insn,
.finalize = nsim_bpf_finalize,
};
static bool nsim_xdp_offload_active(struct netdevsim *ns) static bool nsim_xdp_offload_active(struct netdevsim *ns)
{ {
return ns->xdp_hw.prog; return ns->xdp_hw.prog;
...@@ -263,6 +258,24 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog) ...@@ -263,6 +258,24 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
return 0; return 0;
} }
static int nsim_bpf_verifier_prep(struct bpf_prog *prog)
{
struct netdevsim *ns = netdev_priv(prog->aux->offload->netdev);
if (!ns->bpf_bind_accept)
return -EOPNOTSUPP;
return nsim_bpf_create_prog(ns, prog);
}
static int nsim_bpf_translate(struct bpf_prog *prog)
{
struct nsim_bpf_bound_prog *state = prog->aux->offload->dev_priv;
state->state = "xlated";
return 0;
}
static void nsim_bpf_destroy_prog(struct bpf_prog *prog) static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
{ {
struct nsim_bpf_bound_prog *state; struct nsim_bpf_bound_prog *state;
...@@ -275,6 +288,14 @@ static void nsim_bpf_destroy_prog(struct bpf_prog *prog) ...@@ -275,6 +288,14 @@ static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
kfree(state); kfree(state);
} }
static const struct bpf_prog_offload_ops nsim_bpf_dev_ops = {
.insn_hook = nsim_bpf_verify_insn,
.finalize = nsim_bpf_finalize,
.prepare = nsim_bpf_verifier_prep,
.translate = nsim_bpf_translate,
.destroy = nsim_bpf_destroy_prog,
};
static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf) static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
{ {
if (bpf->prog && bpf->prog->aux->offload) { if (bpf->prog && bpf->prog->aux->offload) {
...@@ -539,24 +560,6 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf) ...@@ -539,24 +560,6 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
ASSERT_RTNL(); ASSERT_RTNL();
switch (bpf->command) { switch (bpf->command) {
case BPF_OFFLOAD_VERIFIER_PREP:
if (!ns->bpf_bind_accept)
return -EOPNOTSUPP;
err = nsim_bpf_create_prog(ns, bpf->verifier.prog);
if (err)
return err;
bpf->verifier.ops = &nsim_bpf_analyzer_ops;
return 0;
case BPF_OFFLOAD_TRANSLATE:
state = bpf->offload.prog->aux->offload->dev_priv;
state->state = "xlated";
return 0;
case BPF_OFFLOAD_DESTROY:
nsim_bpf_destroy_prog(bpf->offload.prog);
return 0;
case XDP_QUERY_PROG: case XDP_QUERY_PROG:
return xdp_attachment_query(&ns->xdp, bpf); return xdp_attachment_query(&ns->xdp, bpf);
case XDP_QUERY_PROG_HW: case XDP_QUERY_PROG_HW:
...@@ -599,7 +602,7 @@ int nsim_bpf_init(struct netdevsim *ns) ...@@ -599,7 +602,7 @@ int nsim_bpf_init(struct netdevsim *ns)
if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs)) if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs))
return -ENOMEM; return -ENOMEM;
ns->sdev->bpf_dev = bpf_offload_dev_create(); ns->sdev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops);
err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev); err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev);
if (err) if (err)
return err; return err;
......
...@@ -268,15 +268,18 @@ struct bpf_prog_offload_ops { ...@@ -268,15 +268,18 @@ struct bpf_prog_offload_ops {
int (*insn_hook)(struct bpf_verifier_env *env, int (*insn_hook)(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx); int insn_idx, int prev_insn_idx);
int (*finalize)(struct bpf_verifier_env *env); int (*finalize)(struct bpf_verifier_env *env);
int (*prepare)(struct bpf_prog *prog);
int (*translate)(struct bpf_prog *prog);
void (*destroy)(struct bpf_prog *prog);
}; };
struct bpf_prog_offload { struct bpf_prog_offload {
struct bpf_prog *prog; struct bpf_prog *prog;
struct net_device *netdev; struct net_device *netdev;
struct bpf_offload_dev *offdev;
void *dev_priv; void *dev_priv;
struct list_head offloads; struct list_head offloads;
bool dev_state; bool dev_state;
const struct bpf_prog_offload_ops *dev_ops;
void *jited_image; void *jited_image;
u32 jited_len; u32 jited_len;
}; };
...@@ -692,7 +695,8 @@ int bpf_map_offload_get_next_key(struct bpf_map *map, ...@@ -692,7 +695,8 @@ int bpf_map_offload_get_next_key(struct bpf_map *map,
bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
struct bpf_offload_dev *bpf_offload_dev_create(void); struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev); void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
struct net_device *netdev); struct net_device *netdev);
......
...@@ -245,7 +245,7 @@ static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) ...@@ -245,7 +245,7 @@ static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
return cur_func(env)->regs; return cur_func(env)->regs;
} }
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env); int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx); int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env); int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
......
...@@ -863,9 +863,6 @@ enum bpf_netdev_command { ...@@ -863,9 +863,6 @@ enum bpf_netdev_command {
XDP_QUERY_PROG, XDP_QUERY_PROG,
XDP_QUERY_PROG_HW, XDP_QUERY_PROG_HW,
/* BPF program for offload callbacks, invoked at program load time. */ /* BPF program for offload callbacks, invoked at program load time. */
BPF_OFFLOAD_VERIFIER_PREP,
BPF_OFFLOAD_TRANSLATE,
BPF_OFFLOAD_DESTROY,
BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_ALLOC,
BPF_OFFLOAD_MAP_FREE, BPF_OFFLOAD_MAP_FREE,
XDP_QUERY_XSK_UMEM, XDP_QUERY_XSK_UMEM,
...@@ -891,15 +888,6 @@ struct netdev_bpf { ...@@ -891,15 +888,6 @@ struct netdev_bpf {
/* flags with which program was installed */ /* flags with which program was installed */
u32 prog_flags; u32 prog_flags;
}; };
/* BPF_OFFLOAD_VERIFIER_PREP */
struct {
struct bpf_prog *prog;
const struct bpf_prog_offload_ops *ops; /* callee set */
} verifier;
/* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
struct {
struct bpf_prog *prog;
} offload;
/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */ /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
struct { struct {
struct bpf_offloaded_map *offmap; struct bpf_offloaded_map *offmap;
......
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
static DECLARE_RWSEM(bpf_devs_lock); static DECLARE_RWSEM(bpf_devs_lock);
struct bpf_offload_dev { struct bpf_offload_dev {
const struct bpf_prog_offload_ops *ops;
struct list_head netdevs; struct list_head netdevs;
}; };
...@@ -106,6 +107,7 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) ...@@ -106,6 +107,7 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
err = -EINVAL; err = -EINVAL;
goto err_unlock; goto err_unlock;
} }
offload->offdev = ondev->offdev;
prog->aux->offload = offload; prog->aux->offload = offload;
list_add_tail(&offload->offloads, &ondev->progs); list_add_tail(&offload->offloads, &ondev->progs);
dev_put(offload->netdev); dev_put(offload->netdev);
...@@ -121,40 +123,19 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) ...@@ -121,40 +123,19 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
return err; return err;
} }
static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd, int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
struct netdev_bpf *data)
{ {
struct bpf_prog_offload *offload = prog->aux->offload; struct bpf_prog_offload *offload;
struct net_device *netdev; int ret = -ENODEV;
ASSERT_RTNL();
if (!offload)
return -ENODEV;
netdev = offload->netdev;
data->command = cmd;
return netdev->netdev_ops->ndo_bpf(netdev, data);
}
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
{
struct netdev_bpf data = {};
int err;
data.verifier.prog = env->prog;
rtnl_lock(); down_read(&bpf_devs_lock);
err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data); offload = prog->aux->offload;
if (err) if (offload)
goto exit_unlock; ret = offload->offdev->ops->prepare(prog);
offload->dev_state = !ret;
up_read(&bpf_devs_lock);
env->prog->aux->offload->dev_ops = data.verifier.ops; return ret;
env->prog->aux->offload->dev_state = true;
exit_unlock:
rtnl_unlock();
return err;
} }
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
...@@ -166,7 +147,8 @@ int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, ...@@ -166,7 +147,8 @@ int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
down_read(&bpf_devs_lock); down_read(&bpf_devs_lock);
offload = env->prog->aux->offload; offload = env->prog->aux->offload;
if (offload) if (offload)
ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx); ret = offload->offdev->ops->insn_hook(env, insn_idx,
prev_insn_idx);
up_read(&bpf_devs_lock); up_read(&bpf_devs_lock);
return ret; return ret;
...@@ -180,8 +162,8 @@ int bpf_prog_offload_finalize(struct bpf_verifier_env *env) ...@@ -180,8 +162,8 @@ int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
down_read(&bpf_devs_lock); down_read(&bpf_devs_lock);
offload = env->prog->aux->offload; offload = env->prog->aux->offload;
if (offload) { if (offload) {
if (offload->dev_ops->finalize) if (offload->offdev->ops->finalize)
ret = offload->dev_ops->finalize(env); ret = offload->offdev->ops->finalize(env);
else else
ret = 0; ret = 0;
} }
...@@ -193,12 +175,9 @@ int bpf_prog_offload_finalize(struct bpf_verifier_env *env) ...@@ -193,12 +175,9 @@ int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
static void __bpf_prog_offload_destroy(struct bpf_prog *prog) static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{ {
struct bpf_prog_offload *offload = prog->aux->offload; struct bpf_prog_offload *offload = prog->aux->offload;
struct netdev_bpf data = {};
data.offload.prog = prog;
if (offload->dev_state) if (offload->dev_state)
WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data)); offload->offdev->ops->destroy(prog);
/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */ /* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
bpf_prog_free_id(prog, true); bpf_prog_free_id(prog, true);
...@@ -210,24 +189,22 @@ static void __bpf_prog_offload_destroy(struct bpf_prog *prog) ...@@ -210,24 +189,22 @@ static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
void bpf_prog_offload_destroy(struct bpf_prog *prog) void bpf_prog_offload_destroy(struct bpf_prog *prog)
{ {
rtnl_lock();
down_write(&bpf_devs_lock); down_write(&bpf_devs_lock);
if (prog->aux->offload) if (prog->aux->offload)
__bpf_prog_offload_destroy(prog); __bpf_prog_offload_destroy(prog);
up_write(&bpf_devs_lock); up_write(&bpf_devs_lock);
rtnl_unlock();
} }
static int bpf_prog_offload_translate(struct bpf_prog *prog) static int bpf_prog_offload_translate(struct bpf_prog *prog)
{ {
struct netdev_bpf data = {}; struct bpf_prog_offload *offload;
int ret; int ret = -ENODEV;
data.offload.prog = prog;
rtnl_lock(); down_read(&bpf_devs_lock);
ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data); offload = prog->aux->offload;
rtnl_unlock(); if (offload)
ret = offload->offdev->ops->translate(prog);
up_read(&bpf_devs_lock);
return ret; return ret;
} }
...@@ -655,7 +632,8 @@ void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, ...@@ -655,7 +632,8 @@ void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
} }
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister); EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);
struct bpf_offload_dev *bpf_offload_dev_create(void) struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops)
{ {
struct bpf_offload_dev *offdev; struct bpf_offload_dev *offdev;
int err; int err;
...@@ -673,6 +651,7 @@ struct bpf_offload_dev *bpf_offload_dev_create(void) ...@@ -673,6 +651,7 @@ struct bpf_offload_dev *bpf_offload_dev_create(void)
if (!offdev) if (!offdev)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
offdev->ops = ops;
INIT_LIST_HEAD(&offdev->netdevs); INIT_LIST_HEAD(&offdev->netdevs);
return offdev; return offdev;
......
...@@ -6368,7 +6368,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) ...@@ -6368,7 +6368,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
goto skip_full_check; goto skip_full_check;
if (bpf_prog_is_dev_bound(env->prog->aux)) { if (bpf_prog_is_dev_bound(env->prog->aux)) {
ret = bpf_prog_offload_verifier_prep(env); ret = bpf_prog_offload_verifier_prep(env->prog);
if (ret) if (ret)
goto skip_full_check; goto skip_full_check;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment