Commit a35b9af4 authored by Yonghong Song, committed by Alexei Starovoitov

bpf: Add a kfunc for generic type cast

Implement bpf_rdonly_cast(), which tries to cast an object
to a specified type. This is intended to support use cases like:
  #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
where skb_end_pointer(SKB) is an 'unsigned char *' and needs to
be cast to 'struct skb_shared_info *'.

The signature of bpf_rdonly_cast() looks like
   void *bpf_rdonly_cast(void *obj, __u32 btf_id)
The function returns the same 'obj', but typed as PTR_TO_BTF_ID with
the given btf_id. The verifier ensures that btf_id refers to a struct type.

Since the requested type cast may not reflect what 'obj' actually
represents, the returned pointer is marked PTR_UNTRUSTED, so the
return value and any pointers chased through it cannot be passed
as helper/kfunc arguments.
Signed-off-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/r/20221120195437.3114585-1-yhs@fb.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent fd264ca0
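
For context, below is a minimal BPF-side sketch of how the new kfunc is meant to be consumed, mirroring the skb_shinfo() example above. The program name, section, and extern declarations are illustrative assumptions rather than part of this patch; bpf_cast_to_kern_ctx() comes from the parent commit.

/* Illustrative sketch only -- not part of this patch. Assumes a vmlinux.h
 * generated from a kernel that exposes bpf_cast_to_kern_ctx() and
 * bpf_rdonly_cast() in its BTF, and a libbpf with __ksym/CO-RE support.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

/* kfunc prototypes, resolved against kernel BTF via __ksym */
extern void *bpf_cast_to_kern_ctx(void *obj) __ksym;
extern void *bpf_rdonly_cast(void *obj, __u32 btf_id) __ksym;

SEC("tc")
int read_shinfo(struct __sk_buff *skb)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *kskb;

	/* Get the kernel sk_buff behind the tc context ... */
	kskb = bpf_cast_to_kern_ctx(skb);

	/* ... and cast the end of the data area (an 'unsigned char *') to a
	 * read-only struct skb_shared_info pointer, i.e. an open-coded
	 * skb_shinfo(kskb).
	 */
	shinfo = bpf_rdonly_cast(kskb->head + kskb->end,
				 bpf_core_type_id_kernel(struct skb_shared_info));

	/* The result is PTR_UNTRUSTED: plain loads are allowed, but the
	 * pointer cannot be passed on to helpers or kfuncs.
	 */
	bpf_printk("nr_frags=%u gso_size=%u", shinfo->nr_frags, shinfo->gso_size);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

Because the returned pointer is untrusted, the verifier rejects attempts to pass 'shinfo' (or anything chased through it) to helpers or kfuncs, while plain reads remain allowed. At runtime the cast itself is free: fixup_kfunc_call() below rewrites the kfunc call into a single register move.
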
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1884,6 +1884,11 @@ void *bpf_cast_to_kern_ctx(void *obj)
 	return obj;
 }
 
+void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k)
+{
+	return obj__ign;
+}
+
 __diag_pop();
 
 BTF_SET8_START(generic_btf_ids)
@@ -1913,6 +1918,7 @@ BTF_ID(func, bpf_task_release)
 
 BTF_SET8_START(common_btf_ids)
 BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx)
+BTF_ID_FLAGS(func, bpf_rdonly_cast)
 BTF_SET8_END(common_btf_ids)
 
 static const struct btf_kfunc_id_set common_kfunc_set = {
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -8153,6 +8153,7 @@ enum special_kfunc_type {
 	KF_bpf_list_pop_front,
 	KF_bpf_list_pop_back,
 	KF_bpf_cast_to_kern_ctx,
+	KF_bpf_rdonly_cast,
 };
 
 BTF_SET_START(special_kfunc_set)
@@ -8163,6 +8164,7 @@ BTF_ID(func, bpf_list_push_back)
 BTF_ID(func, bpf_list_pop_front)
 BTF_ID(func, bpf_list_pop_back)
 BTF_ID(func, bpf_cast_to_kern_ctx)
+BTF_ID(func, bpf_rdonly_cast)
 BTF_SET_END(special_kfunc_set)
 
 BTF_ID_LIST(special_kfunc_list)
@@ -8173,6 +8175,7 @@ BTF_ID(func, bpf_list_push_back)
 BTF_ID(func, bpf_list_pop_front)
 BTF_ID(func, bpf_list_pop_back)
 BTF_ID(func, bpf_cast_to_kern_ctx)
+BTF_ID(func, bpf_rdonly_cast)
 
 static enum kfunc_ptr_arg_type
 get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
@@ -8809,6 +8812,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	u32 i, nargs, func_id, ptr_type_id;
 	int err, insn_idx = *insn_idx_p;
 	const struct btf_param *args;
+	const struct btf_type *ret_t;
 	struct btf *desc_btf;
 	u32 *kfunc_flags;
 
@@ -8888,7 +8892,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 
 		if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) {
 			if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl]) {
-				const struct btf_type *ret_t;
 				struct btf *ret_btf;
 				u32 ret_btf_id;
 
@@ -8941,6 +8944,18 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED;
 				regs[BPF_REG_0].btf = desc_btf;
 				regs[BPF_REG_0].btf_id = meta.ret_btf_id;
+			} else if (meta.func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
+				ret_t = btf_type_by_id(desc_btf, meta.arg_constant.value);
+				if (!ret_t || !btf_type_is_struct(ret_t)) {
+					verbose(env,
+						"kfunc bpf_rdonly_cast type ID argument must be of a struct\n");
+					return -EINVAL;
+				}
+
+				mark_reg_known_zero(env, regs, BPF_REG_0);
+				regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_UNTRUSTED;
+				regs[BPF_REG_0].btf = desc_btf;
+				regs[BPF_REG_0].btf_id = meta.arg_constant.value;
 			} else {
 				verbose(env, "kernel function %s unhandled dynamic return type\n",
 					meta.func_name);
@@ -15194,7 +15209,8 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		insn_buf[1] = addr[1];
 		insn_buf[2] = *insn;
 		*cnt = 3;
-	} else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
+	} else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
+		   desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
 		insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
 		*cnt = 1;
 	}