Commit 30465003 authored by Dave Marchevsky, committed by Alexei Starovoitov

bpf: rename list_head -> graph_root in field info types

Many of the structs recently added to track field info for linked-list
head are useful as-is for rbtree root. So let's do a mechanical renaming
of list_head-related types and fields:

include/linux/bpf.h:
  struct btf_field_list_head -> struct btf_field_graph_root
  list_head -> graph_root in struct btf_field union
kernel/bpf/btf.c:
  list_head -> graph_root in struct btf_field_info

This is a nonfunctional change; functionality to actually use these
fields for rbtree will be added in further patches.
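
For reference, the renamed definitions end up roughly as below. This is a
sketch assembled from the hunks in this patch; the value_rec member and the
members not touched by the rename are filled in from the surrounding bpf.h
code rather than from this diff:

	/* include/linux/bpf.h after this patch (sketch) */
	struct btf_field_graph_root {
		struct btf *btf;
		u32 value_btf_id;
		u32 node_offset;
		struct btf_record *value_rec;	/* not in the hunks below; set in btf_check_and_fixup_fields() */
	};

	struct btf_field {
		u32 offset;
		enum btf_field_type type;
		union {
			struct btf_field_kptr kptr;
			struct btf_field_graph_root graph_root;
		};
	};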
Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Link: https://lore.kernel.org/r/20221217082506.1570898-5-davemarchevsky@fb.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 07453245
@@ -189,7 +189,7 @@ struct btf_field_kptr {
 	u32 btf_id;
 };
 
-struct btf_field_list_head {
+struct btf_field_graph_root {
 	struct btf *btf;
 	u32 value_btf_id;
 	u32 node_offset;
@@ -201,7 +201,7 @@ struct btf_field {
 	enum btf_field_type type;
 	union {
 		struct btf_field_kptr kptr;
-		struct btf_field_list_head list_head;
+		struct btf_field_graph_root graph_root;
 	};
 };
@@ -3228,7 +3228,7 @@ struct btf_field_info {
 		struct {
 			const char *node_name;
 			u32 value_btf_id;
-		} list_head;
+		} graph_root;
 	};
 };
@@ -3335,8 +3335,8 @@ static int btf_find_list_head(const struct btf *btf, const struct btf_type *pt,
 		return -EINVAL;
 	info->type = BPF_LIST_HEAD;
 	info->off = off;
-	info->list_head.value_btf_id = id;
-	info->list_head.node_name = list_node;
+	info->graph_root.value_btf_id = id;
+	info->graph_root.node_name = list_node;
 	return BTF_FIELD_FOUND;
 }
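
(Aside, not part of this patch: the node_name/value_btf_id pair recorded above
comes from the "contains:<type>:<member>" BTF decl tag attached to a
bpf_list_head field. A BPF-program-side declaration that produces such a tag
looks roughly like the sketch below, using the __contains() helper as spelled
in the selftests' bpf_experimental.h; names here are illustrative only.)

	struct foo {
		struct bpf_list_node node;
		int data;
	};

	struct map_value {
		struct bpf_spin_lock lock;
		struct bpf_list_head head __contains(foo, node);
	};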
@@ -3604,13 +3604,14 @@ static int btf_parse_list_head(const struct btf *btf, struct btf_field *field,
 	u32 offset;
 	int i;
 
-	t = btf_type_by_id(btf, info->list_head.value_btf_id);
+	t = btf_type_by_id(btf, info->graph_root.value_btf_id);
 	/* We've already checked that value_btf_id is a struct type. We
 	 * just need to figure out the offset of the list_node, and
 	 * verify its type.
 	 */
 	for_each_member(i, t, member) {
-		if (strcmp(info->list_head.node_name, __btf_name_by_offset(btf, member->name_off)))
+		if (strcmp(info->graph_root.node_name,
+			   __btf_name_by_offset(btf, member->name_off)))
 			continue;
 		/* Invalid BTF, two members with same name */
 		if (n)
@@ -3627,9 +3628,9 @@ static int btf_parse_list_head(const struct btf *btf, struct btf_field *field,
 		if (offset % __alignof__(struct bpf_list_node))
 			return -EINVAL;
-		field->list_head.btf = (struct btf *)btf;
-		field->list_head.value_btf_id = info->list_head.value_btf_id;
-		field->list_head.node_offset = offset;
+		field->graph_root.btf = (struct btf *)btf;
+		field->graph_root.value_btf_id = info->graph_root.value_btf_id;
+		field->graph_root.node_offset = offset;
 	}
 	if (!n)
 		return -ENOENT;
@@ -3736,11 +3737,11 @@ int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec)
 		if (!(rec->fields[i].type & BPF_LIST_HEAD))
			continue;
-		btf_id = rec->fields[i].list_head.value_btf_id;
+		btf_id = rec->fields[i].graph_root.value_btf_id;
 		meta = btf_find_struct_meta(btf, btf_id);
 		if (!meta)
 			return -EFAULT;
-		rec->fields[i].list_head.value_rec = meta->record;
+		rec->fields[i].graph_root.value_rec = meta->record;
 
 		if (!(rec->field_mask & BPF_LIST_NODE))
 			continue;
@@ -1756,12 +1756,12 @@ void bpf_list_head_free(const struct btf_field *field, void *list_head,
 	while (head != orig_head) {
 		void *obj = head;
 
-		obj -= field->list_head.node_offset;
+		obj -= field->graph_root.node_offset;
 		head = head->next;
 		/* The contained type can also have resources, including a
 		 * bpf_list_head which needs to be freed.
 		 */
-		bpf_obj_free_fields(field->list_head.value_rec, obj);
+		bpf_obj_free_fields(field->graph_root.value_rec, obj);
 		/* bpf_mem_free requires migrate_disable(), since we can be
 		 * called from map free path as well apart from BPF program (as
 		 * part of map ops doing bpf_obj_free_fields).
@@ -8776,21 +8776,22 @@ static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
 	field = meta->arg_list_head.field;
 
-	et = btf_type_by_id(field->list_head.btf, field->list_head.value_btf_id);
+	et = btf_type_by_id(field->graph_root.btf, field->graph_root.value_btf_id);
 	t = btf_type_by_id(reg->btf, reg->btf_id);
-	if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->list_head.btf,
-				  field->list_head.value_btf_id, true)) {
+	if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->graph_root.btf,
+				  field->graph_root.value_btf_id, true)) {
 		verbose(env, "operation on bpf_list_head expects arg#1 bpf_list_node at offset=%d "
 			"in struct %s, but arg is at offset=%d in struct %s\n",
-			field->list_head.node_offset, btf_name_by_offset(field->list_head.btf, et->name_off),
+			field->graph_root.node_offset,
+			btf_name_by_offset(field->graph_root.btf, et->name_off),
 			list_node_off, btf_name_by_offset(reg->btf, t->name_off));
 		return -EINVAL;
 	}
 
-	if (list_node_off != field->list_head.node_offset) {
+	if (list_node_off != field->graph_root.node_offset) {
 		verbose(env, "arg#1 offset=%d, but expected bpf_list_node at offset=%d in struct %s\n",
-			list_node_off, field->list_head.node_offset,
-			btf_name_by_offset(field->list_head.btf, et->name_off));
+			list_node_off, field->graph_root.node_offset,
+			btf_name_by_offset(field->graph_root.btf, et->name_off));
 		return -EINVAL;
 	}
 	/* Set arg#1 for expiration after unlock */
@@ -9232,9 +9233,9 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 				mark_reg_known_zero(env, regs, BPF_REG_0);
 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
-				regs[BPF_REG_0].btf = field->list_head.btf;
-				regs[BPF_REG_0].btf_id = field->list_head.value_btf_id;
-				regs[BPF_REG_0].off = field->list_head.node_offset;
+				regs[BPF_REG_0].btf = field->graph_root.btf;
+				regs[BPF_REG_0].btf_id = field->graph_root.value_btf_id;
+				regs[BPF_REG_0].off = field->graph_root.node_offset;
 			} else if (meta.func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
 				mark_reg_known_zero(env, regs, BPF_REG_0);
 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED;