Commit 4b3ccca5 authored by Alexei Starovoitov

Merge branch 'bpf-refcount-followups-2-owner-field'

Dave Marchevsky says:

====================
BPF Refcount followups 2: owner field

This series adds an 'owner' field to bpf_{list,rb}_node structs, to be
used by the runtime to determine whether insertion or removal operations
are valid in shared ownership scenarios. Both the races which the series
fixes and the fix itself are inspired by Kumar's suggestions in [0].

Aside from insertion and removal having more reasons to fail, there are
no user-facing changes as a result of this series.
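
Concretely, the owner field acts as a small per-node state machine: NULL while
the node is in no collection, BPF_PTR_POISON transiently while an insert is in
flight, and a pointer to the owning bpf_list_head/bpf_rb_root once inserted. A
minimal sketch of the claim step the insert helpers rely on (names as
introduced by Patch 3):

    /* Atomically claim a free node. Fails if the node already belongs to
     * a collection (owner == some head/root) or a racing insert has
     * claimed it (owner == BPF_PTR_POISON).
     */
    if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON))
            return -EINVAL; /* helper drops the node's reference */
    /* ... link node into the collection ... */
    WRITE_ONCE(node->owner, head_or_root);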

* Patch 1 reverts disabling of bpf_refcount_acquire so that the fixed
logic can be exercised by CI. It should _not_ be applied.
* Patch 2 adds internal definitions of bpf_{rb,list}_node so that
their fields are easier to access.
* Patch 3 is the meat of the series - it adds 'owner' field and
enforcement of correct owner to insertion and removal helpers.
* Patch 4 adds a test based on Kumar's examples.
* Patch 5 disables the test until bpf_refcount_acquire is re-enabled.
* Patch 6 reverts disabling of the test added in this series so that the
  fixed logic can be exercised by CI. It should _not_ be applied.

  [0]: https://lore.kernel.org/bpf/d7hyspcow5wtjcmw4fugdgyp3fwhljwuscp3xyut5qnwivyeru@ysdq543otzv2/

Changelog:

v1 -> v2: lore.kernel.org/bpf/20230711175945.3298231-1-davemarchevsky@fb.com/

Patch 2 ("Introduce internal definitions for UAPI-opaque bpf_{rb,list}_node")
  * Rename bpf_{rb,list}_node_internal -> bpf_{list,rb}_node_kern (Alexei)

Patch 3 ("bpf: Add 'owner' field to bpf_{list,rb}_node")
  * WARN_ON_ONCE in __bpf_list_del when node has wrong owner. This shouldn't
    happen, but worth checking regardless (Alexei, offline convo)
  * Continue previous patch's renaming changes
====================

Link: https://lore.kernel.org/r/20230718083813.3416104-1-davemarchevsky@fb.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 60cc1f7d f3514a5d

--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -228,6 +228,18 @@ struct btf_record {
 	struct btf_field fields[];
 };
 
+/* Non-opaque version of bpf_rb_node in uapi/linux/bpf.h */
+struct bpf_rb_node_kern {
+	struct rb_node rb_node;
+	void *owner;
+} __attribute__((aligned(8)));
+
+/* Non-opaque version of bpf_list_node in uapi/linux/bpf.h */
+struct bpf_list_node_kern {
+	struct list_head list_head;
+	void *owner;
+} __attribute__((aligned(8)));
+
 struct bpf_map {
 	/* The first two cachelines with read-mostly members of which some
 	 * are also accessed in fast-path (e.g. ops, max_entries).

--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -7052,6 +7052,7 @@ struct bpf_list_head {
 struct bpf_list_node {
 	__u64 :64;
 	__u64 :64;
+	__u64 :64;
 } __attribute__((aligned(8)));
 
 struct bpf_rb_root {
@@ -7063,6 +7064,7 @@ struct bpf_rb_node {
 	__u64 :64;
 	__u64 :64;
 	__u64 :64;
+	__u64 :64;
 } __attribute__((aligned(8)));
 
 struct bpf_refcount {
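
The UAPI structs stay opaque: each gains one more anonymous __u64 :64 member to
reserve space for the owner pointer, growing bpf_list_node from 16 to 24 bytes
and bpf_rb_node from 24 to 32, which is exactly what the BTF size and offset
updates in the selftests below track. A hypothetical compile-time sanity check,
not part of this series, that the kernel-side mirrors fit in the reserved
space:

    /* Hypothetical check, not in the patch: the runtime casts the opaque
     * UAPI structs to the _kern mirrors, so the mirrors must fit within
     * the UAPI-reserved space and keep 8-byte alignment.
     */
    #include <linux/bpf.h>  /* bpf_{list,rb}_node_kern + UAPI structs */

    static_assert(sizeof(struct bpf_list_node_kern) <= sizeof(struct bpf_list_node));
    static_assert(sizeof(struct bpf_rb_node_kern) <= sizeof(struct bpf_rb_node));
    static_assert(__alignof__(struct bpf_list_node_kern) == 8);
    static_assert(__alignof__(struct bpf_rb_node_kern) == 8);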

--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1942,23 +1942,29 @@ __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta
 	return (void *)p__refcounted_kptr;
 }
 
-static int __bpf_list_add(struct bpf_list_node *node, struct bpf_list_head *head,
+static int __bpf_list_add(struct bpf_list_node_kern *node,
+			  struct bpf_list_head *head,
 			  bool tail, struct btf_record *rec, u64 off)
 {
-	struct list_head *n = (void *)node, *h = (void *)head;
+	struct list_head *n = &node->list_head, *h = (void *)head;
 
 	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
 	 * called on its fields, so init here
 	 */
 	if (unlikely(!h->next))
 		INIT_LIST_HEAD(h);
-	if (!list_empty(n)) {
+
+	/* node->owner != NULL implies !list_empty(n), no need to separately
+	 * check the latter
+	 */
+	if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
 		/* Only called from BPF prog, no need to migrate_disable */
 		__bpf_obj_drop_impl((void *)n - off, rec);
 		return -EINVAL;
 	}
 
 	tail ? list_add_tail(n, h) : list_add(n, h);
+	WRITE_ONCE(node->owner, head);
 
 	return 0;
 }
@@ -1967,25 +1973,26 @@ __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
 					 struct bpf_list_node *node,
 					 void *meta__ign, u64 off)
 {
+	struct bpf_list_node_kern *n = (void *)node;
 	struct btf_struct_meta *meta = meta__ign;
 
-	return __bpf_list_add(node, head, false,
-			      meta ? meta->record : NULL, off);
+	return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off);
 }
 
 __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
 					struct bpf_list_node *node,
 					void *meta__ign, u64 off)
 {
+	struct bpf_list_node_kern *n = (void *)node;
 	struct btf_struct_meta *meta = meta__ign;
 
-	return __bpf_list_add(node, head, true,
-			      meta ? meta->record : NULL, off);
+	return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off);
 }
 
 static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail)
 {
 	struct list_head *n, *h = (void *)head;
+	struct bpf_list_node_kern *node;
 
 	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
 	 * called on its fields, so init here
@@ -1994,8 +2001,14 @@ static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tai
 		INIT_LIST_HEAD(h);
 	if (list_empty(h))
 		return NULL;
+
 	n = tail ? h->prev : h->next;
+	node = container_of(n, struct bpf_list_node_kern, list_head);
+	if (WARN_ON_ONCE(READ_ONCE(node->owner) != head))
+		return NULL;
+
 	list_del_init(n);
+	WRITE_ONCE(node->owner, NULL);
 	return (struct bpf_list_node *)n;
 }
 
@@ -2012,29 +2025,38 @@ __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
 						  struct bpf_rb_node *node)
 {
+	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
 	struct rb_root_cached *r = (struct rb_root_cached *)root;
-	struct rb_node *n = (struct rb_node *)node;
+	struct rb_node *n = &node_internal->rb_node;
 
-	if (RB_EMPTY_NODE(n))
+	/* node_internal->owner != root implies either RB_EMPTY_NODE(n) or
+	 * n is owned by some other tree. No need to check RB_EMPTY_NODE(n)
+	 */
+	if (READ_ONCE(node_internal->owner) != root)
 		return NULL;
 
 	rb_erase_cached(n, r);
 	RB_CLEAR_NODE(n);
+	WRITE_ONCE(node_internal->owner, NULL);
 	return (struct bpf_rb_node *)n;
 }
 
 /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF
  * program
 */
-static int __bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
+static int __bpf_rbtree_add(struct bpf_rb_root *root,
+			    struct bpf_rb_node_kern *node,
 			    void *less, struct btf_record *rec, u64 off)
 {
 	struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node;
-	struct rb_node *parent = NULL, *n = (struct rb_node *)node;
+	struct rb_node *parent = NULL, *n = &node->rb_node;
 	bpf_callback_t cb = (bpf_callback_t)less;
 	bool leftmost = true;
 
-	if (!RB_EMPTY_NODE(n)) {
+	/* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately
+	 * check the latter
+	 */
+	if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
 		/* Only called from BPF prog, no need to migrate_disable */
 		__bpf_obj_drop_impl((void *)n - off, rec);
 		return -EINVAL;
@@ -2052,6 +2074,7 @@ static int __bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
 
 	rb_link_node(n, parent, link);
 	rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost);
+	WRITE_ONCE(node->owner, root);
 	return 0;
 }
 
@@ -2060,8 +2083,9 @@ __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node
 				    void *meta__ign, u64 off)
 {
 	struct btf_struct_meta *meta = meta__ign;
+	struct bpf_rb_node_kern *n = (void *)node;
 
-	return __bpf_rbtree_add(root, node, (void *)less, meta ? meta->record : NULL, off);
+	return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off);
 }
 
 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
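
From a BPF program's point of view, the only observable change above is that an
insert can now fail at runtime when the node already belongs to some
collection, with the helper dropping the passed reference. A hedged sketch of
what the new check rejects; it follows the conventions of the selftest further
below (vmlinux.h, bpf_experimental.h, and a private() section macro as defined
in the selftest progs are assumed; head_a/head_b/lock_a/lock_b and
double_insert_fails are hypothetical names, not part of the series):

    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>
    #include "bpf_experimental.h"

    struct node_data {
            long key;
            struct bpf_list_node l;
            struct bpf_refcount ref;
    };

    private(A) struct bpf_spin_lock lock_a;
    private(A) struct bpf_list_head head_a __contains(node_data, l);
    private(B) struct bpf_spin_lock lock_b;
    private(B) struct bpf_list_head head_b __contains(node_data, l);

    SEC("tc")
    long double_insert_fails(void *ctx)
    {
            struct node_data *n, *m;
            long ret;

            n = bpf_obj_new(typeof(*n));
            if (!n)
                    return 1;
            m = bpf_refcount_acquire(n);    /* two refs, one node */

            bpf_spin_lock(&lock_a);
            bpf_list_push_back(&head_a, &n->l);     /* sets l.owner = &head_a */
            bpf_spin_unlock(&lock_a);

            bpf_spin_lock(&lock_b);
            ret = bpf_list_push_back(&head_b, &m->l);
            bpf_spin_unlock(&lock_b);

            /* owner != NULL, so the second push fails with -EINVAL and the
             * helper drops m's reference instead of corrupting head_a
             */
            return ret ? 0 : 2;
    }

    char _license[] SEC("license") = "GPL";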

--- a/tools/testing/selftests/bpf/prog_tests/linked_list.c
+++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c
@@ -23,7 +23,7 @@ static struct {
 	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
 	{ #test "_missing_lock_pop_back", \
 	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" },
-TEST(kptr, 32)
+TEST(kptr, 40)
 TEST(global, 16)
 TEST(map, 0)
 TEST(inner_map, 0)
@@ -31,7 +31,7 @@ static struct {
 #define TEST(test, op) \
 	{ #test "_kptr_incorrect_lock_" #op, \
 	  "held lock and object are not in the same allocation\n" \
-	  "bpf_spin_lock at off=32 must be held for bpf_list_head" }, \
+	  "bpf_spin_lock at off=40 must be held for bpf_list_head" }, \
 	{ #test "_global_incorrect_lock_" #op, \
 	  "held lock and object are not in the same allocation\n" \
 	  "bpf_spin_lock at off=16 must be held for bpf_list_head" }, \
@@ -84,23 +84,23 @@ static struct {
 	{ "double_push_back", "arg#1 expected pointer to allocated object" },
 	{ "no_node_value_type", "bpf_list_node not found at offset=0" },
 	{ "incorrect_value_type",
-	  "operation on bpf_list_head expects arg#1 bpf_list_node at offset=40 in struct foo, "
+	  "operation on bpf_list_head expects arg#1 bpf_list_node at offset=48 in struct foo, "
 	  "but arg is at offset=0 in struct bar" },
 	{ "incorrect_node_var_off", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
-	{ "incorrect_node_off1", "bpf_list_node not found at offset=41" },
-	{ "incorrect_node_off2", "arg#1 offset=0, but expected bpf_list_node at offset=40 in struct foo" },
+	{ "incorrect_node_off1", "bpf_list_node not found at offset=49" },
+	{ "incorrect_node_off2", "arg#1 offset=0, but expected bpf_list_node at offset=48 in struct foo" },
 	{ "no_head_type", "bpf_list_head not found at offset=0" },
 	{ "incorrect_head_var_off1", "R1 doesn't have constant offset" },
 	{ "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
-	{ "incorrect_head_off1", "bpf_list_head not found at offset=17" },
+	{ "incorrect_head_off1", "bpf_list_head not found at offset=25" },
 	{ "incorrect_head_off2", "bpf_list_head not found at offset=1" },
 	{ "pop_front_off",
-	  "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) "
-	  "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) refs=2,4\n"
+	  "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) "
+	  "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n"
 	  "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },
 	{ "pop_back_off",
-	  "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) "
-	  "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) refs=2,4\n"
+	  "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) "
+	  "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n"
 	  "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },
 };
@@ -257,7 +257,7 @@ static struct btf *init_btf(void)
 	hid = btf__add_struct(btf, "bpf_list_head", 16);
 	if (!ASSERT_EQ(hid, LIST_HEAD, "btf__add_struct bpf_list_head"))
 		goto end;
-	nid = btf__add_struct(btf, "bpf_list_node", 16);
+	nid = btf__add_struct(btf, "bpf_list_node", 24);
 	if (!ASSERT_EQ(nid, LIST_NODE, "btf__add_struct bpf_list_node"))
 		goto end;
 	return btf;
@@ -276,7 +276,7 @@ static void list_and_rb_node_same_struct(bool refcount_field)
 	if (!ASSERT_OK_PTR(btf, "init_btf"))
 		return;
 
-	bpf_rb_node_btf_id = btf__add_struct(btf, "bpf_rb_node", 24);
+	bpf_rb_node_btf_id = btf__add_struct(btf, "bpf_rb_node", 32);
 	if (!ASSERT_GT(bpf_rb_node_btf_id, 0, "btf__add_struct bpf_rb_node"))
 		return;
@@ -286,17 +286,17 @@ static void list_and_rb_node_same_struct(bool refcount_field)
 		return;
 	}
 
-	id = btf__add_struct(btf, "bar", refcount_field ? 44 : 40);
+	id = btf__add_struct(btf, "bar", refcount_field ? 60 : 56);
 	if (!ASSERT_GT(id, 0, "btf__add_struct bar"))
 		return;
 	err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
 	if (!ASSERT_OK(err, "btf__add_field bar::a"))
 		return;
-	err = btf__add_field(btf, "c", bpf_rb_node_btf_id, 128, 0);
+	err = btf__add_field(btf, "c", bpf_rb_node_btf_id, 192, 0);
 	if (!ASSERT_OK(err, "btf__add_field bar::c"))
 		return;
 	if (refcount_field) {
-		err = btf__add_field(btf, "ref", bpf_refcount_btf_id, 320, 0);
+		err = btf__add_field(btf, "ref", bpf_refcount_btf_id, 448, 0);
 		if (!ASSERT_OK(err, "btf__add_field bar::ref"))
 			return;
 	}
@@ -527,7 +527,7 @@ static void test_btf(void)
 		btf = init_btf();
 		if (!ASSERT_OK_PTR(btf, "init_btf"))
 			break;
-		id = btf__add_struct(btf, "foo", 36);
+		id = btf__add_struct(btf, "foo", 44);
 		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
 			break;
 		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
@@ -536,7 +536,7 @@ static void test_btf(void)
 		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
 		if (!ASSERT_OK(err, "btf__add_field foo::b"))
 			break;
-		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
+		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
 		if (!ASSERT_OK(err, "btf__add_field foo::c"))
 			break;
 		id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
@@ -553,7 +553,7 @@ static void test_btf(void)
 		btf = init_btf();
 		if (!ASSERT_OK_PTR(btf, "init_btf"))
 			break;
-		id = btf__add_struct(btf, "foo", 36);
+		id = btf__add_struct(btf, "foo", 44);
 		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
 			break;
 		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
@@ -562,13 +562,13 @@ static void test_btf(void)
 		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
 		if (!ASSERT_OK(err, "btf__add_field foo::b"))
 			break;
-		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
+		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
 		if (!ASSERT_OK(err, "btf__add_field foo::c"))
 			break;
 		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
 		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
 			break;
-		id = btf__add_struct(btf, "bar", 36);
+		id = btf__add_struct(btf, "bar", 44);
 		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
 			break;
 		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
@@ -577,7 +577,7 @@ static void test_btf(void)
 		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
 		if (!ASSERT_OK(err, "btf__add_field bar::b"))
 			break;
-		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
+		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
 		if (!ASSERT_OK(err, "btf__add_field bar::c"))
 			break;
 		id = btf__add_decl_tag(btf, "contains:foo:b", 7, 0);
@@ -594,19 +594,19 @@ static void test_btf(void)
 		btf = init_btf();
 		if (!ASSERT_OK_PTR(btf, "init_btf"))
 			break;
-		id = btf__add_struct(btf, "foo", 20);
+		id = btf__add_struct(btf, "foo", 28);
 		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
 			break;
 		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
 		if (!ASSERT_OK(err, "btf__add_field foo::a"))
 			break;
-		err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
+		err = btf__add_field(btf, "b", SPIN_LOCK, 192, 0);
 		if (!ASSERT_OK(err, "btf__add_field foo::b"))
 			break;
 		id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);
 		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))
 			break;
-		id = btf__add_struct(btf, "bar", 16);
+		id = btf__add_struct(btf, "bar", 24);
 		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
 			break;
 		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
@@ -623,19 +623,19 @@ static void test_btf(void)
 		btf = init_btf();
 		if (!ASSERT_OK_PTR(btf, "init_btf"))
 			break;
-		id = btf__add_struct(btf, "foo", 20);
+		id = btf__add_struct(btf, "foo", 28);
 		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
 			break;
 		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
 		if (!ASSERT_OK(err, "btf__add_field foo::a"))
 			break;
-		err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
+		err = btf__add_field(btf, "b", SPIN_LOCK, 192, 0);
 		if (!ASSERT_OK(err, "btf__add_field foo::b"))
 			break;
 		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
 		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
 			break;
-		id = btf__add_struct(btf, "bar", 36);
+		id = btf__add_struct(btf, "bar", 44);
 		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
 			break;
 		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
@@ -644,13 +644,13 @@ static void test_btf(void)
 		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
 		if (!ASSERT_OK(err, "btf__add_field bar::b"))
 			break;
-		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
+		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
 		if (!ASSERT_OK(err, "btf__add_field bar::c"))
 			break;
 		id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
 		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
 			break;
-		id = btf__add_struct(btf, "baz", 16);
+		id = btf__add_struct(btf, "baz", 24);
 		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
 			break;
 		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
@@ -667,7 +667,7 @@ static void test_btf(void)
 		btf = init_btf();
 		if (!ASSERT_OK_PTR(btf, "init_btf"))
 			break;
-		id = btf__add_struct(btf, "foo", 36);
+		id = btf__add_struct(btf, "foo", 44);
 		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
 			break;
 		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
@@ -676,13 +676,13 @@ static void test_btf(void)
 		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
 		if (!ASSERT_OK(err, "btf__add_field foo::b"))
 			break;
-		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
+		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
 		if (!ASSERT_OK(err, "btf__add_field foo::c"))
 			break;
 		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
 		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
 			break;
-		id = btf__add_struct(btf, "bar", 36);
+		id = btf__add_struct(btf, "bar", 44);
 		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
 			break;
 		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
@@ -691,13 +691,13 @@ static void test_btf(void)
 		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
 		if (!ASSERT_OK(err, "btf__add_field bar:b"))
 			break;
-		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
+		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
 		if (!ASSERT_OK(err, "btf__add_field bar:c"))
 			break;
 		id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
 		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
 			break;
-		id = btf__add_struct(btf, "baz", 16);
+		id = btf__add_struct(btf, "baz", 24);
 		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
 			break;
 		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
@@ -726,7 +726,7 @@ static void test_btf(void)
 		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
 		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
 			break;
-		id = btf__add_struct(btf, "bar", 36);
+		id = btf__add_struct(btf, "bar", 44);
 		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
 			break;
 		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
@@ -735,13 +735,13 @@ static void test_btf(void)
 		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
 		if (!ASSERT_OK(err, "btf__add_field bar::b"))
 			break;
-		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
+		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
 		if (!ASSERT_OK(err, "btf__add_field bar::c"))
 			break;
 		id = btf__add_decl_tag(btf, "contains:baz:b", 7, 0);
 		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag"))
 			break;
-		id = btf__add_struct(btf, "baz", 36);
+		id = btf__add_struct(btf, "baz", 44);
 		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
 			break;
 		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
@@ -750,13 +750,13 @@ static void test_btf(void)
 		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
 		if (!ASSERT_OK(err, "btf__add_field bar::b"))
 			break;
-		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
+		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
 		if (!ASSERT_OK(err, "btf__add_field bar::c"))
 			break;
 		id = btf__add_decl_tag(btf, "contains:bam:a", 9, 0);
 		if (!ASSERT_EQ(id, 10, "btf__add_decl_tag contains:bam:a"))
 			break;
-		id = btf__add_struct(btf, "bam", 16);
+		id = btf__add_struct(btf, "bam", 24);
 		if (!ASSERT_EQ(id, 11, "btf__add_struct bam"))
 			break;
 		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);

--- a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
@@ -14,3 +14,7 @@ void test_refcounted_kptr(void)
 void test_refcounted_kptr_fail(void)
 {
 }
+
+void test_refcounted_kptr_wrong_owner(void)
+{
+}

--- a/tools/testing/selftests/bpf/progs/refcounted_kptr.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
@@ -24,7 +24,7 @@ struct {
 	__uint(type, BPF_MAP_TYPE_ARRAY);
 	__type(key, int);
 	__type(value, struct map_value);
-	__uint(max_entries, 1);
+	__uint(max_entries, 2);
 } stashed_nodes SEC(".maps");
 
 struct node_acquire {
@@ -42,6 +42,9 @@ private(A) struct bpf_list_head head __contains(node_data, l);
 private(B) struct bpf_spin_lock alock;
 private(B) struct bpf_rb_root aroot __contains(node_acquire, node);
 
+private(C) struct bpf_spin_lock block;
+private(C) struct bpf_rb_root broot __contains(node_data, r);
+
 static bool less(struct bpf_rb_node *node_a, const struct bpf_rb_node *node_b)
 {
 	struct node_data *a;
@@ -405,4 +408,93 @@ long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx)
 	return 0;
 }
 
+static long __stash_map_empty_xchg(struct node_data *n, int idx)
+{
+	struct map_value *mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
+
+	if (!mapval) {
+		bpf_obj_drop(n);
+		return 1;
+	}
+	n = bpf_kptr_xchg(&mapval->node, n);
+	if (n) {
+		bpf_obj_drop(n);
+		return 2;
+	}
+
+	return 0;
+}
+
+SEC("tc")
+long rbtree_wrong_owner_remove_fail_a1(void *ctx)
+{
+	struct node_data *n, *m;
+
+	n = bpf_obj_new(typeof(*n));
+	if (!n)
+		return 1;
+	m = bpf_refcount_acquire(n);
+
+	if (__stash_map_empty_xchg(n, 0)) {
+		bpf_obj_drop(m);
+		return 2;
+	}
+
+	if (__stash_map_empty_xchg(m, 1))
+		return 3;
+
+	return 0;
+}
+
+SEC("tc")
+long rbtree_wrong_owner_remove_fail_b(void *ctx)
+{
+	struct map_value *mapval;
+	struct node_data *n;
+	int idx = 0;
+
+	mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
+	if (!mapval)
+		return 1;
+
+	n = bpf_kptr_xchg(&mapval->node, NULL);
+	if (!n)
+		return 2;
+
+	bpf_spin_lock(&block);
+
+	bpf_rbtree_add(&broot, &n->r, less);
+
+	bpf_spin_unlock(&block);
+	return 0;
+}
+
+SEC("tc")
+long rbtree_wrong_owner_remove_fail_a2(void *ctx)
+{
+	struct map_value *mapval;
+	struct bpf_rb_node *res;
+	struct node_data *m;
+	int idx = 1;
+
+	mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
+	if (!mapval)
+		return 1;
+
+	m = bpf_kptr_xchg(&mapval->node, NULL);
+	if (!m)
+		return 2;
+
+	bpf_spin_lock(&lock);
+
+	/* make m non-owning ref */
+	bpf_list_push_back(&head, &m->l);
+	res = bpf_rbtree_remove(&root, &m->r);
+
+	bpf_spin_unlock(&lock);
+
+	if (res) {
+		bpf_obj_drop(container_of(res, struct node_data, r));
+		return 3;
+	}
+	return 0;
+}
+
 char _license[] SEC("license") = "GPL";