Commit 6e10b635 authored by Alexei Starovoitov

Merge branch 'introduce-bpf_wq'

Benjamin Tissoires says:

====================
Introduce bpf_wq

This is a follow-up to the sleepable bpf_timer series [0].

When discussing sleepable bpf_timer, we came to the conclusion that
bpf_wq was worth a try, as the two APIs are similar but distinct enough
to justify a new one.

So here it is.

I tried to keep as much common code as possible in kernel/bpf/helpers.c,
but I couldn't avoid some code duplication in kernel/bpf/verifier.c.

This series introduces basic bpf_wq support:
- creation is supported
- assignment is supported
- running a simple bpf_wq is also supported (see the sketch below)
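
Concretely, the API boils down to three kfuncs, declared later in this
series in bpf_experimental.h: bpf_wq_init(), bpf_wq_set_callback() and
bpf_wq_start(). A minimal sketch of the flow, distilled from the
selftests below (the map layout, names and "tc" attach point are
illustrative only):

#include "bpf_experimental.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

struct elem {
	struct bpf_wq w;	/* a bpf_wq must live inside a map value */
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} wq_map SEC(".maps");

/* Runs later from a workqueue, i.e. in a sleepable context */
static int wq_cb(void *map, int *key, struct bpf_wq *wq)
{
	return 0;
}

SEC("tc")
long schedule_work(void *ctx)
{
	struct elem *val;
	int key = 0;

	val = bpf_map_lookup_elem(&wq_map, &key);
	if (!val)
		return 1;
	if (bpf_wq_init(&val->w, &wq_map, 0))	/* bind the wq to its map */
		return 2;
	if (bpf_wq_set_callback(&val->w, wq_cb, 0))
		return 3;
	return bpf_wq_start(&val->w, 0);	/* queue the callback */
}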

We will probably need to extend the API further with:
- a full delayed_work API (can be piggy-backed on top with an appropriate
  flag)
- bpf_wq_cancel() <- apparently not, this would be shooting ourselves in
  the foot
- bpf_wq_cancel_sync() (for sleepable programs)
- documentation
---

For reference, the use cases I have in mind:

---

Basically, I need to be able to defer a HID-BPF program for the
following reasons (from the aforementioned patch):
1. defer an event:
   Sometimes we receive an out-of-proximity event, but the device cannot
   be trusted enough, and we need to ensure that we won't receive another
   one in the following n milliseconds. So we need to wait those n
   milliseconds, and then possibly re-inject that event into the stack.

2. inject new events in reaction to one given event:
   We might want to transform one given event into several. This is the
   case for macro keys, where a single key press is supposed to send
   a sequence of key presses. But this could also be used to patch faulty
   behavior, e.g. if a device forgets to send a release event.

3. communicate with the device in reaction to one event:
   We might want to communicate back to the device after a given event.
   For example, a device might send us an event saying that it came back
   from its sleeping state and needs to be re-initialized.

Currently we can achieve that by keeping a userspace program around,
raising a bpf event, and letting that userspace program inject the
events and commands.
However, we are then keeping that program alive as a daemon whose only
job is scheduling commands. There is no logic in it, so it doesn't
really justify an actual userspace wakeup, and a kernel workqueue seems
simpler to handle.
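
To make use case 3 concrete (the one a bpf_timer cannot serve, since its
callback runs in softirq context while talking to a device may sleep),
here is a hypothetical sketch. The "tc" attach point and reinit_device()
are stand-ins rather than APIs from this series, and the n-millisecond
wait of use case 1 would additionally need the delayed_work extension
mentioned above:

#include "bpf_experimental.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

struct hid_elem {
	struct bpf_wq w;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 64);
	__type(key, int);		/* device id, illustrative */
	__type(value, struct hid_elem);
} hid_wq_map SEC(".maps");

/* Runs in workqueue (sleepable) context: re-initializing the device may
 * sleep, which would not be allowed in a bpf_timer callback.
 */
static int reinit_device(void *map, int *dev_id, struct bpf_wq *wq)
{
	/* hypothetical: send the reports the device needs after waking up */
	return 0;
}

SEC("tc")	/* stand-in hook; a real user would attach a HID-BPF program */
long on_wakeup_event(void *ctx)
{
	struct hid_elem init = {}, *val;
	int dev_id = 0;		/* would come from the event */

	/* create the element on first use; re-arming an already
	 * initialized wq is left out of this sketch
	 */
	bpf_map_update_elem(&hid_wq_map, &dev_id, &init, BPF_NOEXIST);
	val = bpf_map_lookup_elem(&hid_wq_map, &dev_id);
	if (!val)
		return 0;
	if (bpf_wq_init(&val->w, &hid_wq_map, 0) ||
	    bpf_wq_set_callback(&val->w, reinit_device, 0))
		return 0;
	return bpf_wq_start(&val->w, 0);
}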

bpf_timers currently run in a soft IRQ context; this patch series
provides a sleepable alternative with bpf_wq.

Cheers,
Benjamin

[0] https://lore.kernel.org/all/20240408-hid-bpf-sleepable-v6-0-0499ddd91b94@kernel.org/

Changes in v2:
- took previous review into account
- mainly dropped BPF_F_WQ_SLEEPABLE
- Link to v1: https://lore.kernel.org/r/20240416-bpf_wq-v1-0-c9e66092f842@kernel.org

====================

Link: https://lore.kernel.org/r/20240420-bpf_wq-v2-0-6c986a5a741f@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents a7de265c 8290dba5
......@@ -185,7 +185,7 @@ struct bpf_map_ops {
enum {
/* Support at most 10 fields in a BTF type */
-BTF_FIELDS_MAX = 10,
+BTF_FIELDS_MAX = 11,
};
enum btf_field_type {
......@@ -202,6 +202,7 @@ enum btf_field_type {
BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE,
BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD,
BPF_REFCOUNT = (1 << 9),
BPF_WORKQUEUE = (1 << 10),
};
typedef void (*btf_dtor_kfunc_t)(void *);
......@@ -238,6 +239,7 @@ struct btf_record {
u32 field_mask;
int spin_lock_off;
int timer_off;
int wq_off;
int refcount_off;
struct btf_field fields[];
};
......@@ -312,6 +314,8 @@ static inline const char *btf_field_type_name(enum btf_field_type type)
return "bpf_spin_lock";
case BPF_TIMER:
return "bpf_timer";
case BPF_WORKQUEUE:
return "bpf_wq";
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
return "kptr";
......@@ -340,6 +344,8 @@ static inline u32 btf_field_type_size(enum btf_field_type type)
return sizeof(struct bpf_spin_lock);
case BPF_TIMER:
return sizeof(struct bpf_timer);
case BPF_WORKQUEUE:
return sizeof(struct bpf_wq);
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
......@@ -367,6 +373,8 @@ static inline u32 btf_field_type_align(enum btf_field_type type)
return __alignof__(struct bpf_spin_lock);
case BPF_TIMER:
return __alignof__(struct bpf_timer);
case BPF_WORKQUEUE:
return __alignof__(struct bpf_wq);
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
......@@ -406,6 +414,7 @@ static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
/* RB_ROOT_CACHED 0-inits, no need to do anything after memset */
case BPF_SPIN_LOCK:
case BPF_TIMER:
case BPF_WORKQUEUE:
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
......@@ -525,6 +534,7 @@ static inline void zero_map_value(struct bpf_map *map, void *dst)
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
void bpf_wq_cancel_and_free(void *timer);
void bpf_list_head_free(const struct btf_field *field, void *list_head,
struct bpf_spin_lock *spin_lock);
void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
......@@ -2195,6 +2205,7 @@ void bpf_map_free_record(struct bpf_map *map);
struct btf_record *btf_record_dup(const struct btf_record *rec);
bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj);
void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
......
......@@ -426,6 +426,7 @@ struct bpf_verifier_state {
* while they are still in use.
*/
bool used_as_loop_entry;
bool in_sleepable;
/* first and last insn idx of this verifier state */
u32 first_insn_idx;
......
......@@ -7306,6 +7306,10 @@ struct bpf_timer {
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_wq {
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_dynptr {
__u64 __opaque[2];
} __attribute__((aligned(8)));
......
......@@ -428,17 +428,21 @@ static void *array_map_vmalloc_addr(struct bpf_array *array)
return (void *)round_down((unsigned long)array, PAGE_SIZE);
}
-static void array_map_free_timers(struct bpf_map *map)
+static void array_map_free_timers_wq(struct bpf_map *map)
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
int i;
-/* We don't reset or free fields other than timer on uref dropping to zero. */
-if (!btf_record_has_field(map->record, BPF_TIMER))
-	return;
+/* We don't reset or free fields other than timer and workqueue
+ * on uref dropping to zero.
+ */
+if (btf_record_has_field(map->record, BPF_TIMER))
+	for (i = 0; i < array->map.max_entries; i++)
+		bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i));
-for (i = 0; i < array->map.max_entries; i++)
-	bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i));
+if (btf_record_has_field(map->record, BPF_WORKQUEUE))
+	for (i = 0; i < array->map.max_entries; i++)
+		bpf_obj_free_workqueue(map->record, array_map_elem_ptr(array, i));
}
/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
......@@ -782,7 +786,7 @@ const struct bpf_map_ops array_map_ops = {
.map_alloc = array_map_alloc,
.map_free = array_map_free,
.map_get_next_key = array_map_get_next_key,
-.map_release_uref = array_map_free_timers,
+.map_release_uref = array_map_free_timers_wq,
.map_lookup_elem = array_map_lookup_elem,
.map_update_elem = array_map_update_elem,
.map_delete_elem = array_map_delete_elem,
......
......@@ -3464,6 +3464,15 @@ static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask,
goto end;
}
}
if (field_mask & BPF_WORKQUEUE) {
if (!strcmp(name, "bpf_wq")) {
if (*seen_mask & BPF_WORKQUEUE)
return -E2BIG;
*seen_mask |= BPF_WORKQUEUE;
type = BPF_WORKQUEUE;
goto end;
}
}
field_mask_test_name(BPF_LIST_HEAD, "bpf_list_head");
field_mask_test_name(BPF_LIST_NODE, "bpf_list_node");
field_mask_test_name(BPF_RB_ROOT, "bpf_rb_root");
......@@ -3515,6 +3524,7 @@ static int btf_find_struct_field(const struct btf *btf,
switch (field_type) {
case BPF_SPIN_LOCK:
case BPF_TIMER:
case BPF_WORKQUEUE:
case BPF_LIST_NODE:
case BPF_RB_NODE:
case BPF_REFCOUNT:
......@@ -3582,6 +3592,7 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
switch (field_type) {
case BPF_SPIN_LOCK:
case BPF_TIMER:
case BPF_WORKQUEUE:
case BPF_LIST_NODE:
case BPF_RB_NODE:
case BPF_REFCOUNT:
......@@ -3816,6 +3827,7 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
rec->spin_lock_off = -EINVAL;
rec->timer_off = -EINVAL;
rec->wq_off = -EINVAL;
rec->refcount_off = -EINVAL;
for (i = 0; i < cnt; i++) {
field_type_size = btf_field_type_size(info_arr[i].type);
......@@ -3846,6 +3858,11 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
/* Cache offset for faster lookup at runtime */
rec->timer_off = rec->fields[i].offset;
break;
case BPF_WORKQUEUE:
WARN_ON_ONCE(rec->wq_off >= 0);
/* Cache offset for faster lookup at runtime */
rec->wq_off = rec->fields[i].offset;
break;
case BPF_REFCOUNT:
WARN_ON_ONCE(rec->refcount_off >= 0);
/* Cache offset for faster lookup at runtime */
......
......@@ -240,6 +240,26 @@ static void htab_free_prealloced_timers(struct bpf_htab *htab)
}
}
static void htab_free_prealloced_wq(struct bpf_htab *htab)
{
u32 num_entries = htab->map.max_entries;
int i;
if (!btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
return;
if (htab_has_extra_elems(htab))
num_entries += num_possible_cpus();
for (i = 0; i < num_entries; i++) {
struct htab_elem *elem;
elem = get_htab_elem(htab, i);
bpf_obj_free_workqueue(htab->map.record,
elem->key + round_up(htab->map.key_size, 8));
cond_resched();
}
}
static void htab_free_prealloced_fields(struct bpf_htab *htab)
{
u32 num_entries = htab->map.max_entries;
......@@ -1495,7 +1515,7 @@ static void delete_all_elements(struct bpf_htab *htab)
migrate_enable();
}
-static void htab_free_malloced_timers(struct bpf_htab *htab)
+static void htab_free_malloced_timers_or_wq(struct bpf_htab *htab, bool is_timer)
{
int i;
......@@ -1507,24 +1527,35 @@ static void htab_free_malloced_timers(struct bpf_htab *htab)
hlist_nulls_for_each_entry(l, n, head, hash_node) {
/* We only free timer on uref dropping to zero */
-bpf_obj_free_timer(htab->map.record, l->key + round_up(htab->map.key_size, 8));
+if (is_timer)
+	bpf_obj_free_timer(htab->map.record,
+			   l->key + round_up(htab->map.key_size, 8));
+else
+	bpf_obj_free_workqueue(htab->map.record,
+			       l->key + round_up(htab->map.key_size, 8));
}
cond_resched_rcu();
}
rcu_read_unlock();
}
-static void htab_map_free_timers(struct bpf_map *map)
+static void htab_map_free_timers_and_wq(struct bpf_map *map)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-/* We only free timer on uref dropping to zero */
-if (!btf_record_has_field(htab->map.record, BPF_TIMER))
-	return;
-if (!htab_is_prealloc(htab))
-	htab_free_malloced_timers(htab);
-else
-	htab_free_prealloced_timers(htab);
+/* We only free timer and workqueue on uref dropping to zero */
+if (btf_record_has_field(htab->map.record, BPF_TIMER)) {
+	if (!htab_is_prealloc(htab))
+		htab_free_malloced_timers_or_wq(htab, true);
+	else
+		htab_free_prealloced_timers(htab);
+}
+if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE)) {
+	if (!htab_is_prealloc(htab))
+		htab_free_malloced_timers_or_wq(htab, false);
+	else
+		htab_free_prealloced_wq(htab);
+}
}
/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
......@@ -2260,7 +2291,7 @@ const struct bpf_map_ops htab_map_ops = {
.map_alloc = htab_map_alloc,
.map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key,
-.map_release_uref = htab_map_free_timers,
+.map_release_uref = htab_map_free_timers_and_wq,
.map_lookup_elem = htab_map_lookup_elem,
.map_lookup_and_delete_elem = htab_map_lookup_and_delete_elem,
.map_update_elem = htab_map_update_elem,
......@@ -2281,7 +2312,7 @@ const struct bpf_map_ops htab_lru_map_ops = {
.map_alloc = htab_map_alloc,
.map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key,
-.map_release_uref = htab_map_free_timers,
+.map_release_uref = htab_map_free_timers_and_wq,
.map_lookup_elem = htab_lru_map_lookup_elem,
.map_lookup_and_delete_elem = htab_lru_map_lookup_and_delete_elem,
.map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
......
This diff is collapsed.
......@@ -559,6 +559,7 @@ void btf_record_free(struct btf_record *rec)
case BPF_SPIN_LOCK:
case BPF_TIMER:
case BPF_REFCOUNT:
case BPF_WORKQUEUE:
/* Nothing to release */
break;
default:
......@@ -608,6 +609,7 @@ struct btf_record *btf_record_dup(const struct btf_record *rec)
case BPF_SPIN_LOCK:
case BPF_TIMER:
case BPF_REFCOUNT:
case BPF_WORKQUEUE:
/* Nothing to acquire */
break;
default:
......@@ -659,6 +661,13 @@ void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
bpf_timer_cancel_and_free(obj + rec->timer_off);
}
void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj)
{
if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_WORKQUEUE)))
return;
bpf_wq_cancel_and_free(obj + rec->wq_off);
}
void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
{
const struct btf_field *fields;
......@@ -679,6 +688,9 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
case BPF_TIMER:
bpf_timer_cancel_and_free(field_ptr);
break;
case BPF_WORKQUEUE:
bpf_wq_cancel_and_free(field_ptr);
break;
case BPF_KPTR_UNREF:
WRITE_ONCE(*(u64 *)field_ptr, 0);
break;
......@@ -1085,7 +1097,7 @@ static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
map->record = btf_parse_fields(btf, value_type,
BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
-BPF_RB_ROOT | BPF_REFCOUNT,
+BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE,
map->value_size);
if (!IS_ERR_OR_NULL(map->record)) {
int i;
......@@ -1115,6 +1127,7 @@ static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
}
break;
case BPF_TIMER:
case BPF_WORKQUEUE:
if (map->map_type != BPF_MAP_TYPE_HASH &&
map->map_type != BPF_MAP_TYPE_LRU_HASH &&
map->map_type != BPF_MAP_TYPE_ARRAY) {
......
This diff is collapsed.
......@@ -7306,6 +7306,10 @@ struct bpf_timer {
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_wq {
__u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_dynptr {
__u64 __opaque[2];
} __attribute__((aligned(8)));
......
......@@ -470,4 +470,11 @@ extern int bpf_iter_css_new(struct bpf_iter_css *it,
extern struct cgroup_subsys_state *bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym;
extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;
extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym;
extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym;
extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
int (callback_fn)(void *map, int *key, struct bpf_wq *wq),
unsigned int flags__k, void *aux__ign) __ksym;
#define bpf_wq_set_callback(timer, cb, flags) \
bpf_wq_set_callback_impl(timer, cb, flags, NULL)
#endif
......@@ -494,6 +494,10 @@ __bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused
return arg;
}
__bpf_kfunc void bpf_kfunc_call_test_sleepable(void)
{
}
BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
......@@ -520,6 +524,7 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE)
BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)
static int bpf_testmod_ops_init(struct btf *btf)
......
......@@ -96,6 +96,7 @@ void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p) __ksym;
void bpf_kfunc_call_test_mem_len_fail2(__u64 *mem, int len) __ksym;
void bpf_kfunc_call_test_destructive(void) __ksym;
void bpf_kfunc_call_test_sleepable(void) __ksym;
void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p);
struct prog_test_member *bpf_kfunc_call_memb_acquire(void);
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Benjamin Tissoires */
#include <test_progs.h>
#include "wq.skel.h"
#include "wq_failures.skel.h"
void serial_test_wq(void)
{
struct wq *wq_skel = NULL;
int err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
RUN_TESTS(wq);
/* re-run the success test to check if the work item was actually executed */
wq_skel = wq__open_and_load();
if (!ASSERT_OK_PTR(wq_skel, "wq_skel_load"))
return;
err = wq__attach(wq_skel);
if (!ASSERT_OK(err, "wq_attach"))
return;
prog_fd = bpf_program__fd(wq_skel->progs.test_syscall_array_sleepable);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, 0, "test_run");
usleep(50); /* 10 usecs should be enough, but give it extra */
ASSERT_EQ(wq_skel->bss->ok_sleepable, (1 << 1), "ok_sleepable");
}
void serial_test_failures_wq(void)
{
LIBBPF_OPTS(bpf_test_run_opts, topts);
RUN_TESTS(wq_failures);
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Benjamin Tissoires
*/
#include "bpf_experimental.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "../bpf_testmod/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";
struct hmap_elem {
int counter;
struct bpf_timer timer; /* unused */
struct bpf_spin_lock lock; /* unused */
struct bpf_wq work;
};
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1000);
__type(key, int);
__type(value, struct hmap_elem);
} hmap SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(map_flags, BPF_F_NO_PREALLOC);
__uint(max_entries, 1000);
__type(key, int);
__type(value, struct hmap_elem);
} hmap_malloc SEC(".maps");
struct elem {
struct bpf_wq w;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 2);
__type(key, int);
__type(value, struct elem);
} array SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__uint(max_entries, 4);
__type(key, int);
__type(value, struct elem);
} lru SEC(".maps");
__u32 ok;
__u32 ok_sleepable;
static int test_elem_callback(void *map, int *key,
int (callback_fn)(void *map, int *key, struct bpf_wq *wq))
{
struct elem init = {}, *val;
struct bpf_wq *wq;
if ((ok & (1 << *key) ||
(ok_sleepable & (1 << *key))))
return -22;
if (map == &lru &&
bpf_map_update_elem(map, key, &init, 0))
return -1;
val = bpf_map_lookup_elem(map, key);
if (!val)
return -2;
wq = &val->w;
if (bpf_wq_init(wq, map, 0) != 0)
return -3;
if (bpf_wq_set_callback(wq, callback_fn, 0))
return -4;
if (bpf_wq_start(wq, 0))
return -5;
return 0;
}
static int test_hmap_elem_callback(void *map, int *key,
int (callback_fn)(void *map, int *key, struct bpf_wq *wq))
{
struct hmap_elem init = {}, *val;
struct bpf_wq *wq;
if ((ok & (1 << *key) ||
(ok_sleepable & (1 << *key))))
return -22;
if (bpf_map_update_elem(map, key, &init, 0))
return -1;
val = bpf_map_lookup_elem(map, key);
if (!val)
return -2;
wq = &val->work;
if (bpf_wq_init(wq, map, 0) != 0)
return -3;
if (bpf_wq_set_callback(wq, callback_fn, 0))
return -4;
if (bpf_wq_start(wq, 0))
return -5;
return 0;
}
/* callback for non-sleepable workqueue */
static int wq_callback(void *map, int *key, struct bpf_wq *work)
{
bpf_kfunc_common_test();
ok |= (1 << *key);
return 0;
}
/* callback for sleepable workqueue */
static int wq_cb_sleepable(void *map, int *key, struct bpf_wq *work)
{
bpf_kfunc_call_test_sleepable();
ok_sleepable |= (1 << *key);
return 0;
}
SEC("tc")
/* test that workqueues can be used from an array */
__retval(0)
long test_call_array_sleepable(void *ctx)
{
int key = 0;
return test_elem_callback(&array, &key, wq_cb_sleepable);
}
SEC("syscall")
/* Same test as above but from a sleepable context. */
__retval(0)
long test_syscall_array_sleepable(void *ctx)
{
int key = 1;
return test_elem_callback(&array, &key, wq_cb_sleepable);
}
SEC("tc")
/* test that workqueues can be used from a hashmap */
__retval(0)
long test_call_hash_sleepable(void *ctx)
{
int key = 2;
return test_hmap_elem_callback(&hmap, &key, wq_callback);
}
SEC("tc")
/* test that workqueues can be used from a hashmap with NO_PREALLOC. */
__retval(0)
long test_call_hash_malloc_sleepable(void *ctx)
{
int key = 3;
return test_hmap_elem_callback(&hmap_malloc, &key, wq_callback);
}
SEC("tc")
/* test that workqueues can be used from a LRU map */
__retval(0)
long test_call_lru_sleepable(void *ctx)
{
int key = 4;
return test_elem_callback(&lru, &key, wq_callback);
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Benjamin Tissoires
*/
#include "bpf_experimental.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "../bpf_testmod/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";
struct elem {
struct bpf_wq w;
};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 2);
__type(key, int);
__type(value, struct elem);
} array SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__uint(max_entries, 4);
__type(key, int);
__type(value, struct elem);
} lru SEC(".maps");
/* callback for non-sleepable workqueue */
static int wq_callback(void *map, int *key, struct bpf_wq *work)
{
bpf_kfunc_common_test();
return 0;
}
/* callback for sleepable workqueue */
static int wq_cb_sleepable(void *map, int *key, struct bpf_wq *work)
{
bpf_kfunc_call_test_sleepable();
return 0;
}
SEC("tc")
/* test that bpf_wq_init takes a map as a second argument
*/
__log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
__failure
__msg(": (85) call bpf_wq_init#") /* anchor message */
__msg("pointer in R2 isn't map pointer")
long test_wq_init_nomap(void *ctx)
{
struct bpf_wq *wq;
struct elem *val;
int key = 0;
val = bpf_map_lookup_elem(&array, &key);
if (!val)
return -1;
wq = &val->w;
if (bpf_wq_init(wq, &key, 0) != 0)
return -3;
return 0;
}
SEC("tc")
/* test that the workqueue is part of the map in bpf_wq_init
*/
__log_level(2)
__flag(BPF_F_TEST_STATE_FREQ)
__failure
__msg(": (85) call bpf_wq_init#") /* anchor message */
__msg("workqueue pointer in R1 map_uid=0 doesn't match map pointer in R2 map_uid=0")
long test_wq_init_wrong_map(void *ctx)
{
struct bpf_wq *wq;
struct elem *val;
int key = 0;
val = bpf_map_lookup_elem(&array, &key);
if (!val)
return -1;
wq = &val->w;
if (bpf_wq_init(wq, &lru, 0) != 0)
return -3;
return 0;
}
SEC("?tc")
__log_level(2)
__failure
/* check that the first argument of bpf_wq_set_callback()
* is a correct bpf_wq pointer.
*/
__msg(": (85) call bpf_wq_set_callback_impl#") /* anchor message */
__msg("arg#0 doesn't point to a map value")
long test_wrong_wq_pointer(void *ctx)
{
int key = 0;
struct bpf_wq *wq;
wq = bpf_map_lookup_elem(&array, &key);
if (!wq)
return 1;
if (bpf_wq_init(wq, &array, 0))
return 2;
if (bpf_wq_set_callback((void *)&wq, wq_callback, 0))
return 3;
return -22;
}
SEC("?tc")
__log_level(2)
__failure
/* check that the first argument of bpf_wq_set_callback()
* is a correct bpf_wq pointer.
*/
__msg(": (85) call bpf_wq_set_callback_impl#") /* anchor message */
__msg("off 1 doesn't point to 'struct bpf_wq' that is at 0")
long test_wrong_wq_pointer_offset(void *ctx)
{
int key = 0;
struct bpf_wq *wq;
wq = bpf_map_lookup_elem(&array, &key);
if (!wq)
return 1;
if (bpf_wq_init(wq, &array, 0))
return 2;
if (bpf_wq_set_callback((void *)wq + 1, wq_cb_sleepable, 0))
return 3;
return -22;
}