Commit 19f4b532 authored by Andrii Nakryiko

Merge branch 'bpftool: Fix skeletons compilation for older kernels'

Quentin Monnet says:

====================
At runtime, bpftool may run its own BPF programs to get the pids of
processes referencing BPF programs, or to profile programs. The skeletons
for these programs rely on a vmlinux.h header and may fail to compile when
building bpftool on hosts running older kernels, where some structs or
enums are not defined. In this set, we address this issue by using local
definitions for struct perf_event, struct bpf_perf_link,
BPF_LINK_TYPE_PERF_EVENT (pids.bpf.c) and struct bpf_perf_event_value
(profiler.bpf.c).
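
As a minimal sketch of the pattern (illustrative only; get_cookie is a
hypothetical helper, not part of the patches): libbpf treats everything
from the "___" suffix onward as a local "flavor" name, so the type below
is matched against the kernel's own struct perf_event in BTF, and
preserve_access_index turns each field access into a CO-RE relocation
resolved at load time rather than a fixed offset baked in at compile time:

  struct perf_event___local {
          u64 bpf_cookie;
  } __attribute__((preserve_access_index));

  static __u64 get_cookie(struct perf_event___local *event)
  {
          /* offset of bpf_cookie is relocated against the running kernel */
          return BPF_CORE_READ(event, bpf_cookie);
  }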

This set contains patches 1 to 3 from Alexander Lobakin's series, "bpf:
random unpopular userspace fixes (32 bit et al)" (v2) [0], from April 2022.
An additional patch defines a local version of BPF_LINK_TYPE_PERF_EVENT in
bpftool's pids.bpf.c.

[0] https://lore.kernel.org/bpf/20220421003152.339542-1-alobakin@pm.me/
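
To illustrate the guard that this additional patch enables (a standalone
sketch assuming vmlinux.h and bpf/bpf_core_read.h are included;
link_is_perf_event is a hypothetical helper): the enum flavor mirrors
BPF_LINK_TYPE_PERF_EVENT, which only exists on kernels v5.15 and later,
so its presence is probed in the target kernel's BTF before the value is
compared:

  enum bpf_link_type___local {
          BPF_LINK_TYPE_PERF_EVENT___local = 7,
  };

  static bool link_is_perf_event(struct bpf_link *link)
  {
          if (!bpf_core_enum_value_exists(enum bpf_link_type___local,
                                          BPF_LINK_TYPE_PERF_EVENT___local))
                  return false; /* kernel predates BPF perf_event links */

          return link->type == bpf_core_enum_value(enum bpf_link_type___local,
                                                   BPF_LINK_TYPE_PERF_EVENT___local);
  }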

v2: Fixed description (CO-RE for container_of()) in patch 2.

Cc: Alexander Lobakin <aleksander.lobakin@intel.com>
Cc: Michal Suchánek <msuchanek@suse.de>

Alexander Lobakin (3):
  bpftool: use a local copy of perf_event to fix accessing ::bpf_cookie
  bpftool: define a local bpf_perf_link to fix accessing its fields
  bpftool: use a local bpf_perf_event_value to fix accessing its fields
====================
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
parents c628747c 658ac068
--- a/tools/bpf/bpftool/skeleton/pids.bpf.c
+++ b/tools/bpf/bpftool/skeleton/pids.bpf.c
@@ -15,6 +15,19 @@ enum bpf_obj_type {
 	BPF_OBJ_BTF,
 };
 
+struct bpf_perf_link___local {
+	struct bpf_link link;
+	struct file *perf_file;
+} __attribute__((preserve_access_index));
+
+struct perf_event___local {
+	u64 bpf_cookie;
+} __attribute__((preserve_access_index));
+
+enum bpf_link_type___local {
+	BPF_LINK_TYPE_PERF_EVENT___local = 7,
+};
+
 extern const void bpf_link_fops __ksym;
 extern const void bpf_map_fops __ksym;
 extern const void bpf_prog_fops __ksym;
@@ -41,10 +54,10 @@ static __always_inline __u32 get_obj_id(void *ent, enum bpf_obj_type type)
 /* could be used only with BPF_LINK_TYPE_PERF_EVENT links */
 static __u64 get_bpf_cookie(struct bpf_link *link)
 {
-	struct bpf_perf_link *perf_link;
-	struct perf_event *event;
+	struct bpf_perf_link___local *perf_link;
+	struct perf_event___local *event;
 
-	perf_link = container_of(link, struct bpf_perf_link, link);
+	perf_link = container_of(link, struct bpf_perf_link___local, link);
 	event = BPF_CORE_READ(perf_link, perf_file, private_data);
 	return BPF_CORE_READ(event, bpf_cookie);
 }
@@ -84,10 +97,13 @@ int iter(struct bpf_iter__task_file *ctx)
 	e.pid = task->tgid;
 	e.id = get_obj_id(file->private_data, obj_type);
 
-	if (obj_type == BPF_OBJ_LINK) {
+	if (obj_type == BPF_OBJ_LINK &&
+	    bpf_core_enum_value_exists(enum bpf_link_type___local,
+				       BPF_LINK_TYPE_PERF_EVENT___local)) {
 		struct bpf_link *link = (struct bpf_link *) file->private_data;
 
-		if (BPF_CORE_READ(link, type) == BPF_LINK_TYPE_PERF_EVENT) {
+		if (link->type == bpf_core_enum_value(enum bpf_link_type___local,
+						      BPF_LINK_TYPE_PERF_EVENT___local)) {
 			e.has_bpf_cookie = true;
 			e.bpf_cookie = get_bpf_cookie(link);
 		}
--- a/tools/bpf/bpftool/skeleton/profiler.bpf.c
+++ b/tools/bpf/bpftool/skeleton/profiler.bpf.c
@@ -4,6 +4,12 @@
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
+struct bpf_perf_event_value___local {
+	__u64 counter;
+	__u64 enabled;
+	__u64 running;
+} __attribute__((preserve_access_index));
+
 /* map of perf event fds, num_cpu * num_metric entries */
 struct {
 	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
@@ -15,14 +21,14 @@ struct {
 struct {
 	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
 	__uint(key_size, sizeof(u32));
-	__uint(value_size, sizeof(struct bpf_perf_event_value));
+	__uint(value_size, sizeof(struct bpf_perf_event_value___local));
 } fentry_readings SEC(".maps");
 
 /* accumulated readings */
 struct {
 	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
 	__uint(key_size, sizeof(u32));
-	__uint(value_size, sizeof(struct bpf_perf_event_value));
+	__uint(value_size, sizeof(struct bpf_perf_event_value___local));
 } accum_readings SEC(".maps");
 
 /* sample counts, one per cpu */
@@ -39,7 +45,7 @@ const volatile __u32 num_metric = 1;
 SEC("fentry/XXX")
 int BPF_PROG(fentry_XXX)
 {
-	struct bpf_perf_event_value *ptrs[MAX_NUM_MATRICS];
+	struct bpf_perf_event_value___local *ptrs[MAX_NUM_MATRICS];
 	u32 key = bpf_get_smp_processor_id();
 	u32 i;
 
@@ -53,10 +59,10 @@ int BPF_PROG(fentry_XXX)
 	}
 
 	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
-		struct bpf_perf_event_value reading;
+		struct bpf_perf_event_value___local reading;
 		int err;
 
-		err = bpf_perf_event_read_value(&events, key, &reading,
+		err = bpf_perf_event_read_value(&events, key, (void *)&reading,
 						sizeof(reading));
 		if (err)
 			return 0;
@@ -68,14 +74,14 @@ int BPF_PROG(fentry_XXX)
 }
 
 static inline void
-fexit_update_maps(u32 id, struct bpf_perf_event_value *after)
+fexit_update_maps(u32 id, struct bpf_perf_event_value___local *after)
 {
-	struct bpf_perf_event_value *before, diff;
+	struct bpf_perf_event_value___local *before, diff;
 
 	before = bpf_map_lookup_elem(&fentry_readings, &id);
 	/* only account samples with a valid fentry_reading */
 	if (before && before->counter) {
-		struct bpf_perf_event_value *accum;
+		struct bpf_perf_event_value___local *accum;
 
 		diff.counter = after->counter - before->counter;
 		diff.enabled = after->enabled - before->enabled;
@@ -93,7 +99,7 @@ fexit_update_maps(u32 id, struct bpf_perf_event_value *after)
 SEC("fexit/XXX")
 int BPF_PROG(fexit_XXX)
 {
-	struct bpf_perf_event_value readings[MAX_NUM_MATRICS];
+	struct bpf_perf_event_value___local readings[MAX_NUM_MATRICS];
 	u32 cpu = bpf_get_smp_processor_id();
 	u32 i, zero = 0;
 	int err;
@@ -102,7 +108,8 @@ int BPF_PROG(fexit_XXX)
 	/* read all events before updating the maps, to reduce error */
 	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
 		err = bpf_perf_event_read_value(&events, cpu + i * num_cpu,
-						readings + i, sizeof(*readings));
+						(void *)(readings + i),
+						sizeof(*readings));
 		if (err)
 			return 0;
 	}