Commit 4fcac46c authored by Andrii Nakryiko, committed by Alexei Starovoitov

libbpf: only add BPF_F_MMAPABLE flag for data maps with global vars

Teach libbpf not to add the BPF_F_MMAPABLE flag unnecessarily for ARRAY maps
that back data sections, if such data sections don't expose any
variables to user-space. Exposed variables are those that have
STB_GLOBAL or STB_WEAK ELF binding and correspond to a BTF VAR with
BTF_VAR_GLOBAL_ALLOCATED linkage.
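
As an illustration (a sketch; variable names here are invented), the
distinction looks like this on the BPF side:

  #include <bpf/bpf_helpers.h>

  char LICENSE[] SEC("license") = "GPL";

  int exposed_cnt;            /* STB_GLOBAL: exposed, keeps .bss mmapable */
  int __weak weak_cnt;        /* STB_WEAK: also exposed */
  static int internal_cnt;    /* STB_LOCAL: not exposed through skeleton */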

The overall idea is that if a data section doesn't have any variable that
is exposed through the BPF skeleton, then there is no reason to make the
backing BPF array mmapable. Making a BPF array mmapable is not a free
no-op, because the BPF verifier doesn't allow users to put special objects
(such as BPF spin locks, RB tree nodes, linked list nodes, kptrs, etc.;
anything with sensitive internal state that should not be modified
arbitrarily from user space) into mmapable arrays, as there is no way to
prevent user space from corrupting such sensitive state via direct access
to the memory-mapped region.

By making sure that libbpf doesn't add the BPF_F_MMAPABLE flag to BPF array
maps corresponding to data sections that only have static variables
(which are not supposed to be visible to user space according to libbpf
and BPF skeleton rules), users can now have spin locks, kptrs, etc. in
either the default .bss/.data sections or custom .data.* sections (assuming
there are no global variables in such sections).
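
As a sketch of what this enables (section and variable names invented
here; assumes a verifier recent enough to accept spin locks in global
data), a BPF program can keep a spin lock in a static-only data section:

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  /* everything in this section is static, so libbpf won't set
   * BPF_F_MMAPABLE on the backing ARRAY map and the verifier can
   * accept the embedded bpf_spin_lock
   */
  static struct {
          struct bpf_spin_lock lock;
          long counter;
  } stats SEC(".data.stats");

  SEC("tp/syscalls/sys_enter_getpid")
  int count_getpid(void *ctx)
  {
          bpf_spin_lock(&stats.lock);
          stats.counter++;
          bpf_spin_unlock(&stats.lock);
          return 0;
  }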

The only possible hiccup with this approach is the need to use global
variables during BPF static linking, even when such variables are not
intended to be shared with user space through the BPF skeleton. To allow
such scenarios, extend libbpf's handling of the STV_HIDDEN ELF visibility
attribute to variables. Libbpf already treats hidden global BPF
subprograms as static subprograms and adjusts BTF accordingly, making the
BPF verifier verify such subprograms as static ones while preserving the
entire BPF verifier state between subprog calls. This patch teaches libbpf
to treat hidden global variables as static ones and adjust their BTF
information accordingly as well. This makes it possible to share variables
between multiple object files during static linking while still keeping
them internal to the BPF program and not exposing them through the BPF
skeleton.
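
For example (a sketch with a hypothetical shared_state variable), the
__hidden macro from bpf_helpers.h applies STV_HIDDEN visibility, so the
variable links across object files but is downgraded to BTF_VAR_STATIC
and stays out of the skeleton:

  /* a.bpf.c */
  #include <bpf/bpf_helpers.h>

  __hidden int shared_state; /* shared at link time, not with user space */

  /* b.bpf.c -- resolved against a.bpf.c by the BPF static linker */
  extern int shared_state;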

Note that if a user has some advanced scenario where they absolutely
need the BPF_F_MMAPABLE flag on a .data/.bss/.rodata BPF array map despite
only having static variables, they can still achieve this by forcing the
flag through the explicit bpf_map__set_map_flags() API.
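
A sketch of that escape hatch from the user-space side (assumes an
already opened but not yet loaded struct bpf_object *obj):

  #include <errno.h>
  #include <bpf/libbpf.h>

  static int force_data_mmapable(struct bpf_object *obj)
  {
          /* internal .data map is looked up by its real section name */
          struct bpf_map *map = bpf_object__find_map_by_name(obj, ".data");

          if (!map)
                  return -ENOENT;
          return bpf_map__set_map_flags(map,
                                        bpf_map__map_flags(map) | BPF_F_MMAPABLE);
  }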
Acked-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Dave Marchevsky <davemarchevsky@fb.com>
Link: https://lore.kernel.org/r/20221019002816.359650-3-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent f33f742d
tools/lib/bpf/libbpf.c

@@ -1577,7 +1577,38 @@ static char *internal_map_name(struct bpf_object *obj, const char *real_name)
 }
 
 static int
-bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map);
+map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map);
+
+/* Internal BPF map is mmap()'able only if at least one of corresponding
+ * DATASEC's VARs are to be exposed through BPF skeleton. I.e., it's a GLOBAL
+ * variable and it's not marked as __hidden (which turns it into, effectively,
+ * a STATIC variable).
+ */
+static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map)
+{
+	const struct btf_type *t, *vt;
+	struct btf_var_secinfo *vsi;
+	int i, n;
+
+	if (!map->btf_value_type_id)
+		return false;
+
+	t = btf__type_by_id(obj->btf, map->btf_value_type_id);
+	if (!btf_is_datasec(t))
+		return false;
+
+	vsi = btf_var_secinfos(t);
+	for (i = 0, n = btf_vlen(t); i < n; i++, vsi++) {
+		vt = btf__type_by_id(obj->btf, vsi->type);
+		if (!btf_is_var(vt))
+			continue;
+
+		if (btf_var(vt)->linkage != BTF_VAR_STATIC)
+			return true;
+	}
+
+	return false;
+}
 
 static int
 bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
@@ -1609,6 +1640,11 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
 	def->max_entries = 1;
 	def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
 			 ? BPF_F_RDONLY_PROG : 0;
-	def->map_flags |= BPF_F_MMAPABLE;
+
+	/* failures are fine because of maps like .rodata.str1.1 */
+	(void) map_fill_btf_type_info(obj, map);
+
+	if (map_is_mmapable(obj, map))
+		def->map_flags |= BPF_F_MMAPABLE;
 
 	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
@@ -1626,9 +1662,6 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
 		return err;
 	}
 
-	/* failures are fine because of maps like .rodata.str1.1 */
-	(void) bpf_map_find_btf_info(obj, map);
-
 	if (data)
 		memcpy(map->mmaped, data, data_sz);
@@ -2540,7 +2573,7 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
 		fill_map_from_def(map->inner_map, &inner_def);
 	}
 
-	err = bpf_map_find_btf_info(obj, map);
+	err = map_fill_btf_type_info(obj, map);
 	if (err)
 		return err;
@@ -2848,6 +2881,7 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
 	__u32 size = 0, i, vars = btf_vlen(t);
 	const char *sec_name = btf__name_by_offset(btf, t->name_off);
 	struct btf_var_secinfo *vsi;
+	bool fixup_offsets = false;
 	int err;
 
 	if (!sec_name) {
@@ -2855,13 +2889,24 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
 		return -ENOENT;
 	}
 
-	/* extern-backing datasecs (.ksyms, .kconfig) have their size and
-	 * variable offsets set at the previous step, so we skip any fixups
-	 * for such sections
+	/* Extern-backing datasecs (.ksyms, .kconfig) have their size and
+	 * variable offsets set at the previous step. Further, not every
+	 * extern BTF VAR has corresponding ELF symbol preserved, so we skip
+	 * all fixups altogether for such sections and go straight to sorting
+	 * VARs within their DATASEC.
 	 */
-	if (t->size)
+	if (strcmp(sec_name, KCONFIG_SEC) == 0 || strcmp(sec_name, KSYMS_SEC) == 0)
 		goto sort_vars;
 
-	err = find_elf_sec_sz(obj, sec_name, &size);
-	if (err || !size) {
-		pr_debug("sec '%s': failed to determine size from ELF: size %u, err %d\n",
+	/* Clang leaves DATASEC size and VAR offsets as zeroes, so we need to
+	 * fix this up. But BPF static linker already fixes this up and fills
+	 * all the sizes and offsets during static linking. So this step has
+	 * to be optional. But the STV_HIDDEN handling is non-optional for any
+	 * non-extern DATASEC, so the variable fixup loop below handles both
+	 * functions at the same time, paying the cost of BTF VAR <-> ELF
+	 * symbol matching just once.
+	 */
+	if (t->size == 0) {
+		err = find_elf_sec_sz(obj, sec_name, &size);
+		if (err || !size) {
+			pr_debug("sec '%s': failed to determine size from ELF: size %u, err %d\n",
@@ -2870,6 +2915,8 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
 	}
 
-	t->size = size;
+		t->size = size;
+		fixup_offsets = true;
+	}
 
 	for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
 		const struct btf_type *t_var;
@@ -2901,7 +2948,19 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
 			return -ENOENT;
 		}
 
-		vsi->offset = sym->st_value;
+		if (fixup_offsets)
+			vsi->offset = sym->st_value;
+
+		/* if variable is a global/weak symbol, but has restricted
+		 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF VAR
+		 * as static. This follows similar logic for functions (BPF
+		 * subprogs) and influences libbpf's further decisions about
+		 * whether to make global data BPF array maps as
+		 * BPF_F_MMAPABLE.
+		 */
+		if (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
+		    || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL)
+			var->linkage = BTF_VAR_STATIC;
 	}
 
 sort_vars:
@@ -4223,7 +4282,7 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Dat
 	return 0;
 }
 
-static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
+static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map)
 {
 	int id;