Commit 6436408e authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2019-01-20

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix an out-of-bounds access in __bpf_redirect_no_mac, from Willem.

2) Fix bpf_setsockopt to reset sock dst on SO_MARK changes, from Peter.

3) Fix map in map masking to prevent out-of-bounds access under
   speculative execution, from Daniel.

4) Fix bpf_setsockopt's SO_MAX_PACING_RATE to support TCP internal
   pacing, from Yuchung.

5) Fix json writer license in bpftool, from Thomas.

6) Fix AF_XDP to check if a queue actually exists during umem
   setup, from Krzysztof.

7) Several fixes to BPF stackmap's build id handling. Another fix
   for the bpftool build to account for libbfd variations wrt linking
   requirements, from Stanislav.

8) Fix BPF samples build with clang by working around missing asm
   goto, from Yonghong.

9) Fix libbpf to retry program load on signal interrupt, from Lorenz.

10) Various minor compile warning fixes in BPF code, from Mathieu.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents df133f3f e7c87bd6
@@ -467,7 +467,7 @@ static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
 	return kind_ops[BTF_INFO_KIND(t->info)];
 }
 
-bool btf_name_offset_valid(const struct btf *btf, u32 offset)
+static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
 {
 	return BTF_STR_OFFSET_VALID(offset) &&
 	       offset < btf->hdr.str_len;
...
@@ -718,6 +718,7 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 	case BPF_FUNC_trace_printk:
 		if (capable(CAP_SYS_ADMIN))
 			return bpf_get_trace_printk_proto();
+		/* fall through */
 	default:
 		return NULL;
 	}
...
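Note on the hunk above (cf. item 10): the added comment is the annotation
GCC's -Wimplicit-fallthrough recognizes, silencing the warning for an
intentional fall-through. A minimal stand-alone illustration, not from the
patch:

	#include <stdio.h>

	enum cmd { CMD_PREPARE, CMD_RUN };

	static void handle(enum cmd c)
	{
		switch (c) {
		case CMD_PREPARE:
			printf("prepare\n");
			/* fall through */	/* recognized by -Wimplicit-fallthrough */
		case CMD_RUN:
			printf("run\n");
			break;
		}
	}

	int main(void) { handle(CMD_PREPARE); return 0; }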
@@ -12,6 +12,7 @@
 struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
 {
 	struct bpf_map *inner_map, *inner_map_meta;
+	u32 inner_map_meta_size;
 	struct fd f;
 
 	f = fdget(inner_map_ufd);
@@ -36,7 +37,12 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
 		return ERR_PTR(-EINVAL);
 	}
 
-	inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER);
+	inner_map_meta_size = sizeof(*inner_map_meta);
+	/* In some cases verifier needs to access beyond just base map. */
+	if (inner_map->ops == &array_map_ops)
+		inner_map_meta_size = sizeof(struct bpf_array);
+
+	inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER);
 	if (!inner_map_meta) {
 		fdput(f);
 		return ERR_PTR(-ENOMEM);
@@ -46,9 +52,16 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
 	inner_map_meta->key_size = inner_map->key_size;
 	inner_map_meta->value_size = inner_map->value_size;
 	inner_map_meta->map_flags = inner_map->map_flags;
-	inner_map_meta->ops = inner_map->ops;
 	inner_map_meta->max_entries = inner_map->max_entries;
 
+	/* Misc members not needed in bpf_map_meta_equal() check. */
+	inner_map_meta->ops = inner_map->ops;
+	if (inner_map->ops == &array_map_ops) {
+		inner_map_meta->unpriv_array = inner_map->unpriv_array;
+		container_of(inner_map_meta, struct bpf_array, map)->index_mask =
+		     container_of(inner_map, struct bpf_array, map)->index_mask;
+	}
+
 	fdput(f);
 	return inner_map_meta;
 }
...
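Background on item 3: for unprivileged programs the verifier rewrites array
lookups so the index is AND-ed with index_mask (roundup_pow_of_two(max_entries)
minus 1), which keeps even speculatively executed out-of-bounds accesses inside
the array's backing storage. The map-in-map meta map used during verification
previously did not carry unpriv_array/index_mask, so inner array maps could be
verified against the wrong mask. A minimal user-space sketch of the masking
idea, simplified and not the actual kernel code:

	#include <stdint.h>
	#include <stdio.h>

	struct toy_array {
		uint32_t nr_entries;
		uint32_t index_mask;	/* roundup_pow_of_two(nr_entries) - 1 */
		uint64_t values[8];	/* power-of-two-sized backing storage */
	};

	static uint64_t *toy_lookup(struct toy_array *a, uint32_t idx)
	{
		if (idx >= a->nr_entries)
			return NULL;	/* architectural bounds check */
		/* Even if the branch above is mispredicted, the AND keeps the
		 * speculative load inside values[]: */
		return &a->values[idx & a->index_mask];
	}

	int main(void)
	{
		struct toy_array a = { .nr_entries = 5, .index_mask = 7 };

		a.values[3] = 42;
		printf("%llu\n", (unsigned long long)*toy_lookup(&a, 3));
		return 0;
	}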
@@ -180,11 +180,14 @@ static inline int stack_map_parse_build_id(void *page_addr,
 		if (nhdr->n_type == BPF_BUILD_ID &&
 		    nhdr->n_namesz == sizeof("GNU") &&
-		    nhdr->n_descsz == BPF_BUILD_ID_SIZE) {
+		    nhdr->n_descsz > 0 &&
+		    nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
 			memcpy(build_id,
 			       note_start + note_offs +
 			       ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
-			       BPF_BUILD_ID_SIZE);
+			       nhdr->n_descsz);
+			memset(build_id + nhdr->n_descsz, 0,
+			       BPF_BUILD_ID_SIZE - nhdr->n_descsz);
 			return 0;
 		}
 		new_offs = note_offs + sizeof(Elf32_Nhdr) +
@@ -311,6 +314,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 		for (i = 0; i < trace_nr; i++) {
 			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
 			id_offs[i].ip = ips[i];
+			memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
 		}
 		return;
 	}
@@ -321,6 +325,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 			/* per entry fall back to ips */
 			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
 			id_offs[i].ip = ips[i];
+			memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
 			continue;
 		}
 		id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
...
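Background on item 7: a GNU build ID is an ELF note with name "GNU" and type
NT_GNU_BUILD_ID whose descriptor is commonly 20 bytes (SHA-1) but can be
shorter, e.g. a 16-byte MD5/UUID, which is why the check now accepts any
n_descsz up to BPF_BUILD_ID_SIZE (20) and zero-pads the remainder. A
user-space sketch of the same note walk, simplified and with bounds checks
trimmed, assuming 32-bit note headers as the kernel code does:

	#include <elf.h>
	#include <stdint.h>
	#include <string.h>

	#define BUILD_ID_MAX	20			/* mirrors BPF_BUILD_ID_SIZE */
	#define NOTE_ALIGN(x)	(((x) + 3) & ~3u)	/* note fields are 4-byte aligned */

	static int parse_build_id(const char *note_start, uint32_t note_size,
				  uint8_t build_id[BUILD_ID_MAX])
	{
		uint32_t off = 0;

		while (off + sizeof(Elf32_Nhdr) < note_size) {
			const Elf32_Nhdr *nhdr = (const Elf32_Nhdr *)(note_start + off);
			const char *name = note_start + off + sizeof(Elf32_Nhdr);
			const char *desc = name + NOTE_ALIGN(nhdr->n_namesz);

			if (nhdr->n_type == NT_GNU_BUILD_ID &&
			    nhdr->n_namesz == sizeof("GNU") &&
			    !memcmp(name, "GNU", sizeof("GNU")) &&
			    nhdr->n_descsz > 0 && nhdr->n_descsz <= BUILD_ID_MAX) {
				/* Copy what is there and zero-pad, as the fix does. */
				memcpy(build_id, desc, nhdr->n_descsz);
				memset(build_id + nhdr->n_descsz, 0,
				       BUILD_ID_MAX - nhdr->n_descsz);
				return 0;
			}
			off += sizeof(Elf32_Nhdr) + NOTE_ALIGN(nhdr->n_namesz) +
			       NOTE_ALIGN(nhdr->n_descsz);
		}
		return -1;
	}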
@@ -2020,9 +2020,9 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
 static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
 				 u32 flags)
 {
-	/* skb->mac_len is not set on normal egress */
-	unsigned int mlen = skb->network_header - skb->mac_header;
+	unsigned int mlen = skb_network_offset(skb);
 
+	if (mlen) {
 		__skb_pull(skb, mlen);
 
 		/* At ingress, the mac header has already been pulled once.
@@ -2032,6 +2032,7 @@ static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
 		 */
 		if (!skb_at_tc_ingress(skb))
 			skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+	}
 
 	skb_pop_mac_header(skb);
 	skb_reset_mac_len(skb);
 	return flags & BPF_F_INGRESS ?
@@ -4119,6 +4120,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
 			sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
 			break;
 		case SO_MAX_PACING_RATE: /* 32bit version */
+			if (val != ~0U)
+				cmpxchg(&sk->sk_pacing_status,
+					SK_PACING_NONE,
+					SK_PACING_NEEDED);
 			sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
 			sk->sk_pacing_rate = min(sk->sk_pacing_rate,
 						 sk->sk_max_pacing_rate);
@@ -4132,7 +4137,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
 			sk->sk_rcvlowat = val ? : 1;
 			break;
 		case SO_MARK:
-			sk->sk_mark = val;
+			if (sk->sk_mark != val) {
+				sk->sk_mark = val;
+				sk_dst_reset(sk);
+			}
 			break;
 		default:
 			ret = -EINVAL;
@@ -5309,7 +5317,7 @@ bpf_base_func_proto(enum bpf_func_id func_id)
 	case BPF_FUNC_trace_printk:
 		if (capable(CAP_SYS_ADMIN))
 			return bpf_get_trace_printk_proto();
-		/* else: fall through */
+		/* else, fall through */
 	default:
 		return NULL;
 	}
...
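For items 2 and 4 above, both fixed paths are reachable from a sockops
program via bpf_setsockopt(). A hypothetical program exercising both; the
helper header, section name, and fallback #defines are illustrative
(selftests-era conventions), not part of the patch:

	#include <linux/bpf.h>
	#include "bpf_helpers.h"	/* assumption: selftests-style helper header */

	#ifndef SOL_SOCKET
	#define SOL_SOCKET		1
	#endif
	#ifndef SO_MARK
	#define SO_MARK			36
	#endif
	#ifndef SO_MAX_PACING_RATE
	#define SO_MAX_PACING_RATE	47
	#endif

	SEC("sockops")
	int set_opts(struct bpf_sock_ops *skops)
	{
		int mark = 42;			/* a change now triggers sk_dst_reset() */
		int rate = 5 * 1000 * 1000;	/* != ~0U, so TCP internal pacing is
						 * flagged via sk_pacing_status */

		if (skops->op == BPF_SOCK_OPS_TCP_CONNECT_CB) {
			bpf_setsockopt(skops, SOL_SOCKET, SO_MARK,
				       &mark, sizeof(mark));
			bpf_setsockopt(skops, SOL_SOCKET, SO_MAX_PACING_RATE,
				       &rate, sizeof(rate));
		}
		return 1;
	}

	char _license[] SEC("license") = "GPL";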
@@ -63,6 +63,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
 			 lwt->name ? : "<unknown>");
 		ret = BPF_OK;
 	} else {
+		skb_reset_mac_header(skb);
 		ret = skb_do_redirect(skb);
 		if (ret == 0)
 			ret = BPF_REDIRECT;
...
@@ -41,13 +41,20 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
  * not know if the device has more tx queues than rx, or the opposite.
  * This might also change during run time.
  */
-static void xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
-				u16 queue_id)
+static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
+			       u16 queue_id)
 {
+	if (queue_id >= max_t(unsigned int,
+			      dev->real_num_rx_queues,
+			      dev->real_num_tx_queues))
+		return -EINVAL;
+
 	if (queue_id < dev->real_num_rx_queues)
 		dev->_rx[queue_id].umem = umem;
 	if (queue_id < dev->real_num_tx_queues)
 		dev->_tx[queue_id].umem = umem;
+
+	return 0;
 }
 
 struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
@@ -88,7 +95,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
 		goto out_rtnl_unlock;
 	}
 
-	xdp_reg_umem_at_qid(dev, umem, queue_id);
+	err = xdp_reg_umem_at_qid(dev, umem, queue_id);
+	if (err)
+		goto out_rtnl_unlock;
+
 	umem->dev = dev;
 	umem->queue_id = queue_id;
 	if (force_copy)
...
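With item 6 in place, binding an AF_XDP socket to a queue id the device does
not have fails with -EINVAL at umem setup instead of silently registering
nothing. A user-space sketch; "eth0" and the queue id are illustrative, error
handling is trimmed, and the UMEM/ring setsockopt() setup a real program needs
first is elided:

	#include <linux/if_xdp.h>
	#include <net/if.h>
	#include <stdio.h>
	#include <sys/socket.h>
	#include <unistd.h>

	#ifndef AF_XDP
	#define AF_XDP 44
	#endif

	int main(void)
	{
		struct sockaddr_xdp sxdp = {
			.sxdp_family   = AF_XDP,
			.sxdp_ifindex  = if_nametoindex("eth0"),	/* assumption */
			.sxdp_queue_id = 4096,	/* deliberately nonexistent queue */
		};
		int fd = socket(AF_XDP, SOCK_RAW, 0);

		/* ...UMEM registration and ring setup via setsockopt() elided... */
		if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)))
			perror("bind");		/* expected: Invalid argument */
		close(fd);
		return 0;
	}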
@@ -279,6 +279,7 @@ $(obj)/%.o: $(src)/%.c
 		-Wno-gnu-variable-sized-type-not-at-end \
 		-Wno-address-of-packed-member -Wno-tautological-compare \
 		-Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
+		-I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \
 		-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@
 ifeq ($(DWARF2BTF),y)
 	$(BTF_PAHOLE) -J $@
...
New file (likely samples/bpf/asm_goto_workaround.h, given the -include above):

/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 Facebook */
#ifndef __ASM_GOTO_WORKAROUND_H
#define __ASM_GOTO_WORKAROUND_H

/* this will bring in asm_volatile_goto macro definition
 * if enabled by compiler and config options.
 */
#include <linux/types.h>

#ifdef asm_volatile_goto
#undef asm_volatile_goto
#define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
#endif

#endif
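The override works because clang at the time had no asm-goto support and
would reject kernel headers that expand asm_volatile_goto (jump labels,
static keys) outright. Redefining the macro to a plain volatile asm with an
invalid mnemonic lets the headers parse cleanly; only code that actually
emits the macro would fail, at assembly time. An illustration of the kind of
construct in question, not from the patch and compiling with gcc only:

	static inline int branch_taken(void)
	{
		asm goto("" : : : : taken);	/* requires compiler asm-goto support */
		return 0;
	taken:
		return 1;
	}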
@@ -93,9 +93,16 @@ BFD_SRCS = jit_disasm.c
 SRCS = $(filter-out $(BFD_SRCS),$(wildcard *.c))
 
 ifeq ($(feature-libbfd),1)
+  LIBS += -lbfd -ldl -lopcodes
+else ifeq ($(feature-libbfd-liberty),1)
+  LIBS += -lbfd -ldl -lopcodes -liberty
+else ifeq ($(feature-libbfd-liberty-z),1)
+  LIBS += -lbfd -ldl -lopcodes -liberty -lz
+endif
+
+ifneq ($(filter -lbfd,$(LIBS)),)
 CFLAGS += -DHAVE_LIBBFD_SUPPORT
 SRCS += $(BFD_SRCS)
-LIBS += -lbfd -lopcodes
 endif
 
 OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
...
-// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
 /*
  * Simple streaming JSON writer
  *
  * This takes care of the annoying bits of JSON syntax like the commas
  * after elements
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
  * Authors: Stephen Hemminger <stephen@networkplumber.org>
  */
...
@@ -5,11 +5,6 @@
  * This takes care of the annoying bits of JSON syntax like the commas
  * after elements
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
  * Authors: Stephen Hemminger <stephen@networkplumber.org>
  */
...
@@ -65,6 +65,17 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
 	return syscall(__NR_bpf, cmd, attr, size);
 }
 
+static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
+{
+	int fd;
+
+	do {
+		fd = sys_bpf(BPF_PROG_LOAD, attr, size);
+	} while (fd < 0 && errno == EAGAIN);
+
+	return fd;
+}
+
 int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
 {
 	__u32 name_len = create_attr->name ? strlen(create_attr->name) : 0;
@@ -232,7 +243,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
 		memcpy(attr.prog_name, load_attr->name,
 		       min(name_len, BPF_OBJ_NAME_LEN - 1));
 
-	fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+	fd = sys_bpf_prog_load(&attr, sizeof(attr));
 	if (fd >= 0)
 		return fd;
@@ -269,7 +280,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
 		break;
 	}
 
-	fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+	fd = sys_bpf_prog_load(&attr, sizeof(attr));
 	if (fd >= 0)
 		goto done;
@@ -283,7 +294,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
 	attr.log_size = log_buf_sz;
 	attr.log_level = 1;
 	log_buf[0] = 0;
-	fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+	fd = sys_bpf_prog_load(&attr, sizeof(attr));
 done:
 	free(finfo);
 	free(linfo);
@@ -328,7 +339,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
 	attr.kern_version = kern_version;
 	attr.prog_flags = prog_flags;
 
-	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+	return sys_bpf_prog_load(&attr, sizeof(attr));
 }
 
 int bpf_map_update_elem(int fd, const void *key, const void *value,
...
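Context for item 9: the verifier bails out of verification with -EAGAIN when
a signal is pending, so a process that routinely receives signals (e.g. a
profiler's SIGPROF, interval timers) could see BPF_PROG_LOAD fail spuriously;
the wrapper above simply retries. The same pattern at the raw syscall level,
as a sketch mirroring sys_bpf_prog_load() rather than additional libbpf API:

	#include <errno.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/bpf.h>

	static int prog_load_retrying(union bpf_attr *attr, unsigned int size)
	{
		long fd;

		do {
			fd = syscall(__NR_bpf, BPF_PROG_LOAD, attr, size);
		} while (fd < 0 && errno == EAGAIN);	/* verifier interrupted by a signal */

		return (int)fd;
	}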
@@ -56,6 +56,7 @@ TEST_PROGS := test_kmod.sh \
 	test_xdp_vlan.sh
 
 TEST_PROGS_EXTENDED := with_addr.sh \
+	with_tunnels.sh \
 	tcp_client.py \
 	tcp_server.py
...
@@ -1188,7 +1188,9 @@ static void test_stacktrace_build_id(void)
 	int i, j;
 	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
 	int build_id_matches = 0;
+	int retry = 1;
 
+retry:
 	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
 	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
 		goto out;
@@ -1301,6 +1303,19 @@ static void test_stacktrace_build_id(void)
 		previous_key = key;
 	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
 
+	/* stack_map_get_build_id_offset() is racy and sometimes can return
+	 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
+	 * try it one more time.
+	 */
+	if (build_id_matches < 1 && retry--) {
+		ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+		close(pmu_fd);
+		bpf_object__close(obj);
+		printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
+		       __func__);
+		goto retry;
+	}
+
 	if (CHECK(build_id_matches < 1, "build id match",
 		  "Didn't find expected build ID from the map\n"))
 		goto disable_pmu;
@@ -1341,7 +1356,9 @@ static void test_stacktrace_build_id_nmi(void)
 	int i, j;
 	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
 	int build_id_matches = 0;
+	int retry = 1;
 
+retry:
 	err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
 	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
 		return;
@@ -1436,6 +1453,19 @@ static void test_stacktrace_build_id_nmi(void)
 		previous_key = key;
 	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
 
+	/* stack_map_get_build_id_offset() is racy and sometimes can return
+	 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
+	 * try it one more time.
+	 */
+	if (build_id_matches < 1 && retry--) {
+		ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+		close(pmu_fd);
+		bpf_object__close(obj);
+		printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
+		       __func__);
+		goto retry;
+	}
+
 	if (CHECK(build_id_matches < 1, "build id match",
 		  "Didn't find expected build ID from the map\n"))
 		goto disable_pmu;
...