Commit e7b81641 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Alexei Starovoitov says:

====================
pull-request: bpf 2019-01-31

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) disable preemption in sender side of socket filters, from Alexei.

2) fix two potential deadlocks in syscall bpf lookup and prog_register,
   from Martin and Alexei.

3) fix BTF to allow typedef on func_proto, from Yonghong.

4) two bpftool fixes, from Jiri and Paolo.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9b1f19d8 f01c2803
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -2848,6 +2848,9 @@ F: include/uapi/linux/if_bonding.h
 BPF (Safe dynamic programs and tools)
 M: Alexei Starovoitov <ast@kernel.org>
 M: Daniel Borkmann <daniel@iogearbox.net>
+R: Martin KaFai Lau <kafai@fb.com>
+R: Song Liu <songliubraving@fb.com>
+R: Yonghong Song <yhs@fb.com>
 L: netdev@vger.kernel.org
 L: linux-kernel@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
@@ -2873,6 +2876,8 @@ F: samples/bpf/
 F: tools/bpf/
 F: tools/lib/bpf/
 F: tools/testing/selftests/bpf/
+K: bpf
+N: bpf

 BPF JIT for ARM
 M: Shubham Bansal <illusionist.neo@gmail.com>
diff --git a/include/linux/filter.h b/include/linux/filter.h
@@ -591,8 +591,8 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
        return qdisc_skb_cb(skb)->data;
 }

-static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
-                                      struct sk_buff *skb)
+static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
+                                        struct sk_buff *skb)
 {
        u8 *cb_data = bpf_skb_cb(skb);
        u8 cb_saved[BPF_SKB_CB_LEN];
@@ -611,15 +611,30 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
        return res;
 }

+static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
+                                      struct sk_buff *skb)
+{
+       u32 res;
+
+       preempt_disable();
+       res = __bpf_prog_run_save_cb(prog, skb);
+       preempt_enable();
+
+       return res;
+}
+
 static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
                                        struct sk_buff *skb)
 {
        u8 *cb_data = bpf_skb_cb(skb);
+       u32 res;

        if (unlikely(prog->cb_access))
                memset(cb_data, 0, BPF_SKB_CB_LEN);

-       return BPF_PROG_RUN(prog, skb);
+       preempt_disable();
+       res = BPF_PROG_RUN(prog, skb);
+       preempt_enable();
+       return res;
 }

 static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
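For context on change 1) in the summary: BPF programs assume they stay on one CPU while they run, because per-CPU maps and scratch buffers are selected by CPU id. A minimal sketch of the migration hazard the new preempt_disable()/preempt_enable() pairs close — illustrative kernel-style C, not code from this patch; 'struct counter' and both functions are hypothetical:

/* Illustrative only; the real fix wraps BPF_PROG_RUN() as shown above. */
struct counter { unsigned long hits; };

unsigned long run_unpinned(struct counter __percpu *ctr)
{
        struct counter *c = this_cpu_ptr(ctr); /* slot of the current CPU */

        /* Preemption here can migrate the task to another CPU, so the
         * non-atomic increment below races with the slot's new owner. */
        return ++c->hits;
}

unsigned long run_pinned(struct counter __percpu *ctr)
{
        unsigned long v;

        preempt_disable();                      /* pin task to this CPU */
        v = ++this_cpu_ptr(ctr)->hits;
        preempt_enable();
        return v;
}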
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
@@ -1459,7 +1459,8 @@ static int btf_modifier_resolve(struct btf_verifier_env *env,
        /* "typedef void new_void", "const void"...etc */
        if (!btf_type_is_void(next_type) &&
-           !btf_type_is_fwd(next_type)) {
+           !btf_type_is_fwd(next_type) &&
+           !btf_type_is_func_proto(next_type)) {
                btf_verifier_log_type(env, v->t, "Invalid type_id");
                return -EINVAL;
        }
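This is change 3) in the summary: the modifier/typedef resolver treated FUNC_PROTO as an invalid target, but a typedef of a function type is legal C and is encoded in BTF as TYPEDEF=>FUNC_PROTO. Roughly the construct that now loads (hypothetical names):

/* A typedef naming a function type (not a pointer to one); BTF encodes
 * this as TYPEDEF -> FUNC_PROTO, which the check above used to reject. */
typedef int bpf_callback_t(unsigned long index, void *ctx);

/* Typical use is through a pointer: */
static bpf_callback_t *cb;

static int invoke(unsigned long idx, void *ctx)
{
        return cb ? cb(idx, ctx) : 0;
}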
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
@@ -572,7 +572,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
        bpf_compute_and_save_data_end(skb, &saved_data_end);

        ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
-                                bpf_prog_run_save_cb);
+                                __bpf_prog_run_save_cb);
        bpf_restore_data_end(skb, saved_data_end);
        __skb_pull(skb, offset);
        skb->sk = save_sk;
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
@@ -686,7 +686,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
        }

        if (htab_is_prealloc(htab)) {
-               pcpu_freelist_push(&htab->freelist, &l->fnode);
+               __pcpu_freelist_push(&htab->freelist, &l->fnode);
        } else {
                atomic_dec(&htab->count);
                l->htab = htab;
@@ -748,7 +748,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
        } else {
                struct pcpu_freelist_node *l;

-               l = pcpu_freelist_pop(&htab->freelist);
+               l = __pcpu_freelist_pop(&htab->freelist);
                if (!l)
                        return ERR_PTR(-E2BIG);
                l_new = container_of(l, struct htab_elem, fnode);
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
@@ -28,8 +28,8 @@ void pcpu_freelist_destroy(struct pcpu_freelist *s)
        free_percpu(s->freelist);
 }

-static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
-                                       struct pcpu_freelist_node *node)
+static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
+                                        struct pcpu_freelist_node *node)
 {
        raw_spin_lock(&head->lock);
        node->next = head->first;
@@ -37,12 +37,22 @@ static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
        raw_spin_unlock(&head->lock);
 }

-void pcpu_freelist_push(struct pcpu_freelist *s,
+void __pcpu_freelist_push(struct pcpu_freelist *s,
                        struct pcpu_freelist_node *node)
 {
        struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);

-       __pcpu_freelist_push(head, node);
+       ___pcpu_freelist_push(head, node);
+}
+
+void pcpu_freelist_push(struct pcpu_freelist *s,
+                       struct pcpu_freelist_node *node)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       __pcpu_freelist_push(s, node);
+       local_irq_restore(flags);
 }

 void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
@@ -63,7 +73,7 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
        for_each_possible_cpu(cpu) {
again:
                head = per_cpu_ptr(s->freelist, cpu);
-               __pcpu_freelist_push(head, buf);
+               ___pcpu_freelist_push(head, buf);
                i++;
                buf += elem_size;
                if (i == nr_elems)
@@ -74,14 +84,12 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
        local_irq_restore(flags);
 }

-struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
+struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
 {
        struct pcpu_freelist_head *head;
        struct pcpu_freelist_node *node;
-       unsigned long flags;
        int orig_cpu, cpu;

-       local_irq_save(flags);
        orig_cpu = cpu = raw_smp_processor_id();
        while (1) {
                head = per_cpu_ptr(s->freelist, cpu);
@@ -89,16 +97,25 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
                node = head->first;
                if (node) {
                        head->first = node->next;
-                       raw_spin_unlock_irqrestore(&head->lock, flags);
+                       raw_spin_unlock(&head->lock);
                        return node;
                }
                raw_spin_unlock(&head->lock);
                cpu = cpumask_next(cpu, cpu_possible_mask);
                if (cpu >= nr_cpu_ids)
                        cpu = 0;
-               if (cpu == orig_cpu) {
-                       local_irq_restore(flags);
+               if (cpu == orig_cpu)
                        return NULL;
-               }
        }
 }
+
+struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
+{
+       struct pcpu_freelist_node *ret;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       ret = __pcpu_freelist_pop(s);
+       local_irq_restore(flags);
+       return ret;
+}
diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h
@@ -22,8 +22,12 @@ struct pcpu_freelist_node {
        struct pcpu_freelist_node *next;
 };

+/* pcpu_freelist_* do spin_lock_irqsave. */
 void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
 struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *);
+/* __pcpu_freelist_* do spin_lock only. caller must disable irqs. */
+void __pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
+struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *);
 void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
                            u32 nr_elems);
 int pcpu_freelist_init(struct pcpu_freelist *);
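The new comments spell out a two-level contract. As a caller-side sketch of the second tier (the surrounding structure is hypothetical, the freelist API is real): the hashtab call sites above qualify because they push and pop while a bucket's raw spinlock is already held with IRQs saved off:

/* Hypothetical caller that already runs with IRQs disabled: */
struct bucket_owner {
        raw_spinlock_t lock;
        struct pcpu_freelist freelist;
};

static void recycle(struct bucket_owner *b, struct pcpu_freelist_node *node)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&b->lock, flags);
        /* IRQs are off here, so the lock-only variant is the right one;
         * plain pcpu_freelist_push() would redundantly irqsave again. */
        __pcpu_freelist_push(&b->freelist, node);
        raw_spin_unlock_irqrestore(&b->lock, flags);
}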
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
@@ -713,8 +713,13 @@ static int map_lookup_elem(union bpf_attr *attr)

        if (bpf_map_is_dev_bound(map)) {
                err = bpf_map_offload_lookup_elem(map, key, value);
-       } else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
-                  map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+               goto done;
+       }
+
+       preempt_disable();
+       this_cpu_inc(bpf_prog_active);
+       if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+           map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
                err = bpf_percpu_hash_copy(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
                err = bpf_percpu_array_copy(map, key, value);
@@ -744,7 +749,10 @@ static int map_lookup_elem(union bpf_attr *attr)
                }
                rcu_read_unlock();
        }
+       this_cpu_dec(bpf_prog_active);
+       preempt_enable();

+done:
        if (err)
                goto free_value;
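Pairing preempt_disable() with a bpf_prog_active bump mirrors what the map update/delete paths already do: tracing programs check that per-CPU counter before running, so elevating it keeps, say, a kprobe-attached program on the same CPU from re-entering map internals while the syscall holds their locks. A simplified sketch of the check on the tracing side (adapted from trace_call_bpf(), not part of this patch):

static unsigned int trace_call_bpf_sketch(struct bpf_prog *prog, void *ctx)
{
        unsigned int ret;

        if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
                /* This CPU is already inside BPF/map code that bumped the
                 * counter (e.g. the lookup above): skip the program. */
                ret = 0;
                goto out;
        }
        ret = BPF_PROG_RUN(prog, ctx);
out:
        __this_cpu_dec(bpf_prog_active);
        return ret;
}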
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
@@ -1204,22 +1204,12 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *

 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
 {
-       int err;
-
-       mutex_lock(&bpf_event_mutex);
-       err = __bpf_probe_register(btp, prog);
-       mutex_unlock(&bpf_event_mutex);
-       return err;
+       return __bpf_probe_register(btp, prog);
 }

 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
 {
-       int err;
-
-       mutex_lock(&bpf_event_mutex);
-       err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
-       mutex_unlock(&bpf_event_mutex);
-       return err;
+       return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
 }

 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
diff --git a/net/core/filter.c b/net/core/filter.c
@@ -4112,10 +4112,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
                /* Only some socketops are supported */
                switch (optname) {
                case SO_RCVBUF:
+                       val = min_t(u32, val, sysctl_rmem_max);
                        sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
                        sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
                        break;
                case SO_SNDBUF:
+                       val = min_t(u32, val, sysctl_wmem_max);
                        sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
                        sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
                        break;
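A worked example of the new clamping; the sysctl value below is a common default, used purely for illustration:

/* bpf_setsockopt(SO_SNDBUF, 8 MB) with sysctl_wmem_max == 212992:
 *   val       = min_t(u32, 8388608, 212992)              = 212992
 *   sk_sndbuf = max_t(int, 212992 * 2, SOCK_MIN_SNDBUF)  = 425984
 * i.e. BPF is now held to the same cap that setsockopt() enforces for
 * unprivileged userspace, instead of writing val * 2 unchecked.
 */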
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
@@ -545,8 +545,7 @@ static void sk_psock_destroy_deferred(struct work_struct *gc)
        struct sk_psock *psock = container_of(gc, struct sk_psock, gc);

        /* No sk_callback_lock since already detached. */
-       if (psock->parser.enabled)
-               strp_done(&psock->parser.strp);
+       strp_done(&psock->parser.strp);

        cancel_work_sync(&psock->work);
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
@@ -297,10 +297,8 @@ char *get_fdinfo(int fd, const char *key)
        snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);

        fdi = fopen(path, "r");
-       if (!fdi) {
-               p_err("can't open fdinfo: %s", strerror(errno));
+       if (!fdi)
                return NULL;
-       }

        while ((n = getline(&line, &line_n, fdi)) > 0) {
                char *value;
@@ -313,7 +311,6 @@ char *get_fdinfo(int fd, const char *key)

                value = strchr(line, '\t');
                if (!value || !value[1]) {
-                       p_err("malformed fdinfo!?");
                        free(line);
                        return NULL;
                }
@@ -326,7 +323,6 @@ char *get_fdinfo(int fd, const char *key)
                return line;
        }

-       p_err("key '%s' not found in fdinfo", key);
        free(line);
        fclose(fdi);
        return NULL;
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
@@ -347,6 +347,20 @@ static char **parse_bytes(char **argv, const char *name, unsigned char *val,
        return argv + i;
 }

+/* on per cpu maps we must copy the provided value on all value instances */
+static void fill_per_cpu_value(struct bpf_map_info *info, void *value)
+{
+       unsigned int i, n, step;
+
+       if (!map_is_per_cpu(info->type))
+               return;
+
+       n = get_possible_cpus();
+       step = round_up(info->value_size, 8);
+       for (i = 1; i < n; i++)
+               memcpy(value + i * step, value, info->value_size);
+}
+
 static int parse_elem(char **argv, struct bpf_map_info *info,
                      void *key, void *value, __u32 key_size, __u32 value_size,
                      __u32 *flags, __u32 **value_fd)
@@ -426,6 +440,8 @@ static int parse_elem(char **argv, struct bpf_map_info *info,
                        argv = parse_bytes(argv, "value", value, value_size);
                        if (!argv)
                                return -1;
+
+                       fill_per_cpu_value(info, value);
                }

                return parse_elem(argv, info, key, NULL, key_size, value_size,
@@ -497,10 +513,9 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
                        jsonw_uint_field(json_wtr, "owner_prog_type",
                                         prog_type);
                }
-               if (atoi(owner_jited))
-                       jsonw_bool_field(json_wtr, "owner_jited", true);
-               else
-                       jsonw_bool_field(json_wtr, "owner_jited", false);
+               if (owner_jited)
+                       jsonw_bool_field(json_wtr, "owner_jited",
+                                        !!atoi(owner_jited));

                free(owner_prog_type);
                free(owner_jited);
@@ -553,7 +568,8 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
                char *owner_prog_type = get_fdinfo(fd, "owner_prog_type");
                char *owner_jited = get_fdinfo(fd, "owner_jited");

-               printf("\n\t");
+               if (owner_prog_type || owner_jited)
+                       printf("\n\t");
                if (owner_prog_type) {
                        unsigned int prog_type = atoi(owner_prog_type);

@@ -563,10 +579,9 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
                        else
                                printf("owner_prog_type %d ", prog_type);
                }
-               if (atoi(owner_jited))
-                       printf("owner jited");
-               else
-                       printf("owner not jited");
+               if (owner_jited)
+                       printf("owner%s jited",
+                              atoi(owner_jited) ? "" : " not");

                free(owner_prog_type);
                free(owner_jited);
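For reference, the per-CPU map ABI that fill_per_cpu_value() targets: one userspace buffer holds a value per possible CPU, each slot padded to an 8-byte boundary. A self-contained sketch of the same layout math (plain C, not bpftool code; both helpers are hypothetical):

#include <stdlib.h>
#include <string.h>

/* Slot stride: value_size rounded up to 8, as in round_up(value_size, 8). */
static size_t percpu_step(size_t value_size)
{
        return (value_size + 7) & ~(size_t)7;
}

/* Replicate one value into every CPU slot, like fill_per_cpu_value(). */
static void *alloc_and_fill(const void *one, size_t value_size,
                            unsigned int n_cpus)
{
        size_t step = percpu_step(value_size);
        char *buf = calloc(n_cpus, step);
        unsigned int i;

        if (!buf)
                return NULL;
        for (i = 0; i < n_cpus; i++)
                memcpy(buf + i * step, one, value_size);
        return buf;
}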
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
@@ -78,13 +78,14 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)

 static int prog_fd_by_tag(unsigned char *tag)
 {
-       struct bpf_prog_info info = {};
-       __u32 len = sizeof(info);
        unsigned int id = 0;
        int err;
        int fd;

        while (true) {
+               struct bpf_prog_info info = {};
+               __u32 len = sizeof(info);
+
                err = bpf_prog_get_next_id(id, &id);
                if (err) {
                        p_err("%s", strerror(errno));
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
@@ -13,7 +13,7 @@ static inline unsigned int bpf_num_possible_cpus(void)
        unsigned int start, end, possible_cpus = 0;
        char buff[128];
        FILE *fp;
-       int n;
+       int len, n, i, j = 0;

        fp = fopen(fcpu, "r");
        if (!fp) {
@@ -21,17 +21,27 @@ static inline unsigned int bpf_num_possible_cpus(void)
                exit(1);
        }

-       while (fgets(buff, sizeof(buff), fp)) {
-               n = sscanf(buff, "%u-%u", &start, &end);
-               if (n == 0) {
-                       printf("Failed to retrieve # possible CPUs!\n");
-                       exit(1);
-               } else if (n == 1) {
-                       end = start;
+       if (!fgets(buff, sizeof(buff), fp)) {
+               printf("Failed to read %s!\n", fcpu);
+               exit(1);
+       }
+
+       len = strlen(buff);
+       for (i = 0; i <= len; i++) {
+               if (buff[i] == ',' || buff[i] == '\0') {
+                       buff[i] = '\0';
+                       n = sscanf(&buff[j], "%u-%u", &start, &end);
+                       if (n <= 0) {
+                               printf("Failed to retrieve # possible CPUs!\n");
+                               exit(1);
+                       } else if (n == 1) {
+                               end = start;
+                       }
+                       possible_cpus += end - start + 1;
+                       j = i + 1;
                }
-               possible_cpus = start == 0 ? end + 1 : 0;
-               break;
        }
+
        fclose(fp);

        return possible_cpus;
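The rewritten helper handles the full comma-separated range syntax of /sys/devices/system/cpu/possible (e.g. "0-7,16-23"); the old loop parsed only the first range, and only correctly when it started at CPU 0. A standalone demo of the same counting logic (hypothetical helper, compiles as plain C):

#include <stdio.h>
#include <string.h>

/* Count CPUs in a mask string like "0-7,16-23": each range contributes
 * end - start + 1, exactly as in the fixed bpf_num_possible_cpus(). */
static unsigned int count_cpus(char *buff)
{
        unsigned int start, end, total = 0;
        int len = strlen(buff), n, i, j = 0;

        for (i = 0; i <= len; i++) {
                if (buff[i] == ',' || buff[i] == '\0') {
                        buff[i] = '\0';
                        n = sscanf(&buff[j], "%u-%u", &start, &end);
                        if (n == 1)
                                end = start;    /* single CPU, e.g. "9" */
                        if (n >= 1)
                                total += end - start + 1;
                        j = i + 1;
                }
        }
        return total;
}

int main(void)
{
        char mask[] = "0-7,9,16-23";

        printf("%u possible CPUs\n", count_cpus(mask));  /* prints 17 */
        return 0;
}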
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
@@ -1881,13 +1881,12 @@ static struct btf_raw_test raw_tests[] = {
 },

 {
-       .descr = "func proto (CONST=>TYPEDEF=>FUNC_PROTO)",
+       .descr = "func proto (TYPEDEF=>FUNC_PROTO)",
        .raw_types = {
                BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
                BTF_TYPE_INT_ENC(0, 0, 0, 32, 4),              /* [2] */
-               BTF_CONST_ENC(4),                              /* [3] */
-               BTF_TYPEDEF_ENC(NAME_TBD, 5),                  /* [4] */
-               BTF_FUNC_PROTO_ENC(0, 2),                      /* [5] */
+               BTF_TYPEDEF_ENC(NAME_TBD, 4),                  /* [3] */
+               BTF_FUNC_PROTO_ENC(0, 2),                      /* [4] */
                        BTF_FUNC_PROTO_ARG_ENC(0, 1),
                        BTF_FUNC_PROTO_ARG_ENC(0, 2),
                BTF_END_RAW,
@@ -1901,8 +1900,6 @@ static struct btf_raw_test raw_tests[] = {
        .key_type_id = 1,
        .value_type_id = 1,
        .max_entries = 4,
-       .btf_load_err = true,
-       .err_str = "Invalid type_id",
 },

 {