Commit 22bdf7d4 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2019-03-29

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Bug fix in BTF deduplication that was mishandling an equivalence
   comparison, from Andrii.

2) libbpf Makefile fixes to properly link against libelf for the shared
   object and to actually export AF_XDP's xsk.h header, from Björn.

3) Fix use after free in bpf inode eviction, from Daniel.

4) Fix a bug in skb creation out of cpumap redirect, from Jesper.

5) Remove an unnecessary and triggerable WARN_ONCE() in max number
   of call stack frames checking in verifier, from Paul.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 19c84744 676e4a6f
...@@ -148,16 +148,16 @@ The ``btf_type.size * 8`` must be equal to or greater than ``BTF_INT_BITS()`` ...@@ -148,16 +148,16 @@ The ``btf_type.size * 8`` must be equal to or greater than ``BTF_INT_BITS()``
for the type. The maximum value of ``BTF_INT_BITS()`` is 128. for the type. The maximum value of ``BTF_INT_BITS()`` is 128.
The ``BTF_INT_OFFSET()`` specifies the starting bit offset to calculate values The ``BTF_INT_OFFSET()`` specifies the starting bit offset to calculate values
for this int. For example, a bitfield struct member has: * btf member bit for this int. For example, a bitfield struct member has:
offset 100 from the start of the structure, * btf member pointing to an int * btf member bit offset 100 from the start of the structure,
type, * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4`` * btf member pointing to an int type,
* the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
Then in the struct memory layout, this member will occupy ``4`` bits starting Then in the struct memory layout, this member will occupy ``4`` bits starting
from bits ``100 + 2 = 102``. from bits ``100 + 2 = 102``.
Alternatively, the bitfield struct member can be the following to access the Alternatively, the bitfield struct member can be the following to access the
same bits as the above: same bits as the above:
* btf member bit offset 102, * btf member bit offset 102,
* btf member pointing to an int type, * btf member pointing to an int type,
* the int type has ``BTF_INT_OFFSET() = 0`` and ``BTF_INT_BITS() = 4`` * the int type has ``BTF_INT_OFFSET() = 0`` and ``BTF_INT_BITS() = 4``
......
...@@ -162,10 +162,14 @@ static void cpu_map_kthread_stop(struct work_struct *work) ...@@ -162,10 +162,14 @@ static void cpu_map_kthread_stop(struct work_struct *work)
static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu, static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
struct xdp_frame *xdpf) struct xdp_frame *xdpf)
{ {
unsigned int hard_start_headroom;
unsigned int frame_size; unsigned int frame_size;
void *pkt_data_start; void *pkt_data_start;
struct sk_buff *skb; struct sk_buff *skb;
/* Part of headroom was reserved to xdpf */
hard_start_headroom = sizeof(struct xdp_frame) + xdpf->headroom;
/* build_skb need to place skb_shared_info after SKB end, and /* build_skb need to place skb_shared_info after SKB end, and
* also want to know the memory "truesize". Thus, need to * also want to know the memory "truesize". Thus, need to
* know the memory frame size backing xdp_buff. * know the memory frame size backing xdp_buff.
...@@ -183,15 +187,15 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu, ...@@ -183,15 +187,15 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
* is not at a fixed memory location, with mixed length * is not at a fixed memory location, with mixed length
* packets, which is bad for cache-line hotness. * packets, which is bad for cache-line hotness.
*/ */
frame_size = SKB_DATA_ALIGN(xdpf->len + xdpf->headroom) + frame_size = SKB_DATA_ALIGN(xdpf->len + hard_start_headroom) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
pkt_data_start = xdpf->data - xdpf->headroom; pkt_data_start = xdpf->data - hard_start_headroom;
skb = build_skb(pkt_data_start, frame_size); skb = build_skb(pkt_data_start, frame_size);
if (!skb) if (!skb)
return NULL; return NULL;
skb_reserve(skb, xdpf->headroom); skb_reserve(skb, hard_start_headroom);
__skb_put(skb, xdpf->len); __skb_put(skb, xdpf->len);
if (xdpf->metasize) if (xdpf->metasize)
skb_metadata_set(skb, xdpf->metasize); skb_metadata_set(skb, xdpf->metasize);
...@@ -205,6 +209,9 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu, ...@@ -205,6 +209,9 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
* - RX ring dev queue index (skb_record_rx_queue) * - RX ring dev queue index (skb_record_rx_queue)
*/ */
/* Allow SKB to reuse area used by xdp_frame */
xdp_scrub_frame(xdpf);
return skb; return skb;
} }
......
...@@ -554,19 +554,6 @@ struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type typ ...@@ -554,19 +554,6 @@ struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type typ
} }
EXPORT_SYMBOL(bpf_prog_get_type_path); EXPORT_SYMBOL(bpf_prog_get_type_path);
static void bpf_evict_inode(struct inode *inode)
{
enum bpf_type type;
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
if (S_ISLNK(inode->i_mode))
kfree(inode->i_link);
if (!bpf_inode_type(inode, &type))
bpf_any_put(inode->i_private, type);
}
/* /*
* Display the mount options in /proc/mounts. * Display the mount options in /proc/mounts.
*/ */
...@@ -579,11 +566,28 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root) ...@@ -579,11 +566,28 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
return 0; return 0;
} }
static void bpf_destroy_inode_deferred(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
enum bpf_type type;
if (S_ISLNK(inode->i_mode))
kfree(inode->i_link);
if (!bpf_inode_type(inode, &type))
bpf_any_put(inode->i_private, type);
free_inode_nonrcu(inode);
}
static void bpf_destroy_inode(struct inode *inode)
{
call_rcu(&inode->i_rcu, bpf_destroy_inode_deferred);
}
static const struct super_operations bpf_super_ops = { static const struct super_operations bpf_super_ops = {
.statfs = simple_statfs, .statfs = simple_statfs,
.drop_inode = generic_delete_inode, .drop_inode = generic_delete_inode,
.show_options = bpf_show_options, .show_options = bpf_show_options,
.evict_inode = bpf_evict_inode, .destroy_inode = bpf_destroy_inode,
}; };
enum { enum {
......
...@@ -1897,8 +1897,9 @@ static int check_max_stack_depth(struct bpf_verifier_env *env) ...@@ -1897,8 +1897,9 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
} }
frame++; frame++;
if (frame >= MAX_CALL_FRAMES) { if (frame >= MAX_CALL_FRAMES) {
WARN_ONCE(1, "verifier bug. Call stack is too deep\n"); verbose(env, "the call stack of %d frames is too deep !\n",
return -EFAULT; frame);
return -E2BIG;
} }
goto process_func; goto process_func;
} }
......
...@@ -177,7 +177,7 @@ $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION) ...@@ -177,7 +177,7 @@ $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN) $(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN)
$(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(VERSION) \ $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(VERSION) \
-Wl,--version-script=$(VERSION_SCRIPT) $^ -o $@ -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@
@ln -sf $(@F) $(OUTPUT)libbpf.so @ln -sf $(@F) $(OUTPUT)libbpf.so
@ln -sf $(@F) $(OUTPUT)libbpf.so.$(VERSION) @ln -sf $(@F) $(OUTPUT)libbpf.so.$(VERSION)
...@@ -220,8 +220,9 @@ install_lib: all_cmd ...@@ -220,8 +220,9 @@ install_lib: all_cmd
install_headers: install_headers:
$(call QUIET_INSTALL, headers) \ $(call QUIET_INSTALL, headers) \
$(call do_install,bpf.h,$(prefix)/include/bpf,644); \ $(call do_install,bpf.h,$(prefix)/include/bpf,644); \
$(call do_install,libbpf.h,$(prefix)/include/bpf,644); $(call do_install,libbpf.h,$(prefix)/include/bpf,644); \
$(call do_install,btf.h,$(prefix)/include/bpf,644); $(call do_install,btf.h,$(prefix)/include/bpf,644); \
$(call do_install,xsk.h,$(prefix)/include/bpf,644);
install: install_lib install: install_lib
......
...@@ -2107,6 +2107,9 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, ...@@ -2107,6 +2107,9 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
return fwd_kind == real_kind; return fwd_kind == real_kind;
} }
if (cand_kind != canon_kind)
return 0;
switch (cand_kind) { switch (cand_kind) {
case BTF_KIND_INT: case BTF_KIND_INT:
return btf_equal_int(cand_type, canon_type); return btf_equal_int(cand_type, canon_type);
......
...@@ -5776,6 +5776,53 @@ const struct btf_dedup_test dedup_tests[] = { ...@@ -5776,6 +5776,53 @@ const struct btf_dedup_test dedup_tests[] = {
.dedup_table_size = 1, /* force hash collisions */ .dedup_table_size = 1, /* force hash collisions */
}, },
}, },
{
.descr = "dedup: void equiv check",
/*
* // CU 1:
* struct s {
* struct {} *x;
* };
* // CU 2:
* struct s {
* int *x;
* };
*/
.input = {
.raw_types = {
/* CU 1 */
BTF_STRUCT_ENC(0, 0, 1), /* [1] struct {} */
BTF_PTR_ENC(1), /* [2] ptr -> [1] */
BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [3] struct s */
BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
/* CU 2 */
BTF_PTR_ENC(0), /* [4] ptr -> void */
BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [5] struct s */
BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
BTF_END_RAW,
},
BTF_STR_SEC("\0s\0x"),
},
.expect = {
.raw_types = {
/* CU 1 */
BTF_STRUCT_ENC(0, 0, 1), /* [1] struct {} */
BTF_PTR_ENC(1), /* [2] ptr -> [1] */
BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [3] struct s */
BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
/* CU 2 */
BTF_PTR_ENC(0), /* [4] ptr -> void */
BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [5] struct s */
BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
BTF_END_RAW,
},
BTF_STR_SEC("\0s\0x"),
},
.opts = {
.dont_resolve_fwds = false,
.dedup_table_size = 1, /* force hash collisions */
},
},
{ {
.descr = "dedup: all possible kinds (no duplicates)", .descr = "dedup: all possible kinds (no duplicates)",
.input = { .input = {
......
...@@ -907,6 +907,44 @@ ...@@ -907,6 +907,44 @@
.errstr = "call stack", .errstr = "call stack",
.result = REJECT, .result = REJECT,
}, },
{
"calls: stack depth check in dead code",
.insns = {
/* main */
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
BPF_EXIT_INSN(),
/* A */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* B */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
BPF_EXIT_INSN(),
/* C */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
BPF_EXIT_INSN(),
/* D */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
BPF_EXIT_INSN(),
/* E */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
BPF_EXIT_INSN(),
/* F */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
BPF_EXIT_INSN(),
/* G */
BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
BPF_EXIT_INSN(),
/* H */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_XDP,
.errstr = "call stack",
.result = REJECT,
},
{ {
"calls: spill into caller stack frame", "calls: spill into caller stack frame",
.insns = { .insns = {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment