Commit 500322ec authored by David S. Miller

Merge branch 'bpf-push-pop-helpers'

Alexei Starovoitov says:

====================
bpf: introduce bpf_skb_vlan_push/pop() helpers

Let TC+eBPF programs call skb_vlan_push/pop via helpers.

v1->v2:
- reworded commit log to better explain correctness of re-caching
  and fixed comparison of mixed endianness (suggested by Eric)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f3120acc 4d9c5c53
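
For orientation before the diffs: a minimal sketch of a tc eBPF classifier that exercises the new helpers. It follows the samples/bpf restricted-C conventions; the SEC() macro, the bpf_htons() definition, and the helper-pointer declarations are illustrative assumptions, not part of this series:

#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>

/* samples/bpf-style boilerplate (assumed, not from this series) */
#define SEC(NAME) __attribute__((section(NAME), used))
#define bpf_htons(x) __builtin_bswap16(x)	/* little-endian host assumed */

/* helper pointers resolved from the new function IDs at load time */
static int (*bpf_skb_vlan_push)(void *ctx, __u16 proto, __u16 vlan_tci) =
	(void *) BPF_FUNC_skb_vlan_push;
static int (*bpf_skb_vlan_pop)(void *ctx) =
	(void *) BPF_FUNC_skb_vlan_pop;

SEC("classifier")
int vlan_demo(struct __sk_buff *skb)
{
	/* vlan_proto travels in network byte order; the helper coerces
	 * anything other than 802.1Q/802.1AD back to 802.1Q
	 */
	if (bpf_skb_vlan_push(skb, bpf_htons(ETH_P_8021Q), 100))
		return 0;
	return bpf_skb_vlan_pop(skb);
}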
arch/s390/net/bpf_jit_comp.c
@@ -973,6 +973,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 		 */
 		const u64 func = (u64)__bpf_call_base + imm;
 
+		if (bpf_helper_changes_skb_data((void *)func))
+			/* TODO reload skb->data, hlen */
+			return -1;
+
 		REG_SET_SEEN(BPF_REG_5);
 		jit->seen |= SEEN_FUNC;
 		/* lg %w1,<d(imm)>(%l) */
arch/x86/net/bpf_jit_comp.c
@@ -315,6 +315,26 @@ static void emit_bpf_tail_call(u8 **pprog)
 	*pprog = prog;
 }
 
+static void emit_load_skb_data_hlen(u8 **pprog)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+
+	/* r9d = skb->len - skb->data_len (headlen)
+	 * r10 = skb->data
+	 */
+	/* mov %r9d, off32(%rdi) */
+	EMIT3_off32(0x44, 0x8b, 0x8f, offsetof(struct sk_buff, len));
+
+	/* sub %r9d, off32(%rdi) */
+	EMIT3_off32(0x44, 0x2b, 0x8f, offsetof(struct sk_buff, data_len));
+
+	/* mov %r10, off32(%rdi) */
+	EMIT3_off32(0x4c, 0x8b, 0x97, offsetof(struct sk_buff, data));
+
+	*pprog = prog;
+}
+
 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		  int oldproglen, struct jit_context *ctx)
 {
@@ -329,36 +349,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 
 	emit_prologue(&prog);
 
-	if (seen_ld_abs) {
-		/* r9d : skb->len - skb->data_len (headlen)
-		 * r10 : skb->data
-		 */
-		if (is_imm8(offsetof(struct sk_buff, len)))
-			/* mov %r9d, off8(%rdi) */
-			EMIT4(0x44, 0x8b, 0x4f,
-			      offsetof(struct sk_buff, len));
-		else
-			/* mov %r9d, off32(%rdi) */
-			EMIT3_off32(0x44, 0x8b, 0x8f,
-				    offsetof(struct sk_buff, len));
-
-		if (is_imm8(offsetof(struct sk_buff, data_len)))
-			/* sub %r9d, off8(%rdi) */
-			EMIT4(0x44, 0x2b, 0x4f,
-			      offsetof(struct sk_buff, data_len));
-		else
-			EMIT3_off32(0x44, 0x2b, 0x8f,
-				    offsetof(struct sk_buff, data_len));
-
-		if (is_imm8(offsetof(struct sk_buff, data)))
-			/* mov %r10, off8(%rdi) */
-			EMIT4(0x4c, 0x8b, 0x57,
-			      offsetof(struct sk_buff, data));
-		else
-			/* mov %r10, off32(%rdi) */
-			EMIT3_off32(0x4c, 0x8b, 0x97,
-				    offsetof(struct sk_buff, data));
-	}
+	if (seen_ld_abs)
+		emit_load_skb_data_hlen(&prog);
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
 		const s32 imm32 = insn->imm;
@@ -367,6 +359,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		u8 b1 = 0, b2 = 0, b3 = 0;
 		s64 jmp_offset;
 		u8 jmp_cond;
+		bool reload_skb_data;
 		int ilen;
 		u8 *func;
@@ -818,12 +811,18 @@ xadd: if (is_imm8(insn->off))
 			func = (u8 *) __bpf_call_base + imm32;
 			jmp_offset = func - (image + addrs[i]);
 			if (seen_ld_abs) {
-				EMIT2(0x41, 0x52); /* push %r10 */
-				EMIT2(0x41, 0x51); /* push %r9 */
-				/* need to adjust jmp offset, since
-				 * pop %r9, pop %r10 take 4 bytes after call insn
-				 */
-				jmp_offset += 4;
+				reload_skb_data = bpf_helper_changes_skb_data(func);
+				if (reload_skb_data) {
+					EMIT1(0x57); /* push %rdi */
+					jmp_offset += 22; /* pop, mov, sub, mov */
+				} else {
+					EMIT2(0x41, 0x52); /* push %r10 */
+					EMIT2(0x41, 0x51); /* push %r9 */
+					/* need to adjust jmp offset, since
+					 * pop %r9, pop %r10 take 4 bytes after call insn
+					 */
+					jmp_offset += 4;
+				}
 			}
 			if (!imm32 || !is_simm32(jmp_offset)) {
 				pr_err("unsupported bpf func %d addr %p image %p\n",
@@ -832,8 +831,13 @@ xadd: if (is_imm8(insn->off))
 			}
 			EMIT1_off32(0xE8, jmp_offset);
 			if (seen_ld_abs) {
-				EMIT2(0x41, 0x59); /* pop %r9 */
-				EMIT2(0x41, 0x5A); /* pop %r10 */
+				if (reload_skb_data) {
+					EMIT1(0x5F); /* pop %rdi */
+					emit_load_skb_data_hlen(&prog);
+				} else {
+					EMIT2(0x41, 0x59); /* pop %r9 */
+					EMIT2(0x41, 0x5A); /* pop %r10 */
+				}
 			}
 			break;
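
To make the offset arithmetic above easier to follow, here is the sequence the JIT now emits around a helper that rewrites skb data, reconstructed from the EMIT*() calls above (EMIT1 emits 1 byte; EMIT3_off32 emits 3 opcode bytes plus a 4-byte displacement):

/*
 *	push %rdi		; 1 byte: save skb, it is the helper's arg1
 *	call <helper>		; helper may reallocate/move skb->data
 *	pop  %rdi		; 1 byte: restore skb
 *	mov  %r9d,off32(%rdi)	; 7 bytes: r9d = skb->len
 *	sub  %r9d,off32(%rdi)	; 7 bytes: r9d -= skb->data_len (headlen)
 *	mov  %r10,off32(%rdi)	; 7 bytes: r10 = skb->data
 *
 * 1 + 7 + 7 + 7 = 22 bytes follow the call insn, which is where the
 * "jmp_offset += 22" adjustment comes from.
 */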
include/linux/bpf.h
@@ -192,5 +192,7 @@ extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
 extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
 extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
 extern const struct bpf_func_proto bpf_get_current_comm_proto;
+extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
+extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
 
 #endif /* _LINUX_BPF_H */
include/linux/filter.h
@@ -411,6 +411,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 
 void bpf_int_jit_compile(struct bpf_prog *fp);
+bool bpf_helper_changes_skb_data(void *func);
 
 #ifdef CONFIG_BPF_JIT
 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
include/uapi/linux/bpf.h
@@ -256,6 +256,8 @@ enum bpf_func_id {
 	 * Return: classid if != 0
 	 */
 	BPF_FUNC_get_cgroup_classid,
+	BPF_FUNC_skb_vlan_push, /* bpf_skb_vlan_push(skb, vlan_proto, vlan_tci) */
+	BPF_FUNC_skb_vlan_pop, /* bpf_skb_vlan_pop(skb) */
 	__BPF_FUNC_MAX_ID,
 };
kernel/bpf/core.c
@@ -177,6 +177,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
 	return 0;
 }
+EXPORT_SYMBOL_GPL(__bpf_call_base);
 
 /**
  *	__bpf_prog_run - run eBPF program on a given context
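
The new export is what allows lib/test_bpf.c, which can be built as a module, to hand-assemble helper calls: the call instruction's imm field stores the helper's displacement from __bpf_call_base, and the interpreter adds the base back at run time. A sketch of that convention, mirroring the test code below (the wrapper function name is hypothetical, kernel context assumed):

static void build_vlan_push_call(struct bpf_insn *insn)
{
	/* build time: imm = helper address relative to __bpf_call_base */
	*insn = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     bpf_skb_vlan_push_proto.func - __bpf_call_base);
	/* run time, in the interpreter:
	 *	ret = (__bpf_call_base + insn->imm)(r1, r2, r3, r4, r5);
	 */
}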
lib/test_bpf.c
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/filter.h>
+#include <linux/bpf.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
@@ -355,6 +356,81 @@ static int bpf_fill_ja(struct bpf_test *self)
 	return __bpf_fill_ja(self, 12, 9);
 }
 
+static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
+{
+	unsigned int len = BPF_MAXINSNS;
+	struct sock_filter *insn;
+	int i;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	for (i = 0; i < len - 1; i += 2) {
+		insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0);
+		insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+					 SKF_AD_OFF + SKF_AD_CPU);
+	}
+
+	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee);
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+
+	return 0;
+}
+
+#define PUSH_CNT 68
+/* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
+static int bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
+{
+	unsigned int len = BPF_MAXINSNS;
+	struct bpf_insn *insn;
+	int i = 0, j, k = 0;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	insn[i++] = BPF_MOV64_REG(R6, R1);
+loop:
+	for (j = 0; j < PUSH_CNT; j++) {
+		insn[i++] = BPF_LD_ABS(BPF_B, 0);
+		insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
+		i++;
+		insn[i++] = BPF_MOV64_REG(R1, R6);
+		insn[i++] = BPF_MOV64_IMM(R2, 1);
+		insn[i++] = BPF_MOV64_IMM(R3, 2);
+		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+					 bpf_skb_vlan_push_proto.func - __bpf_call_base);
+		insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
+		i++;
+	}
+
+	for (j = 0; j < PUSH_CNT; j++) {
+		insn[i++] = BPF_LD_ABS(BPF_B, 0);
+		insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
+		i++;
+		insn[i++] = BPF_MOV64_REG(R1, R6);
+		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+					 bpf_skb_vlan_pop_proto.func - __bpf_call_base);
+		insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
+		i++;
+	}
+	if (++k < 5)
+		goto loop;
+
+	for (; i < len - 1; i++)
+		insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xbef);
+
+	insn[len - 1] = BPF_EXIT_INSN();
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+
+	return 0;
+}
+
 static struct bpf_test tests[] = {
 	{
 		"TAX",
@@ -4398,6 +4474,22 @@ static struct bpf_test tests[] = {
 		{ { 0, 0xababcbac } },
 		.fill_helper = bpf_fill_maxinsns11,
 	},
+	{
+		"BPF_MAXINSNS: ld_abs+get_processor_id",
+		{ },
+		CLASSIC,
+		{ },
+		{ { 1, 0xbee } },
+		.fill_helper = bpf_fill_ld_abs_get_processor_id,
+	},
+	{
+		"BPF_MAXINSNS: ld_abs+vlan_push/pop",
+		{ },
+		INTERNAL,
+		{ 0x34 },
+		{ { 1, 0xbef } },
+		.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
+	},
 };
 
 static struct net_device dev;
@@ -4551,14 +4643,14 @@ static int __run_one(const struct bpf_prog *fp, const void *data,
 	u64 start, finish;
 	int ret = 0, i;
 
-	start = ktime_to_us(ktime_get());
+	start = ktime_get_ns();
 
 	for (i = 0; i < runs; i++)
 		ret = BPF_PROG_RUN(fp, data);
 
-	finish = ktime_to_us(ktime_get());
+	finish = ktime_get_ns();
 
-	*duration = (finish - start) * 1000ULL;
+	*duration = finish - start;
 	do_div(*duration, runs);
 
 	return ret;
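
Switching __run_one() to nanosecond timestamps preserves resolution for short programs: with the old microsecond clock, any batch that started and finished inside the same microsecond reported a zero per-run duration. A worked example with assumed numbers:

/* 100 runs, 900 ns total (assumed numbers):
 *
 *	old: ktime_to_us() -> finish - start = 0 us,
 *	     *duration = 0 * 1000ULL, do_div by 100  -> 0 ns/run
 *	new: ktime_get_ns() -> finish - start = 900 ns,
 *	     *duration = 900, do_div by 100          -> 9 ns/run
 */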
net/core/filter.c
@@ -1437,6 +1437,52 @@ static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
 	.arg1_type      = ARG_PTR_TO_CTX,
 };
 
+static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
+{
+	struct sk_buff *skb = (struct sk_buff *) (long) r1;
+	__be16 vlan_proto = (__force __be16) r2;
+
+	if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
+		     vlan_proto != htons(ETH_P_8021AD)))
+		vlan_proto = htons(ETH_P_8021Q);
+
+	return skb_vlan_push(skb, vlan_proto, vlan_tci);
+}
+
+const struct bpf_func_proto bpf_skb_vlan_push_proto = {
+	.func           = bpf_skb_vlan_push,
+	.gpl_only       = false,
+	.ret_type       = RET_INTEGER,
+	.arg1_type      = ARG_PTR_TO_CTX,
+	.arg2_type      = ARG_ANYTHING,
+	.arg3_type      = ARG_ANYTHING,
+};
+EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);
+
+static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	struct sk_buff *skb = (struct sk_buff *) (long) r1;
+
+	return skb_vlan_pop(skb);
+}
+
+const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
+	.func           = bpf_skb_vlan_pop,
+	.gpl_only       = false,
+	.ret_type       = RET_INTEGER,
+	.arg1_type      = ARG_PTR_TO_CTX,
+};
+EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto);
+
+bool bpf_helper_changes_skb_data(void *func)
+{
+	if (func == bpf_skb_vlan_push)
+		return true;
+	if (func == bpf_skb_vlan_pop)
+		return true;
+	return false;
+}
+
 static const struct bpf_func_proto *
 sk_filter_func_proto(enum bpf_func_id func_id)
 {
@@ -1476,6 +1522,10 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
 		return &bpf_clone_redirect_proto;
 	case BPF_FUNC_get_cgroup_classid:
 		return &bpf_get_cgroup_classid_proto;
+	case BPF_FUNC_skb_vlan_push:
+		return &bpf_skb_vlan_push_proto;
+	case BPF_FUNC_skb_vlan_pop:
+		return &bpf_skb_vlan_pop_proto;
 	default:
 		return sk_filter_func_proto(func_id);
 	}
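
The "mixed endianness" comparison fix mentioned in the cover letter is visible in bpf_skb_vlan_push() above: r2 is forced to __be16 and compared only against htons() constants, so both sides of each comparison are in network byte order. An illustrative contrast (the broken variant is mine, not from any version of the patch):

__be16 vlan_proto = (__force __be16) r2;

/* broken: big-endian wire value vs. host-order constant; on little-endian
 * hosts this mis-detects valid protocols, and sparse flags the comparison */
if (vlan_proto != ETH_P_8021Q)
	vlan_proto = htons(ETH_P_8021Q);

/* correct, as the helper does: both operands in network byte order */
if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
	     vlan_proto != htons(ETH_P_8021AD)))
	vlan_proto = htons(ETH_P_8021Q);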