Commit 91a87a58 authored by Jakub Kicinski, committed by Alexei Starovoitov

nfp: bpf: split up the skip flag

We fail program loading if a jump lands on a skipped instruction.
This is for historical reasons: we used to skip only instructions
optimized out based on prior context, so the optimization would be
buggy if a jump went directly to such an instruction (because the
jump would bypass the context the optimization depended on).

There are cases where instructions can be skipped regardless of any
context; for example, there is no point in generating code for:

	 r0 |= 0

We will also soon support dropping dead code, so make the skip
logic differentiate between "optimized out based on preceding
context" and other skip types.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent e90287f3
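
For illustration, the distinction this patch introduces can be modeled in a few
lines of plain C. This is a hedged sketch, not driver code: the flag names
follow the patch, but the can_branch_to() helper and the standalone harness are
hypothetical.

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

/* Flag values as introduced by this patch */
#define FLAG_INSN_SKIP_NOOP           BIT(3)
#define FLAG_INSN_SKIP_PREC_DEPENDENT BIT(4)
#define FLAG_INSN_SKIP_MASK \
        (FLAG_INSN_SKIP_NOOP | FLAG_INSN_SKIP_PREC_DEPENDENT)

struct insn_meta {
        unsigned short flags;
};

/* A noop like "r0 |= 0" changes no state, so a branch landing on it
 * can simply fall through to the next generated instruction.  An
 * instruction folded into its predecessors (e.g. a store merged into
 * a load/store gather sequence) is only correct to drop when the
 * predecessors run, so a branch straight onto it must be rejected.
 */
static bool can_branch_to(const struct insn_meta *meta)
{
        return !(meta->flags & FLAG_INSN_SKIP_PREC_DEPENDENT);
}

int main(void)
{
        struct insn_meta noop   = { .flags = FLAG_INSN_SKIP_NOOP };
        struct insn_meta folded = { .flags = FLAG_INSN_SKIP_PREC_DEPENDENT };

        printf("branch to noop skip:   %s\n", can_branch_to(&noop) ? "ok" : "reject");
        printf("branch to folded skip: %s\n", can_branch_to(&folded) ? "ok" : "reject");
        return 0;
}
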
@@ -1266,7 +1266,7 @@ wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 	u64 imm = insn->imm; /* sign extend */
 
 	if (skip) {
-		meta->skip = true;
+		meta->flags |= FLAG_INSN_SKIP_NOOP;
 		return 0;
 	}
@@ -1296,7 +1296,7 @@ wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 	const struct bpf_insn *insn = &meta->insn;
 
 	if (skip) {
-		meta->skip = true;
+		meta->flags |= FLAG_INSN_SKIP_NOOP;
 		return 0;
 	}
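
For context, the skip argument taken by these two helpers is computed by the
per-opcode wrappers that call them. The callers are outside this diff; a
hypothetical sketch of their shape, based on the commit message's "r0 |= 0"
example:

/* Hypothetical caller shape (not part of this diff): OR with an
 * all-zero immediate changes nothing, so ask wrp_alu64_imm() to emit
 * no code and tag the instruction FLAG_INSN_SKIP_NOOP instead.
 */
static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}
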
@@ -3395,7 +3395,7 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
 	int err;
 
 	list_for_each_entry(meta, &nfp_prog->insns, l) {
-		if (meta->skip)
+		if (meta->flags & FLAG_INSN_SKIP_MASK)
 			continue;
 		if (BPF_CLASS(meta->insn.code) != BPF_JMP)
 			continue;
@@ -3439,7 +3439,7 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
 		jmp_dst = meta->jmp_dst;
 
-		if (jmp_dst->skip) {
+		if (jmp_dst->flags & FLAG_INSN_SKIP_PREC_DEPENDENT) {
 			pr_err("Branch landing on removed instruction!!\n");
 			return -ELOOP;
 		}
 
@@ -3689,7 +3689,7 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
 			return nfp_prog->error;
 		}
 
-		if (meta->skip) {
+		if (meta->flags & FLAG_INSN_SKIP_MASK) {
 			nfp_prog->n_translated++;
 			continue;
 		}
@@ -3737,10 +3737,10 @@ static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
 		/* Programs start with R6 = R1 but we ignore the skb pointer */
 		if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
 		    insn.src_reg == 1 && insn.dst_reg == 6)
-			meta->skip = true;
+			meta->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
 
 		/* Return as soon as something doesn't match */
-		if (!meta->skip)
+		if (!(meta->flags & FLAG_INSN_SKIP_MASK))
 			return;
 	}
 }
@@ -3755,7 +3755,7 @@ static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
 	list_for_each_entry(meta, &nfp_prog->insns, l) {
 		struct bpf_insn insn = meta->insn;
 
-		if (meta->skip)
+		if (meta->flags & FLAG_INSN_SKIP_MASK)
 			continue;
 
 		if (BPF_CLASS(insn.code) != BPF_ALU &&
@@ -3829,7 +3829,7 @@ static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
 		if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
 			continue;
 
-		meta2->skip = true;
+		meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
 	}
 }
 
@@ -3869,8 +3869,8 @@ static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
 		    meta3->flags & FLAG_INSN_IS_JUMP_DST)
 			continue;
 
-		meta2->skip = true;
-		meta3->skip = true;
+		meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
+		meta3->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
 	}
 }
 
@@ -4065,7 +4065,8 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
 			}
 
 			head_ld_meta->paired_st = &head_st_meta->insn;
-			head_st_meta->skip = true;
+			head_st_meta->flags |=
+				FLAG_INSN_SKIP_PREC_DEPENDENT;
 		} else {
 			head_ld_meta->ldst_gather_len = 0;
 		}
@@ -4098,8 +4099,8 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
 				head_ld_meta = meta1;
 				head_st_meta = meta2;
 			} else {
-				meta1->skip = true;
-				meta2->skip = true;
+				meta1->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
+				meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
 			}
 
 			head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
@@ -4124,7 +4125,7 @@ static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog)
 		if (meta->flags & FLAG_INSN_IS_JUMP_DST)
 			cache_avail = false;
 
-		if (meta->skip)
+		if (meta->flags & FLAG_INSN_SKIP_MASK)
 			continue;
 
 		insn = &meta->insn;
@@ -4210,7 +4211,7 @@ static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog)
 	}
 
 	list_for_each_entry(meta, &nfp_prog->insns, l) {
-		if (meta->skip)
+		if (meta->flags & FLAG_INSN_SKIP_MASK)
 			continue;
 
 		if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) {
@@ -4246,7 +4247,8 @@ static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
 	u32 id;
 
 	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
-		if (meta1->skip || meta2->skip)
+		if (meta1->flags & FLAG_INSN_SKIP_MASK ||
+		    meta2->flags & FLAG_INSN_SKIP_MASK)
 			continue;
 
 		if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) ||
@@ -243,6 +243,13 @@ struct nfp_bpf_reg_state {
 #define FLAG_INSN_IS_JUMP_DST			BIT(0)
 #define FLAG_INSN_IS_SUBPROG_START		BIT(1)
 #define FLAG_INSN_PTR_CALLER_STACK_FRAME	BIT(2)
+/* Instruction is pointless, noop even on its own */
+#define FLAG_INSN_SKIP_NOOP			BIT(3)
+/* Instruction is optimized out based on preceding instructions */
+#define FLAG_INSN_SKIP_PREC_DEPENDENT		BIT(4)
+
+#define FLAG_INSN_SKIP_MASK		(FLAG_INSN_SKIP_NOOP | \
+					 FLAG_INSN_SKIP_PREC_DEPENDENT)
 
 /**
  * struct nfp_insn_meta - BPF instruction wrapper
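
Design note: most passes only need to know that no code was generated for an
instruction, so they test the combined FLAG_INSN_SKIP_MASK; only
nfp_fixup_branches() cares about the reason, rejecting branches that land on
FLAG_INSN_SKIP_PREC_DEPENDENT instructions specifically.
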
@@ -271,7 +278,6 @@ struct nfp_bpf_reg_state {
  * @n: eBPF instruction number
  * @flags: eBPF instruction extra optimization flags
  * @subprog_idx: index of subprogram to which the instruction belongs
- * @skip: skip this instruction (optimized out)
  * @double_cb: callback for second part of the instruction
  * @l: link on nfp_prog->insns list
  */
@@ -319,7 +325,6 @@ struct nfp_insn_meta {
 	unsigned short n;
 	unsigned short flags;
 	unsigned short subprog_idx;
-	bool skip;
 	instr_cb_t double_cb;
 	struct list_head l;