Commit b553a6ec authored by Daniel Borkmann, committed by Alexei Starovoitov

bpf: Simplify __bpf_arch_text_poke poke type handling

Given that we have BPF_MOD_NOP_TO_{CALL,JUMP}, BPF_MOD_{CALL,JUMP}_TO_NOP
and BPF_MOD_{CALL,JUMP}_TO_{CALL,JUMP} poke types and that we also pass in
old_addr as well as new_addr, it's a bit redundant and unnecessarily
complicates __bpf_arch_text_poke() itself since we can derive the same from
the *_addr that were passed in. Hence simplify and use BPF_MOD_{CALL,JUMP}
as types which also allows to clean up call-sites.

In addition to that, __bpf_arch_text_poke() currently verifies that text
matches expected old_insn before we invoke text_poke_bp(). Also add a check
on new_insn and skip rewrite if it already matches. Reason why this is rather
useful is that it avoids making any special casing in prog_array_map_poke_run()
when old and new prog were NULL and has the benefit that also for this case
we perform a check on text whether it really matches our expectations.
Suggested-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/fcb00a2b0b288d6c73de4ef58116a821c8fe8f2f.1574555798.git.daniel@iogearbox.net
parent f9a7cf6e
...@@ -269,76 +269,42 @@ static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, ...@@ -269,76 +269,42 @@ static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *old_addr, void *new_addr, void *old_addr, void *new_addr,
const bool text_live) const bool text_live)
{ {
int (*emit_patch_fn)(u8 **pprog, void *func, void *ip);
const u8 *nop_insn = ideal_nops[NOP_ATOMIC5]; const u8 *nop_insn = ideal_nops[NOP_ATOMIC5];
u8 old_insn[X86_PATCH_SIZE] = {}; u8 old_insn[X86_PATCH_SIZE];
u8 new_insn[X86_PATCH_SIZE] = {}; u8 new_insn[X86_PATCH_SIZE];
u8 *prog; u8 *prog;
int ret; int ret;
switch (t) { memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
case BPF_MOD_NOP_TO_CALL ... BPF_MOD_CALL_TO_NOP: if (old_addr) {
emit_patch_fn = emit_call; prog = old_insn;
break; ret = t == BPF_MOD_CALL ?
case BPF_MOD_NOP_TO_JUMP ... BPF_MOD_JUMP_TO_NOP: emit_call(&prog, old_addr, ip) :
emit_patch_fn = emit_jump; emit_jump(&prog, old_addr, ip);
break; if (ret)
default: return ret;
return -ENOTSUPP;
} }
switch (t) { memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
case BPF_MOD_NOP_TO_CALL: if (new_addr) {
case BPF_MOD_NOP_TO_JUMP: prog = new_insn;
if (!old_addr && new_addr) { ret = t == BPF_MOD_CALL ?
memcpy(old_insn, nop_insn, X86_PATCH_SIZE); emit_call(&prog, new_addr, ip) :
emit_jump(&prog, new_addr, ip);
prog = new_insn; if (ret)
ret = emit_patch_fn(&prog, new_addr, ip); return ret;
if (ret)
return ret;
break;
}
return -ENXIO;
case BPF_MOD_CALL_TO_CALL:
case BPF_MOD_JUMP_TO_JUMP:
if (old_addr && new_addr) {
prog = old_insn;
ret = emit_patch_fn(&prog, old_addr, ip);
if (ret)
return ret;
prog = new_insn;
ret = emit_patch_fn(&prog, new_addr, ip);
if (ret)
return ret;
break;
}
return -ENXIO;
case BPF_MOD_CALL_TO_NOP:
case BPF_MOD_JUMP_TO_NOP:
if (old_addr && !new_addr) {
memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
prog = old_insn;
ret = emit_patch_fn(&prog, old_addr, ip);
if (ret)
return ret;
break;
}
return -ENXIO;
default:
return -ENOTSUPP;
} }
ret = -EBUSY; ret = -EBUSY;
mutex_lock(&text_mutex); mutex_lock(&text_mutex);
if (memcmp(ip, old_insn, X86_PATCH_SIZE)) if (memcmp(ip, old_insn, X86_PATCH_SIZE))
goto out; goto out;
if (text_live) if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL); if (text_live)
else text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
memcpy(ip, new_insn, X86_PATCH_SIZE); else
memcpy(ip, new_insn, X86_PATCH_SIZE);
}
ret = 0; ret = 0;
out: out:
mutex_unlock(&text_mutex); mutex_unlock(&text_mutex);
...@@ -465,7 +431,6 @@ static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke, ...@@ -465,7 +431,6 @@ static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
static void bpf_tail_call_direct_fixup(struct bpf_prog *prog) static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{ {
static const enum bpf_text_poke_type type = BPF_MOD_NOP_TO_JUMP;
struct bpf_jit_poke_descriptor *poke; struct bpf_jit_poke_descriptor *poke;
struct bpf_array *array; struct bpf_array *array;
struct bpf_prog *target; struct bpf_prog *target;
...@@ -490,7 +455,7 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog) ...@@ -490,7 +455,7 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
* read-only. Both modifications on the given image * read-only. Both modifications on the given image
* are under text_mutex to avoid interference. * are under text_mutex to avoid interference.
*/ */
ret = __bpf_arch_text_poke(poke->ip, type, NULL, ret = __bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP, NULL,
(u8 *)target->bpf_func + (u8 *)target->bpf_func +
poke->adj_off, false); poke->adj_off, false);
BUG_ON(ret < 0); BUG_ON(ret < 0);
......
...@@ -1324,14 +1324,8 @@ static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, ...@@ -1324,14 +1324,8 @@ static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
#endif /* CONFIG_INET */ #endif /* CONFIG_INET */
enum bpf_text_poke_type { enum bpf_text_poke_type {
/* All call-related pokes. */ BPF_MOD_CALL,
BPF_MOD_NOP_TO_CALL, BPF_MOD_JUMP,
BPF_MOD_CALL_TO_CALL,
BPF_MOD_CALL_TO_NOP,
/* All jump-related pokes. */
BPF_MOD_NOP_TO_JUMP,
BPF_MOD_JUMP_TO_JUMP,
BPF_MOD_JUMP_TO_NOP,
}; };
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
......
...@@ -746,19 +746,9 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key, ...@@ -746,19 +746,9 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
struct bpf_prog *old, struct bpf_prog *old,
struct bpf_prog *new) struct bpf_prog *new)
{ {
enum bpf_text_poke_type type;
struct prog_poke_elem *elem; struct prog_poke_elem *elem;
struct bpf_array_aux *aux; struct bpf_array_aux *aux;
if (!old && new)
type = BPF_MOD_NOP_TO_JUMP;
else if (old && !new)
type = BPF_MOD_JUMP_TO_NOP;
else if (old && new)
type = BPF_MOD_JUMP_TO_JUMP;
else
return;
aux = container_of(map, struct bpf_array, map)->aux; aux = container_of(map, struct bpf_array, map)->aux;
WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex)); WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));
...@@ -806,7 +796,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key, ...@@ -806,7 +796,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
poke->tail_call.key != key) poke->tail_call.key != key)
continue; continue;
ret = bpf_arch_text_poke(poke->ip, type, ret = bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP,
old ? (u8 *)old->bpf_func + old ? (u8 *)old->bpf_func +
poke->adj_off : NULL, poke->adj_off : NULL,
new ? (u8 *)new->bpf_func + new ? (u8 *)new->bpf_func +
......
...@@ -77,7 +77,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr) ...@@ -77,7 +77,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
int err; int err;
if (fentry_cnt + fexit_cnt == 0) { if (fentry_cnt + fexit_cnt == 0) {
err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL_TO_NOP, err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL,
old_image, NULL); old_image, NULL);
tr->selector = 0; tr->selector = 0;
goto out; goto out;
...@@ -105,12 +105,12 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr) ...@@ -105,12 +105,12 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
if (tr->selector) if (tr->selector)
/* progs already running at this address */ /* progs already running at this address */
err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL_TO_CALL, err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL,
old_image, new_image); old_image, new_image);
else else
/* first time registering */ /* first time registering */
err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_NOP_TO_CALL, err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL, NULL,
NULL, new_image); new_image);
if (err) if (err)
goto out; goto out;
tr->selector++; tr->selector++;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment