Commit 1adce1b9 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'x86_alternatives_for_v6.3_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 asm alternatives updates from Borislav Petkov:

 - Teach the static_call patching infrastructure to handle conditional
   tail calls properly which can be static calls too

 - Add proper struct alt_instr.flags which controls different aspects of
   insn patching behavior

* tag 'x86_alternatives_for_v6.3_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/static_call: Add support for Jcc tail-calls
  x86/alternatives: Teach text_poke_bp() to patch Jcc.d32 instructions
  x86/alternatives: Introduce int3_emulate_jcc()
  x86/alternatives: Add alt_instr.flags
parents d9de5ce8 923510c8
This diff is collapsed.
...@@ -184,6 +184,37 @@ void int3_emulate_ret(struct pt_regs *regs) ...@@ -184,6 +184,37 @@ void int3_emulate_ret(struct pt_regs *regs)
unsigned long ip = int3_emulate_pop(regs); unsigned long ip = int3_emulate_pop(regs);
int3_emulate_jmp(regs, ip); int3_emulate_jmp(regs, ip);
} }
/*
 * Emulate a conditional jump (Jcc) whose first byte was replaced by INT3:
 * evaluate condition code @cc against regs->flags and, when it holds, add
 * @disp to @ip before resuming execution at the resulting address.
 *
 * @cc is the low nibble of the Jcc opcode (0x0-0xf). Even/odd pairs test
 * the same flag(s); the low bit inverts the result (e.g. JZ vs JNZ).
 */
static __always_inline
void int3_emulate_jcc(struct pt_regs *regs, u8 cc, unsigned long ip, unsigned long disp)
{
/*
 * Flag mask tested by condition codes 0x0-0xb; one entry per
 * even/odd pair, indexed by cc >> 1.
 */
static const unsigned long jcc_mask[6] = {
[0] = X86_EFLAGS_OF,
[1] = X86_EFLAGS_CF,
[2] = X86_EFLAGS_ZF,
[3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
[4] = X86_EFLAGS_SF,
[5] = X86_EFLAGS_PF,
};
bool invert = cc & 1;
bool match;
if (cc < 0xc) {
match = regs->flags & jcc_mask[cc >> 1];
} else {
/* Condition codes 0xc-0xf (signed compares) test SF ^ OF ... */
match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
/* ... and 0xe/0xf additionally fold in ZF. */
if (cc >= 0xe)
match = match || (regs->flags & X86_EFLAGS_ZF);
}
/* Take the branch when the (possibly inverted) condition holds. */
if ((match && !invert) || (!match && invert))
ip += disp;
int3_emulate_jmp(regs, ip);
}
#endif /* !CONFIG_UML_X86 */ #endif /* !CONFIG_UML_X86 */
#endif /* _ASM_X86_TEXT_PATCHING_H */ #endif /* _ASM_X86_TEXT_PATCHING_H */
...@@ -282,27 +282,25 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, ...@@ -282,27 +282,25 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
*/ */
for (a = start; a < end; a++) { for (a = start; a < end; a++) {
int insn_buff_sz = 0; int insn_buff_sz = 0;
/* Mask away "NOT" flag bit for feature to test. */
u16 feature = a->cpuid & ~ALTINSTR_FLAG_INV;
instr = (u8 *)&a->instr_offset + a->instr_offset; instr = (u8 *)&a->instr_offset + a->instr_offset;
replacement = (u8 *)&a->repl_offset + a->repl_offset; replacement = (u8 *)&a->repl_offset + a->repl_offset;
BUG_ON(a->instrlen > sizeof(insn_buff)); BUG_ON(a->instrlen > sizeof(insn_buff));
BUG_ON(feature >= (NCAPINTS + NBUGINTS) * 32); BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
/* /*
* Patch if either: * Patch if either:
* - feature is present * - feature is present
* - feature not present but ALTINSTR_FLAG_INV is set to mean, * - feature not present but ALT_FLAG_NOT is set to mean,
* patch if feature is *NOT* present. * patch if feature is *NOT* present.
*/ */
if (!boot_cpu_has(feature) == !(a->cpuid & ALTINSTR_FLAG_INV)) if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT))
goto next; goto next;
DPRINTK("feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d)", DPRINTK("feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d)",
(a->cpuid & ALTINSTR_FLAG_INV) ? "!" : "", (a->flags & ALT_FLAG_NOT) ? "!" : "",
feature >> 5, a->cpuid >> 5,
feature & 0x1f, a->cpuid & 0x1f,
instr, instr, a->instrlen, instr, instr, a->instrlen,
replacement, a->replacementlen); replacement, a->replacementlen);
...@@ -340,6 +338,12 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, ...@@ -340,6 +338,12 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
} }
} }
static inline bool is_jcc32(struct insn *insn)
{
/* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80;
}
#if defined(CONFIG_RETPOLINE) && defined(CONFIG_OBJTOOL) #if defined(CONFIG_RETPOLINE) && defined(CONFIG_OBJTOOL)
/* /*
...@@ -378,12 +382,6 @@ static int emit_indirect(int op, int reg, u8 *bytes) ...@@ -378,12 +382,6 @@ static int emit_indirect(int op, int reg, u8 *bytes)
return i; return i;
} }
/* True when @insn is a Jcc.d32: 0x0f escape plus opcode 0x80-0x8f. */
static inline bool is_jcc32(struct insn *insn)
{
/* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80;
}
static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes) static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
{ {
u8 op = insn->opcode.bytes[0]; u8 op = insn->opcode.bytes[0];
...@@ -1772,6 +1770,11 @@ void text_poke_sync(void) ...@@ -1772,6 +1770,11 @@ void text_poke_sync(void)
on_each_cpu(do_sync_core, NULL, 1); on_each_cpu(do_sync_core, NULL, 1);
} }
/*
* NOTE: crazy scheme to allow patching Jcc.d32 but not increase the size of
* this thing. When len == 6 everything is prefixed with 0x0f and we map
* opcode to Jcc.d8, using len to distinguish.
*/
struct text_poke_loc { struct text_poke_loc {
/* addr := _stext + rel_addr */ /* addr := _stext + rel_addr */
s32 rel_addr; s32 rel_addr;
...@@ -1893,6 +1896,10 @@ noinstr int poke_int3_handler(struct pt_regs *regs) ...@@ -1893,6 +1896,10 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
int3_emulate_jmp(regs, (long)ip + tp->disp); int3_emulate_jmp(regs, (long)ip + tp->disp);
break; break;
case 0x70 ... 0x7f: /* Jcc */
int3_emulate_jcc(regs, tp->opcode & 0xf, (long)ip, tp->disp);
break;
default: default:
BUG(); BUG();
} }
...@@ -1966,16 +1973,26 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries ...@@ -1966,16 +1973,26 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
* Second step: update all but the first byte of the patched range. * Second step: update all but the first byte of the patched range.
*/ */
for (do_sync = 0, i = 0; i < nr_entries; i++) { for (do_sync = 0, i = 0; i < nr_entries; i++) {
u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, }; u8 old[POKE_MAX_OPCODE_SIZE+1] = { tp[i].old, };
u8 _new[POKE_MAX_OPCODE_SIZE+1];
const u8 *new = tp[i].text;
int len = tp[i].len; int len = tp[i].len;
if (len - INT3_INSN_SIZE > 0) { if (len - INT3_INSN_SIZE > 0) {
memcpy(old + INT3_INSN_SIZE, memcpy(old + INT3_INSN_SIZE,
text_poke_addr(&tp[i]) + INT3_INSN_SIZE, text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
len - INT3_INSN_SIZE); len - INT3_INSN_SIZE);
if (len == 6) {
_new[0] = 0x0f;
memcpy(_new + 1, new, 5);
new = _new;
}
text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE, text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
(const char *)tp[i].text + INT3_INSN_SIZE, new + INT3_INSN_SIZE,
len - INT3_INSN_SIZE); len - INT3_INSN_SIZE);
do_sync++; do_sync++;
} }
...@@ -2003,8 +2020,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries ...@@ -2003,8 +2020,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
* The old instruction is recorded so that the event can be * The old instruction is recorded so that the event can be
* processed forwards or backwards. * processed forwards or backwards.
*/ */
perf_event_text_poke(text_poke_addr(&tp[i]), old, len, perf_event_text_poke(text_poke_addr(&tp[i]), old, len, new, len);
tp[i].text, len);
} }
if (do_sync) { if (do_sync) {
...@@ -2021,10 +2037,15 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries ...@@ -2021,10 +2037,15 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
* replacing opcode. * replacing opcode.
*/ */
for (do_sync = 0, i = 0; i < nr_entries; i++) { for (do_sync = 0, i = 0; i < nr_entries; i++) {
if (tp[i].text[0] == INT3_INSN_OPCODE) u8 byte = tp[i].text[0];
if (tp[i].len == 6)
byte = 0x0f;
if (byte == INT3_INSN_OPCODE)
continue; continue;
text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE); text_poke(text_poke_addr(&tp[i]), &byte, INT3_INSN_SIZE);
do_sync++; do_sync++;
} }
...@@ -2042,9 +2063,11 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr, ...@@ -2042,9 +2063,11 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
const void *opcode, size_t len, const void *emulate) const void *opcode, size_t len, const void *emulate)
{ {
struct insn insn; struct insn insn;
int ret, i; int ret, i = 0;
memcpy((void *)tp->text, opcode, len); if (len == 6)
i = 1;
memcpy((void *)tp->text, opcode+i, len-i);
if (!emulate) if (!emulate)
emulate = opcode; emulate = opcode;
...@@ -2055,6 +2078,13 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr, ...@@ -2055,6 +2078,13 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
tp->len = len; tp->len = len;
tp->opcode = insn.opcode.bytes[0]; tp->opcode = insn.opcode.bytes[0];
if (is_jcc32(&insn)) {
/*
* Map Jcc.d32 onto Jcc.d8 and use len to distinguish.
*/
tp->opcode = insn.opcode.bytes[1] - 0x10;
}
switch (tp->opcode) { switch (tp->opcode) {
case RET_INSN_OPCODE: case RET_INSN_OPCODE:
case JMP32_INSN_OPCODE: case JMP32_INSN_OPCODE:
...@@ -2071,7 +2101,6 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr, ...@@ -2071,7 +2101,6 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
BUG_ON(len != insn.length); BUG_ON(len != insn.length);
} }
switch (tp->opcode) { switch (tp->opcode) {
case INT3_INSN_OPCODE: case INT3_INSN_OPCODE:
case RET_INSN_OPCODE: case RET_INSN_OPCODE:
...@@ -2080,6 +2109,7 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr, ...@@ -2080,6 +2109,7 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
case CALL_INSN_OPCODE: case CALL_INSN_OPCODE:
case JMP32_INSN_OPCODE: case JMP32_INSN_OPCODE:
case JMP8_INSN_OPCODE: case JMP8_INSN_OPCODE:
case 0x70 ... 0x7f: /* Jcc */
tp->disp = insn.immediate.value; tp->disp = insn.immediate.value;
break; break;
......
...@@ -464,50 +464,26 @@ static void kprobe_emulate_call(struct kprobe *p, struct pt_regs *regs) ...@@ -464,50 +464,26 @@ static void kprobe_emulate_call(struct kprobe *p, struct pt_regs *regs)
} }
NOKPROBE_SYMBOL(kprobe_emulate_call); NOKPROBE_SYMBOL(kprobe_emulate_call);
static nokprobe_inline static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
void __kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs, bool cond)
{ {
unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size; unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
if (cond) ip += p->ainsn.rel32;
ip += p->ainsn.rel32;
int3_emulate_jmp(regs, ip); int3_emulate_jmp(regs, ip);
} }
static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
{
__kprobe_emulate_jmp(p, regs, true);
}
NOKPROBE_SYMBOL(kprobe_emulate_jmp); NOKPROBE_SYMBOL(kprobe_emulate_jmp);
static const unsigned long jcc_mask[6] = {
[0] = X86_EFLAGS_OF,
[1] = X86_EFLAGS_CF,
[2] = X86_EFLAGS_ZF,
[3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
[4] = X86_EFLAGS_SF,
[5] = X86_EFLAGS_PF,
};
static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs) static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
{ {
bool invert = p->ainsn.jcc.type & 1; unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
bool match;
if (p->ainsn.jcc.type < 0xc) { int3_emulate_jcc(regs, p->ainsn.jcc.type, ip, p->ainsn.rel32);
match = regs->flags & jcc_mask[p->ainsn.jcc.type >> 1];
} else {
match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
if (p->ainsn.jcc.type >= 0xe)
match = match || (regs->flags & X86_EFLAGS_ZF);
}
__kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert));
} }
NOKPROBE_SYMBOL(kprobe_emulate_jcc); NOKPROBE_SYMBOL(kprobe_emulate_jcc);
static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs) static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
{ {
unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
bool match; bool match;
if (p->ainsn.loop.type != 3) { /* LOOP* */ if (p->ainsn.loop.type != 3) { /* LOOP* */
...@@ -535,7 +511,9 @@ static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs) ...@@ -535,7 +511,9 @@ static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
else if (p->ainsn.loop.type == 1) /* LOOPE */ else if (p->ainsn.loop.type == 1) /* LOOPE */
match = match && (regs->flags & X86_EFLAGS_ZF); match = match && (regs->flags & X86_EFLAGS_ZF);
__kprobe_emulate_jmp(p, regs, match); if (match)
ip += p->ainsn.rel32;
int3_emulate_jmp(regs, ip);
} }
NOKPROBE_SYMBOL(kprobe_emulate_loop); NOKPROBE_SYMBOL(kprobe_emulate_loop);
......
...@@ -9,6 +9,7 @@ enum insn_type { ...@@ -9,6 +9,7 @@ enum insn_type {
NOP = 1, /* site cond-call */ NOP = 1, /* site cond-call */
JMP = 2, /* tramp / site tail-call */ JMP = 2, /* tramp / site tail-call */
RET = 3, /* tramp / site cond-tail-call */ RET = 3, /* tramp / site cond-tail-call */
JCC = 4,
}; };
/* /*
...@@ -25,12 +26,40 @@ static const u8 xor5rax[] = { 0x2e, 0x2e, 0x2e, 0x31, 0xc0 }; ...@@ -25,12 +26,40 @@ static const u8 xor5rax[] = { 0x2e, 0x2e, 0x2e, 0x31, 0xc0 };
static const u8 retinsn[] = { RET_INSN_OPCODE, 0xcc, 0xcc, 0xcc, 0xcc }; static const u8 retinsn[] = { RET_INSN_OPCODE, 0xcc, 0xcc, 0xcc, 0xcc };
/*
 * Identify a Jcc.d32 instruction at @insn: the 0x0f escape byte followed
 * by an opcode byte in the 0x80-0x8f range.  Returns that second opcode
 * byte on a match, 0 otherwise.
 */
static u8 __is_Jcc(u8 *insn) /* Jcc.d32 */
{
	if (insn[0] != 0x0f)
		return 0;

	return ((insn[1] & 0xf0) == 0x80) ? insn[1] : 0;
}
/*
 * Out-of-line "ret; int3" landing pad used as the branch target when a
 * conditional tail-call site is patched to do nothing (func == NULL).
 * Defined in toplevel asm so it has a real symbol and function type.
 */
extern void __static_call_return(void);
asm (".global __static_call_return\n\t"
".type __static_call_return, @function\n\t"
ASM_FUNC_ALIGN "\n\t"
"__static_call_return:\n\t"
ANNOTATE_NOENDBR
ANNOTATE_RETPOLINE_SAFE
"ret; int3\n\t"
".size __static_call_return, . - __static_call_return \n\t");
static void __ref __static_call_transform(void *insn, enum insn_type type, static void __ref __static_call_transform(void *insn, enum insn_type type,
void *func, bool modinit) void *func, bool modinit)
{ {
const void *emulate = NULL; const void *emulate = NULL;
int size = CALL_INSN_SIZE; int size = CALL_INSN_SIZE;
const void *code; const void *code;
u8 op, buf[6];
if ((type == JMP || type == RET) && (op = __is_Jcc(insn)))
type = JCC;
switch (type) { switch (type) {
case CALL: case CALL:
...@@ -57,6 +86,20 @@ static void __ref __static_call_transform(void *insn, enum insn_type type, ...@@ -57,6 +86,20 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
else else
code = &retinsn; code = &retinsn;
break; break;
case JCC:
if (!func) {
func = __static_call_return;
if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
func = x86_return_thunk;
}
buf[0] = 0x0f;
__text_gen_insn(buf+1, op, insn+1, func, 5);
code = buf;
size = 6;
break;
} }
if (memcmp(insn, code, size) == 0) if (memcmp(insn, code, size) == 0)
...@@ -68,9 +111,9 @@ static void __ref __static_call_transform(void *insn, enum insn_type type, ...@@ -68,9 +111,9 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
text_poke_bp(insn, code, size, emulate); text_poke_bp(insn, code, size, emulate);
} }
static void __static_call_validate(void *insn, bool tail, bool tramp) static void __static_call_validate(u8 *insn, bool tail, bool tramp)
{ {
u8 opcode = *(u8 *)insn; u8 opcode = insn[0];
if (tramp && memcmp(insn+5, tramp_ud, 3)) { if (tramp && memcmp(insn+5, tramp_ud, 3)) {
pr_err("trampoline signature fail"); pr_err("trampoline signature fail");
...@@ -79,7 +122,8 @@ static void __static_call_validate(void *insn, bool tail, bool tramp) ...@@ -79,7 +122,8 @@ static void __static_call_validate(void *insn, bool tail, bool tramp)
if (tail) { if (tail) {
if (opcode == JMP32_INSN_OPCODE || if (opcode == JMP32_INSN_OPCODE ||
opcode == RET_INSN_OPCODE) opcode == RET_INSN_OPCODE ||
__is_Jcc(insn))
return; return;
} else { } else {
if (opcode == CALL_INSN_OPCODE || if (opcode == CALL_INSN_OPCODE ||
......
...@@ -11,11 +11,11 @@ ...@@ -11,11 +11,11 @@
#define JUMP_NEW_OFFSET 4 #define JUMP_NEW_OFFSET 4
#define JUMP_KEY_OFFSET 8 #define JUMP_KEY_OFFSET 8
#define ALT_ENTRY_SIZE 12 #define ALT_ENTRY_SIZE 14
#define ALT_ORIG_OFFSET 0 #define ALT_ORIG_OFFSET 0
#define ALT_NEW_OFFSET 4 #define ALT_NEW_OFFSET 4
#define ALT_FEATURE_OFFSET 8 #define ALT_FEATURE_OFFSET 8
#define ALT_ORIG_LEN_OFFSET 10 #define ALT_ORIG_LEN_OFFSET 12
#define ALT_NEW_LEN_OFFSET 11 #define ALT_NEW_LEN_OFFSET 13
#endif /* _X86_ARCH_SPECIAL_H */ #endif /* _X86_ARCH_SPECIAL_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment