Commit ef06e682 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:

 - Revert the moving of the jump labels initialisation before
   setup_machine_fdt(). The bug was fixed in drivers/char/random.c.

 - Ftrace fixes: branch range check and consistent handling of PLTs.

 - Clean rather than invalidate FROM_DEVICE buffers at start of DMA
   transfer (safer if such buffer is mapped in user space). A cache
   invalidation is done already at the end of the transfer.

 - A couple of clean-ups (unexport symbol, remove unused label).

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: mm: Don't invalidate FROM_DEVICE buffers at start of DMA transfer
  arm64/cpufeature: Unexport set_cpu_feature()
  arm64: ftrace: remove redundant label
  arm64: ftrace: consistently handle PLTs.
  arm64: ftrace: fix branch range checks
  Revert "arm64: Initialize jump labels before setup_machine_fdt()"
parents cc2fb31d c50f11c6
...@@ -3101,7 +3101,6 @@ void cpu_set_feature(unsigned int num) ...@@ -3101,7 +3101,6 @@ void cpu_set_feature(unsigned int num)
WARN_ON(num >= MAX_CPU_FEATURES); WARN_ON(num >= MAX_CPU_FEATURES);
elf_hwcap |= BIT(num); elf_hwcap |= BIT(num);
} }
EXPORT_SYMBOL_GPL(cpu_set_feature);
bool cpu_have_feature(unsigned int num) bool cpu_have_feature(unsigned int num)
{ {
......
...@@ -102,7 +102,6 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) ...@@ -102,7 +102,6 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
* x19-x29 per the AAPCS, and we created frame records upon entry, so we need * x19-x29 per the AAPCS, and we created frame records upon entry, so we need
* to restore x0-x8, x29, and x30. * to restore x0-x8, x29, and x30.
*/ */
ftrace_common_return:
/* Restore function arguments */ /* Restore function arguments */
ldp x0, x1, [sp] ldp x0, x1, [sp]
ldp x2, x3, [sp, #S_X2] ldp x2, x3, [sp, #S_X2]
......
...@@ -78,46 +78,75 @@ static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr) ...@@ -78,46 +78,75 @@ static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
} }
/* /*
* Turn on the call to ftrace_caller() in instrumented function * Find the address the callsite must branch to in order to reach '*addr'.
*
* Due to the limited range of 'BL' instructions, modules may be placed too far
* away to branch directly and must use a PLT.
*
* Returns true when '*addr' contains a reachable target address, or has been
* modified to contain a PLT address. Returns false otherwise.
*/ */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
struct module *mod,
unsigned long *addr)
{ {
unsigned long pc = rec->ip; unsigned long pc = rec->ip;
u32 old, new; long offset = (long)*addr - (long)pc;
long offset = (long)pc - (long)addr;
if (offset < -SZ_128M || offset >= SZ_128M) {
struct module *mod;
struct plt_entry *plt; struct plt_entry *plt;
/*
* When the target is within range of the 'BL' instruction, use 'addr'
* as-is and branch to that directly.
*/
if (offset >= -SZ_128M && offset < SZ_128M)
return true;
/*
* When the target is outside of the range of a 'BL' instruction, we
* must use a PLT to reach it. We can only place PLTs for modules, and
* only when module PLT support is built-in.
*/
if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
return -EINVAL; return false;
/* /*
* On kernels that support module PLTs, the offset between the * 'mod' is only set at module load time, but if we end up
* branch instruction and its target may legally exceed the * dealing with an out-of-range condition, we can assume it
* range of an ordinary relative 'bl' opcode. In this case, we * is due to a module being loaded far away from the kernel.
* need to branch via a trampoline in the module.
* *
* NOTE: __module_text_address() must be called with preemption * NOTE: __module_text_address() must be called with preemption
* disabled, but we can rely on ftrace_lock to ensure that 'mod' * disabled, but we can rely on ftrace_lock to ensure that 'mod'
* retains its validity throughout the remainder of this code. * retains its validity throughout the remainder of this code.
*/ */
if (!mod) {
preempt_disable(); preempt_disable();
mod = __module_text_address(pc); mod = __module_text_address(pc);
preempt_enable(); preempt_enable();
}
if (WARN_ON(!mod)) if (WARN_ON(!mod))
return -EINVAL; return false;
plt = get_ftrace_plt(mod, addr); plt = get_ftrace_plt(mod, *addr);
if (!plt) { if (!plt) {
pr_err("ftrace: no module PLT for %ps\n", (void *)addr); pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
return -EINVAL; return false;
} }
addr = (unsigned long)plt; *addr = (unsigned long)plt;
} return true;
}
/*
* Turn on the call to ftrace_caller() in instrumented function
*/
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long pc = rec->ip;
u32 old, new;
if (!ftrace_find_callable_addr(rec, NULL, &addr))
return -EINVAL;
old = aarch64_insn_gen_nop(); old = aarch64_insn_gen_nop();
new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
...@@ -132,6 +161,11 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, ...@@ -132,6 +161,11 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long pc = rec->ip; unsigned long pc = rec->ip;
u32 old, new; u32 old, new;
if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
return -EINVAL;
if (!ftrace_find_callable_addr(rec, NULL, &addr))
return -EINVAL;
old = aarch64_insn_gen_branch_imm(pc, old_addr, old = aarch64_insn_gen_branch_imm(pc, old_addr,
AARCH64_INSN_BRANCH_LINK); AARCH64_INSN_BRANCH_LINK);
new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
...@@ -181,54 +215,15 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, ...@@ -181,54 +215,15 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr) unsigned long addr)
{ {
unsigned long pc = rec->ip; unsigned long pc = rec->ip;
bool validate = true;
u32 old = 0, new; u32 old = 0, new;
long offset = (long)pc - (long)addr;
if (offset < -SZ_128M || offset >= SZ_128M) {
u32 replaced;
if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) if (!ftrace_find_callable_addr(rec, mod, &addr))
return -EINVAL; return -EINVAL;
/* old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
* 'mod' is only set at module load time, but if we end up
* dealing with an out-of-range condition, we can assume it
* is due to a module being loaded far away from the kernel.
*/
if (!mod) {
preempt_disable();
mod = __module_text_address(pc);
preempt_enable();
if (WARN_ON(!mod))
return -EINVAL;
}
/*
* The instruction we are about to patch may be a branch and
* link instruction that was redirected via a PLT entry. In
* this case, the normal validation will fail, but we can at
* least check that we are dealing with a branch and link
* instruction that points into the right module.
*/
if (aarch64_insn_read((void *)pc, &replaced))
return -EFAULT;
if (!aarch64_insn_is_bl(replaced) ||
!within_module(pc + aarch64_get_branch_offset(replaced),
mod))
return -EINVAL;
validate = false;
} else {
old = aarch64_insn_gen_branch_imm(pc, addr,
AARCH64_INSN_BRANCH_LINK);
}
new = aarch64_insn_gen_nop(); new = aarch64_insn_gen_nop();
return ftrace_modify_code(pc, old, new, validate); return ftrace_modify_code(pc, old, new, true);
} }
void arch_ftrace_update_code(int command) void arch_ftrace_update_code(int command)
......
...@@ -303,14 +303,13 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p) ...@@ -303,14 +303,13 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
early_fixmap_init(); early_fixmap_init();
early_ioremap_init(); early_ioremap_init();
setup_machine_fdt(__fdt_pointer);
/* /*
* Initialise the static keys early as they may be enabled by the * Initialise the static keys early as they may be enabled by the
* cpufeature code, early parameters, and DT setup. * cpufeature code and early parameters.
*/ */
jump_label_init(); jump_label_init();
setup_machine_fdt(__fdt_pointer);
parse_early_param(); parse_early_param();
/* /*
......
...@@ -218,8 +218,6 @@ SYM_FUNC_ALIAS(__dma_flush_area, __pi___dma_flush_area) ...@@ -218,8 +218,6 @@ SYM_FUNC_ALIAS(__dma_flush_area, __pi___dma_flush_area)
*/ */
SYM_FUNC_START(__pi___dma_map_area) SYM_FUNC_START(__pi___dma_map_area)
add x1, x0, x1 add x1, x0, x1
cmp w2, #DMA_FROM_DEVICE
b.eq __pi_dcache_inval_poc
b __pi_dcache_clean_poc b __pi_dcache_clean_poc
SYM_FUNC_END(__pi___dma_map_area) SYM_FUNC_END(__pi___dma_map_area)
SYM_FUNC_ALIAS(__dma_map_area, __pi___dma_map_area) SYM_FUNC_ALIAS(__dma_map_area, __pi___dma_map_area)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment