Commit 6361c845 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes (and cleanups) from Catalin Marinas:
 "Various arm64 fixes:

   - suspicious RCU usage warning
   - BPF (out-of-bounds array read and endianness conversion)
   - perf (of_node usage after of_node_put, cpu_pmu->plat_device
     assignment)
   - huge pmd/pud check for value 0
   - rate-limiting should only take unhandled signals into account

  Clean-up:

   - incorrect use of pgprot_t type
   - unused header include
   - __init annotation to arm_cpuidle_init
   - pr_debug() instead of pr_err() for disabled GICC entries in
     ACPI/MADT"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: Fix show_unhandled_signal_ratelimited usage
  ARM64 / SMP: Switch pr_err() to pr_debug() for disabled GICC entry
  arm64: cpuidle: add __init section marker to arm_cpuidle_init
  arm64: Don't report clear pmds and puds as huge
  arm64: perf: fix unassigned cpu_pmu->plat_device when probing PMU PPIs
  arm64: perf: Don't use of_node after putting it
  arm64: fix incorrect use of pgprot_t variable
  arm64/hw_breakpoint.c: remove unnecessary header
  arm64: bpf: fix endianness conversion bugs
  arm64: bpf: fix out-of-bounds read in bpf2a64_offset()
  ARM64: smp: Fix suspicious RCU usage with ipi tracepoints
parents 31351f73 f871d268
@@ -15,7 +15,7 @@
 #include <asm/cpuidle.h>
 #include <asm/cpu_ops.h>
 
-int arm_cpuidle_init(unsigned int cpu)
+int __init arm_cpuidle_init(unsigned int cpu)
 {
         int ret = -EOPNOTSUPP;
......
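Note: __init places arm_cpuidle_init() in .init.text, which the kernel frees once boot completes, so the function may only be called from other initialization code. A minimal stand-alone sketch of the pattern (the function name below is made up, not from this commit):

#include <linux/init.h>
#include <linux/printk.h>

/* Hypothetical example of an __init function: it runs once during boot
 * via an initcall and its code is discarded afterwards. */
static int __init example_boot_setup(void)
{
        pr_info("one-time setup done\n");
        return 0;
}
early_initcall(example_boot_setup);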
@@ -31,7 +31,6 @@
 #include <asm/current.h>
 #include <asm/debug-monitors.h>
 #include <asm/hw_breakpoint.h>
-#include <asm/kdebug.h>
 #include <asm/traps.h>
 #include <asm/cputype.h>
 #include <asm/system_misc.h>
......
@@ -1318,7 +1318,7 @@ static int armpmu_device_probe(struct platform_device *pdev)
         /* Don't bother with PPIs; they're already affine */
         irq = platform_get_irq(pdev, 0);
         if (irq >= 0 && irq_is_percpu(irq))
-                return 0;
+                goto out;
 
         irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
         if (!irqs)
@@ -1340,12 +1340,13 @@ static int armpmu_device_probe(struct platform_device *pdev)
                         if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
                                 break;
 
-                of_node_put(dn);
                 if (cpu >= nr_cpu_ids) {
                         pr_warn("Failed to find logical CPU for %s\n",
                                 dn->name);
+                        of_node_put(dn);
                         break;
                 }
+                of_node_put(dn);
 
                 irqs[i] = cpu;
         }
@@ -1355,6 +1356,7 @@ static int armpmu_device_probe(struct platform_device *pdev)
         else
                 kfree(irqs);
 
+out:
         cpu_pmu->plat_device = pdev;
         return 0;
 }
......
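The of_node change above follows the usual OF refcounting rule: of_node_put() must come after the last access to the node (here dn->name in the warning), and each exit path needs exactly one put. A minimal stand-alone sketch of that rule; the helper name is hypothetical, the property name is taken from the hunk above:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/printk.h>

/* Hypothetical helper: look up the idx-th "interrupt-affinity" phandle,
 * use the node while the reference is still held, then drop it. */
static int report_affinity_node(struct device_node *parent, int idx)
{
        struct device_node *dn;

        dn = of_parse_phandle(parent, "interrupt-affinity", idx);
        if (!dn)
                return -ENOENT;

        pr_info("affinity node: %s\n", dn->name);       /* last use of dn */
        of_node_put(dn);                                /* put only after that */
        return 0;
}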
@@ -396,13 +396,13 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
 {
         u64 hwid = processor->arm_mpidr;
 
-        if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
-                pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
+        if (!(processor->flags & ACPI_MADT_ENABLED)) {
+                pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
                 return;
         }
 
-        if (!(processor->flags & ACPI_MADT_ENABLED)) {
-                pr_err("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
+        if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
+                pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
                 return;
         }
@@ -693,7 +693,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
         struct pt_regs *old_regs = set_irq_regs(regs);
 
         if ((unsigned)ipinr < NR_IPI) {
-                trace_ipi_entry(ipi_types[ipinr]);
+                trace_ipi_entry_rcuidle(ipi_types[ipinr]);
                 __inc_irq_stat(cpu, ipi_irqs[ipinr]);
         }
@@ -736,7 +736,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
         }
 
         if ((unsigned)ipinr < NR_IPI)
-                trace_ipi_exit(ipi_types[ipinr]);
+                trace_ipi_exit_rcuidle(ipi_types[ipinr]);
 
         set_irq_regs(old_regs);
 }
......
@@ -335,7 +335,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
         if (call_undef_hook(regs) == 0)
                 return;
 
-        if (show_unhandled_signals_ratelimited() && unhandled_signal(current, SIGILL)) {
+        if (unhandled_signal(current, SIGILL) && show_unhandled_signals_ratelimited()) {
                 pr_info("%s[%d]: undefined instruction: pc=%p\n",
                         current->comm, task_pid_nr(current), pc);
                 dump_instr(KERN_INFO, regs);
......
@@ -115,7 +115,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
 {
         struct siginfo si;
 
-        if (show_unhandled_signals_ratelimited() && unhandled_signal(tsk, sig)) {
+        if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
                 pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
                         tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
                         addr, esr);
......
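Both show_unhandled_signals_ratelimited() hunks above rely on && short-circuiting: with the rate-limit check second, its token is only consumed when the signal really is unhandled, so frequent handled signals can no longer exhaust the limit and silence the rare interesting message. A small stand-alone illustration of the ordering effect (all names below are made up):

#include <stdbool.h>
#include <stdio.h>

static int ratelimit_budget = 3;

/* Stand-in for a rate limiter: spends budget every time it is called,
 * whether or not the caller ends up logging anything. */
static bool ratelimited_ok(void)
{
        return ratelimit_budget-- > 0;
}

static bool signal_unhandled(int i)
{
        return (i % 10) == 0;   /* pretend most signals are handled */
}

int main(void)
{
        for (int i = 0; i < 40; i++) {
                /* unhandled check first: the limiter only runs (and only
                 * spends budget) for the rare unhandled case */
                if (signal_unhandled(i) && ratelimited_ok())
                        printf("would log unhandled signal %d\n", i);
        }
        return 0;
}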
@@ -33,13 +33,13 @@
 int pmd_huge(pmd_t pmd)
 {
-        return !(pmd_val(pmd) & PMD_TABLE_BIT);
+        return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
 }
 
 int pud_huge(pud_t pud)
 {
 #ifndef __PAGETABLE_PMD_FOLDED
-        return !(pud_val(pud) & PUD_TABLE_BIT);
+        return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
 #else
         return 0;
 #endif
......
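The pmd_huge()/pud_huge() change above exists because a cleared entry has value 0, so its table bit is also clear and the old test reported an empty slot as a huge mapping. A stand-alone illustration; the bit position mirrors arm64's PMD_TABLE_BIT but is hard-coded here as an assumption:

#include <stdbool.h>
#include <stdio.h>

#define PMD_TABLE_BIT   (1UL << 1)      /* assumed: bit 1 marks a table entry */

static bool old_pmd_huge(unsigned long val)
{
        return !(val & PMD_TABLE_BIT);                  /* 0 wrongly counts as huge */
}

static bool new_pmd_huge(unsigned long val)
{
        return val && !(val & PMD_TABLE_BIT);           /* empty entry is not huge */
}

int main(void)
{
        unsigned long cleared = 0;                      /* pmd_none() case */
        unsigned long block = 0x40000000UL | 0x1UL;     /* made-up block (huge) descriptor */

        printf("cleared: old=%d new=%d\n", old_pmd_huge(cleared), new_pmd_huge(cleared));
        printf("block:   old=%d new=%d\n", old_pmd_huge(block), new_pmd_huge(block));
        return 0;
}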
@@ -117,7 +117,7 @@ void split_pud(pud_t *old_pud, pmd_t *pmd)
         int i = 0;
 
         do {
-                set_pmd(pmd, __pmd(addr | prot));
+                set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
                 addr += PMD_SIZE;
         } while (pmd++, i++, i < PTRS_PER_PMD);
 }
......
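The split_pud() fix works because pgprot_t is a wrapper type (a one-member struct under STRICT_MM_TYPECHECKS) rather than a plain integer, so it cannot be OR-ed into a descriptor directly; pgprot_val() unwraps it. A simplified stand-alone model of that wrapper pattern (userspace, made-up values):

#include <stdio.h>

/* Simplified model of the kernel's strict type-checking wrapper. */
typedef struct { unsigned long pgprot; } pgprot_t;
#define __pgprot(x)     ((pgprot_t) { (x) })
#define pgprot_val(x)   ((x).pgprot)

int main(void)
{
        pgprot_t prot = __pgprot(0x711UL);      /* made-up attribute bits */
        unsigned long addr = 0x40000000UL;

        /* "addr | prot" would not compile: prot is a struct, not an integer */
        unsigned long pmd = addr | pgprot_val(prot);

        printf("pmd = %#lx\n", pmd);
        return 0;
}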
@@ -110,6 +110,10 @@
 /* Rd = Rn >> shift; signed */
 #define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
 
+/* Zero extend */
+#define A64_UXTH(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 15)
+#define A64_UXTW(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 31)
+
 /* Move wide (immediate) */
 #define A64_MOVEW(sf, Rd, imm16, shift, type) \
         aarch64_insn_gen_movewide(Rd, imm16, shift, \
......
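The new A64_UXTH/A64_UXTW macros encode UBFM (unsigned bitfield move) with immr = 0 and imms = 15 or 31, i.e. copy bits [imms:0] of Rn into Rd and zero everything above. A small stand-alone model of that zero-extension behaviour (a semantic sketch, not the instruction encoder):

#include <stdint.h>
#include <stdio.h>

/* Model of UBFM Rd, Rn, #0, #imms for the zero-extension cases:
 * keep bits [imms:0] of the source, clear the rest. */
static uint64_t ubfm_zero_extend(uint64_t rn, unsigned int imms)
{
        uint64_t mask = (imms >= 63) ? ~0ULL : ((1ULL << (imms + 1)) - 1);

        return rn & mask;
}

int main(void)
{
        uint64_t rn = 0xffffffffdeadbeefULL;

        printf("UXTH: %#llx\n", (unsigned long long)ubfm_zero_extend(rn, 15));  /* 0xbeef */
        printf("UXTW: %#llx\n", (unsigned long long)ubfm_zero_extend(rn, 31));  /* 0xdeadbeef */
        return 0;
}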
@@ -113,9 +113,9 @@ static inline void emit_a64_mov_i(const int is64, const int reg,
 static inline int bpf2a64_offset(int bpf_to, int bpf_from,
                                  const struct jit_ctx *ctx)
 {
-        int to = ctx->offset[bpf_to + 1];
+        int to = ctx->offset[bpf_to];
         /* -1 to account for the Branch instruction */
-        int from = ctx->offset[bpf_from + 1] - 1;
+        int from = ctx->offset[bpf_from] - 1;
 
         return to - from;
 }
@@ -289,23 +289,41 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
         case BPF_ALU | BPF_END | BPF_FROM_BE:
 #ifdef CONFIG_CPU_BIG_ENDIAN
                 if (BPF_SRC(code) == BPF_FROM_BE)
-                        break;
+                        goto emit_bswap_uxt;
 #else /* !CONFIG_CPU_BIG_ENDIAN */
                 if (BPF_SRC(code) == BPF_FROM_LE)
-                        break;
+                        goto emit_bswap_uxt;
 #endif
                 switch (imm) {
                 case 16:
                         emit(A64_REV16(is64, dst, dst), ctx);
+                        /* zero-extend 16 bits into 64 bits */
+                        emit(A64_UXTH(is64, dst, dst), ctx);
                         break;
                 case 32:
                         emit(A64_REV32(is64, dst, dst), ctx);
+                        /* upper 32 bits already cleared */
                         break;
                 case 64:
                         emit(A64_REV64(dst, dst), ctx);
                         break;
                 }
                 break;
+emit_bswap_uxt:
+                switch (imm) {
+                case 16:
+                        /* zero-extend 16 bits into 64 bits */
+                        emit(A64_UXTH(is64, dst, dst), ctx);
+                        break;
+                case 32:
+                        /* zero-extend 32 bits into 64 bits */
+                        emit(A64_UXTW(is64, dst, dst), ctx);
+                        break;
+                case 64:
+                        /* nop */
+                        break;
+                }
+                break;
         /* dst = imm */
         case BPF_ALU | BPF_MOV | BPF_K:
         case BPF_ALU64 | BPF_MOV | BPF_K:
@@ -640,10 +658,11 @@ static int build_body(struct jit_ctx *ctx)
                 const struct bpf_insn *insn = &prog->insnsi[i];
                 int ret;
 
+                ret = build_insn(insn, ctx);
+
                 if (ctx->image == NULL)
                         ctx->offset[i] = ctx->idx;
-                ret = build_insn(insn, ctx);
                 if (ret > 0) {
                         i++;
                         continue;
......
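The build_insn() change above reflects BPF_END semantics: a 16- or 32-bit endianness conversion must leave the 64-bit destination register zero-extended even when the byte swap itself is a no-op because the source endianness already matches the CPU. A stand-alone model of the 16-bit case (a sketch of the semantics, not the JIT):

#include <stdint.h>
#include <stdio.h>

/* Model of a 64-bit BPF register after a 16-bit endianness conversion:
 * optionally swap the low half-word, then zero-extend it to 64 bits. */
static uint64_t bpf_end16(uint64_t reg, int need_swap)
{
        uint16_t half = (uint16_t)reg;

        if (need_swap)
                half = (uint16_t)((half << 8) | (half >> 8));

        return half;    /* upper 48 bits are cleared either way */
}

int main(void)
{
        uint64_t reg = 0xdeadbeefcafe1234ULL;

        printf("no swap: %#llx\n", (unsigned long long)bpf_end16(reg, 0));      /* 0x1234 */
        printf("swap:    %#llx\n", (unsigned long long)bpf_end16(reg, 1));      /* 0x3412 */
        return 0;
}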