Commit 19b522db authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Nothing too bad, but the spectre updates to smatch identified a few
  places that may need sanitising so we've got those covered.

  Details:

   - Close some potential spectre-v1 vulnerabilities found by smatch

   - Add missing list sentinel for CPUs that don't require KPTI

   - Removal of unused 'addr' parameter for I/D cache coherency

   - Removal of redundant set_fs(KERNEL_DS) calls in ptrace

   - Fix single-stepping state machine handling in response to kernel
     traps

   - Clang support for 128-bit integers

   - Avoid instrumenting our out-of-line atomics in preparation for
     enabling LSE atomics by default in 4.18"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: avoid instrumenting atomic_ll_sc.o
  KVM: arm/arm64: vgic: fix possible spectre-v1 in vgic_mmio_read_apr()
  KVM: arm/arm64: vgic: fix possible spectre-v1 in vgic_get_irq()
  arm64: fix possible spectre-v1 in ptrace_hbp_get_event()
  arm64: support __int128 with clang
  arm64: only advance singlestep for user instruction traps
  arm64/kernel: rename module_emit_adrp_veneer->module_emit_veneer_for_adrp
  arm64: ptrace: remove addr_limit manipulation
  arm64: mm: drop addr parameter from sync icache and dcache
  arm64: add sentinel to kpti_safe_list
parents 7b87308e 3789c122
...
@@ -56,7 +56,11 @@ KBUILD_AFLAGS += $(lseinstr) $(brokengasinst)
 KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
 KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)
+ifeq ($(cc-name),clang)
+KBUILD_CFLAGS += -DCONFIG_ARCH_SUPPORTS_INT128
+else
 KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0500, -DCONFIG_ARCH_SUPPORTS_INT128)
+endif
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS += -mbig-endian
...
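For background on the Makefile hunk above: cc-ifversion -ge 0500 gates the define on GCC 5.0 or newer, while clang is handled in its own branch because cc-ifversion keys off GCC version numbers. CONFIG_ARCH_SUPPORTS_INT128 lets generic code use the compiler's native 128-bit type, e.g. for 64x64->128-bit multiplies. A minimal standalone sketch of that pattern, illustrative only and not a kernel helper:

#include <stdint.h>

/* Sketch: the sort of helper CONFIG_ARCH_SUPPORTS_INT128 enables.
 * With a compiler that provides __int128 (GCC >= 5.0, or clang as
 * handled in the hunk above), a 64x64->128-bit multiply-and-shift
 * needs no manual high/low-word splitting. */
static inline uint64_t mul64_shr(uint64_t a, uint64_t b, unsigned int shift)
{
        return (uint64_t)(((unsigned __int128)a * b) >> shift);
}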
...
@@ -39,7 +39,7 @@ struct mod_arch_specific {
 u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
                           Elf64_Sym *sym);
-u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val);
+u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val);
 #ifdef CONFIG_RANDOMIZE_BASE
 extern u64 module_alloc_base;
...
...
@@ -230,7 +230,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
         }
 }
-extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
+extern void __sync_icache_dcache(pte_t pteval);
 /*
  * PTE bits configuration in the presence of hardware Dirty Bit Management
@@ -253,7 +253,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
         pte_t old_pte;
         if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
-                __sync_icache_dcache(pte, addr);
+                __sync_icache_dcache(pte);
         /*
          * If the existing pte is valid, check for potential race with
...
...
@@ -868,6 +868,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
         static const struct midr_range kpti_safe_list[] = {
                 MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
                 MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+                { /* sentinel */ }
         };
         char const *str = "command line option";
...
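For context on the sentinel added above: kpti_safe_list is walked by matching entries against the current CPU's MIDR, and the walk stops at the first all-zero entry, so without the terminator the loop would read past the end of the array. A self-contained sketch of that pattern follows; the field names mirror struct midr_range in asm/cputype.h, but the walker itself is illustrative rather than the kernel's is_midr_in_range_list():

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for a midr_range list walker; field names
 * follow struct midr_range, the rest is a simplified assumption. */
struct midr_range {
        uint32_t model;
        uint32_t rv_min;
        uint32_t rv_max;
};

static bool midr_matches_list(uint32_t model, const struct midr_range *ranges)
{
        /* Stop at the all-zero sentinel entry; without it this loop
         * would walk off the end of the array. */
        for (; ranges->model || ranges->rv_min || ranges->rv_max; ranges++) {
                if (ranges->model == model)
                        return true;    /* revision-range checks elided */
        }
        return false;
}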
...
@@ -43,7 +43,7 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
 }
 #ifdef CONFIG_ARM64_ERRATUM_843419
-u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val)
+u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val)
 {
         struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
                                                           &mod->arch.init;
...
...
@@ -215,7 +215,7 @@ static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val)
                 insn &= ~BIT(31);
         } else {
                 /* out of range for ADR -> emit a veneer */
-                val = module_emit_adrp_veneer(mod, place, val & ~0xfff);
+                val = module_emit_veneer_for_adrp(mod, place, val & ~0xfff);
                 if (!val)
                         return -ENOEXEC;
                 insn = aarch64_insn_gen_branch_imm((u64)place, val,
...
...
@@ -25,6 +25,7 @@
 #include <linux/sched/signal.h>
 #include <linux/sched/task_stack.h>
 #include <linux/mm.h>
+#include <linux/nospec.h>
 #include <linux/smp.h>
 #include <linux/ptrace.h>
 #include <linux/user.h>
@@ -249,15 +250,20 @@ static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
         switch (note_type) {
         case NT_ARM_HW_BREAK:
-                if (idx < ARM_MAX_BRP)
+                if (idx >= ARM_MAX_BRP)
+                        goto out;
+                idx = array_index_nospec(idx, ARM_MAX_BRP);
                 bp = tsk->thread.debug.hbp_break[idx];
                 break;
         case NT_ARM_HW_WATCH:
-                if (idx < ARM_MAX_WRP)
+                if (idx >= ARM_MAX_WRP)
+                        goto out;
+                idx = array_index_nospec(idx, ARM_MAX_WRP);
                 bp = tsk->thread.debug.hbp_watch[idx];
                 break;
         }
+out:
         return bp;
 }
@@ -1458,9 +1464,7 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
 {
         int ret;
         u32 kdata;
-        mm_segment_t old_fs = get_fs();
-        set_fs(KERNEL_DS);
         /* Watchpoint */
         if (num < 0) {
                 ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
@@ -1471,7 +1475,6 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
         } else {
                 ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
         }
-        set_fs(old_fs);
         if (!ret)
                 ret = put_user(kdata, data);
@@ -1484,7 +1487,6 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
 {
         int ret;
         u32 kdata = 0;
-        mm_segment_t old_fs = get_fs();
         if (num == 0)
                 return 0;
@@ -1493,12 +1495,10 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
         if (ret)
                 return ret;
-        set_fs(KERNEL_DS);
         if (num < 0)
                 ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
         else
                 ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
-        set_fs(old_fs);
         return ret;
 }
...
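The ptrace_hbp_get_event() change above (and the vgic changes below) apply the same spectre-v1 idiom: bounds-check the index, then clamp it with array_index_nospec() so that a speculatively executed out-of-bounds access cannot be used as a side channel. A minimal sketch of the idiom with hypothetical names:

#include <linux/nospec.h>

/* Sketch of the spectre-v1 pattern used in these fixes. The function
 * and parameter names are hypothetical; only the bounds-check plus
 * array_index_nospec() sequence mirrors the real changes. */
static struct perf_event *get_slot(struct perf_event **slots,
                                   unsigned int nr_slots, unsigned int idx)
{
        if (idx >= nr_slots)
                return NULL;                        /* reject out-of-range */
        idx = array_index_nospec(idx, nr_slots);    /* clamp under speculation */
        return slots[idx];
}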
...
@@ -277,6 +277,7 @@ void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
          * If we were single stepping, we want to get the step exception after
          * we return from the trap.
          */
+        if (user_mode(regs))
                 user_fastforward_single_step(current);
 }
...
...
@@ -19,5 +19,9 @@ CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2 \
                    -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \
                    -fcall-saved-x18 -fomit-frame-pointer
 CFLAGS_REMOVE_atomic_ll_sc.o := -pg
+GCOV_PROFILE_atomic_ll_sc.o := n
+KASAN_SANITIZE_atomic_ll_sc.o := n
+KCOV_INSTRUMENT_atomic_ll_sc.o := n
+UBSAN_SANITIZE_atomic_ll_sc.o := n
 lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
...
@@ -58,7 +58,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
         flush_ptrace_access(vma, page, uaddr, dst, len);
 }
-void __sync_icache_dcache(pte_t pte, unsigned long addr)
+void __sync_icache_dcache(pte_t pte)
 {
         struct page *page = pte_page(pte);
...
...
@@ -14,6 +14,8 @@
 #include <linux/irqchip/arm-gic.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
+#include <linux/nospec.h>
 #include <kvm/iodev.h>
 #include <kvm/arm_vgic.h>
@@ -324,6 +326,9 @@ static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
         if (n > vgic_v3_max_apr_idx(vcpu))
                 return 0;
+        n = array_index_nospec(n, 4);
         /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
         return vgicv3->vgic_ap1r[n];
 }
...
...
@@ -14,11 +14,13 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
+#include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/list_sort.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
+#include <linux/nospec.h>
 #include <asm/kvm_hyp.h>
 #include "vgic.h"
@@ -101,12 +103,16 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
                               u32 intid)
 {
         /* SGIs and PPIs */
-        if (intid <= VGIC_MAX_PRIVATE)
+        if (intid <= VGIC_MAX_PRIVATE) {
+                intid = array_index_nospec(intid, VGIC_MAX_PRIVATE);
                 return &vcpu->arch.vgic_cpu.private_irqs[intid];
+        }
         /* SPIs */
-        if (intid <= VGIC_MAX_SPI)
+        if (intid <= VGIC_MAX_SPI) {
+                intid = array_index_nospec(intid, VGIC_MAX_SPI);
                 return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
+        }
         /* LPIs */
         if (intid >= VGIC_MIN_LPI)
...