Commit 06f976ec authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "There are some significant fixes in here for FP state corruption,
  hardware access/dirty PTE corruption and an erratum workaround for the
  Falkor CPU.

  I'm hoping that things finally settle down now, but never say never...

  Summary:

   - Fix FPSIMD context switch regression introduced in -rc2

   - Fix ABI break with SVE CPUID register reporting

   - Fix use of uninitialised variable

   - Fixes to hardware access/dirty management and sanity checking

   - CPU erratum workaround for Falkor CPUs

   - Fix reporting of writeable+executable mappings

   - Fix signal reporting for RAS errors"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: fpsimd: Fix copying of FP state from signal frame into task struct
  arm64/sve: Report SVE to userspace via CPUID only if supported
  arm64: fix CONFIG_DEBUG_WX address reporting
  arm64: fault: avoid send SIGBUS two times
  arm64: hw_breakpoint: Use linux/uaccess.h instead of asm/uaccess.h
  arm64: Add software workaround for Falkor erratum 1041
  arm64: Define cputype macros for Falkor CPU
  arm64: mm: Fix false positives in set_pte_at access/dirty race detection
  arm64: mm: Fix pte_mkclean, pte_mkdirty semantics
  arm64: Initialise high_memory global variable earlier
parents e53000b1 a4544831
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -75,3 +75,4 @@ stable kernels.
 | Qualcomm Tech.  | Falkor v1       | E1003           | QCOM_FALKOR_ERRATUM_1003    |
 | Qualcomm Tech.  | Falkor v1       | E1009           | QCOM_FALKOR_ERRATUM_1009    |
 | Qualcomm Tech.  | QDF2400 ITS     | E0065           | QCOM_QDF2400_ERRATUM_0065   |
+| Qualcomm Tech.  | Falkor v{1,2}   | E1041           | QCOM_FALKOR_ERRATUM_1041    |
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -557,7 +557,6 @@ config QCOM_QDF2400_ERRATUM_0065
 
 	  If unsure, say Y.
 
-
 config SOCIONEXT_SYNQUACER_PREITS
 	bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
 	default y
@@ -576,6 +575,17 @@ config HISILICON_ERRATUM_161600802
 	  a 128kB offset to be applied to the target address in these commands.
 
 	  If unsure, say Y.
 
+config QCOM_FALKOR_ERRATUM_E1041
+	bool "Falkor E1041: Speculative instruction fetches might cause errant memory access"
+	default y
+	help
+	  Falkor CPU may speculatively fetch instructions from an improper
+	  memory location when MMU translation is changed from SCTLR_ELn[M]=1
+	  to SCTLR_ELn[M]=0. Prefix an ISB instruction to fix the problem.
+
+	  If unsure, say Y.
+
 endmenu
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -512,4 +512,14 @@ alternative_else_nop_endif
 #endif
 	.endm
 
+/*
+ * Erratum workaround prior to disabling the MMU. Insert an ISB immediately
+ * before executing the MSR that will change SCTLR_ELn[M] from 1 to 0.
+ */
+	.macro	pre_disable_mmu_workaround
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
+	isb
+#endif
+	.endm
+
 #endif /* __ASM_ASSEMBLER_H */
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -60,6 +60,9 @@ enum ftr_type {
 #define FTR_VISIBLE	true	/* Feature visible to the user space */
 #define FTR_HIDDEN	false	/* Feature is hidden from the user */
 
+#define FTR_VISIBLE_IF_IS_ENABLED(config)		\
+	(IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)
+
 struct arm64_ftr_bits {
 	bool		sign;	/* Value is signed ? */
 	bool		visible;
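These visibility flags are userspace ABI: arm64 traps EL0 MRS reads of the ID registers and emulates them with the sanitised, visibility-filtered values, which is why reporting SVE on kernels built without CONFIG_ARM64_SVE was an ABI break. A minimal userspace sketch of such a read (illustrative only; it assumes the kernel's MRS emulation, and older assemblers may need the S3_0_C0_C4_0 encoding for ID_AA64PFR0_EL1):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	uint64_t pfr0;

    	/* Trapped by the CPU and emulated by the kernel, which returns
    	 * the sanitised system-wide value rather than raw hardware. */
    	asm("mrs %0, ID_AA64PFR0_EL1" : "=r"(pfr0));

    	/* ID_AA64PFR0_EL1.SVE lives in bits [35:32]. */
    	printf("SVE field: %u\n", (unsigned)((pfr0 >> 32) & 0xf));
    	return 0;
    }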
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -91,6 +91,7 @@
 #define BRCM_CPU_PART_VULCAN		0x516
 
 #define QCOM_CPU_PART_FALKOR_V1		0x800
+#define QCOM_CPU_PART_FALKOR		0xC00
 
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
@@ -99,6 +100,7 @@
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
 #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
+#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
 
 #ifndef __ASSEMBLY__
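For reference, MIDR_CPU_MODEL() composes two fields of MIDR_EL1; a sketch of the register layout these macros rely on:

    /*
     * MIDR_EL1 layout (sketch):
     *   [31:24] implementer  (0x51 = Qualcomm)
     *   [23:20] variant
     *   [19:16] architecture
     *   [15:4]  part number  (0x800 = Falkor v1, 0xC00 = Falkor)
     *   [3:0]   revision
     * MIDR_CPU_MODEL() fills in implementer and part number; variant and
     * revision are masked off when matching affected CPUs.
     */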
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -42,6 +42,8 @@
 #include <asm/cmpxchg.h>
 #include <asm/fixmap.h>
 #include <linux/mmdebug.h>
+#include <linux/mm_types.h>
+#include <linux/sched.h>
 
 extern void __pte_error(const char *file, int line, unsigned long val);
 extern void __pmd_error(const char *file, int line, unsigned long val);
@@ -149,12 +151,20 @@ static inline pte_t pte_mkwrite(pte_t pte)
 static inline pte_t pte_mkclean(pte_t pte)
 {
-	return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
+	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
+	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
+
+	return pte;
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-	return set_pte_bit(pte, __pgprot(PTE_DIRTY));
+	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
+
+	if (pte_write(pte))
+		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
+
+	return pte;
 }
 
 static inline pte_t pte_mkold(pte_t pte)
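The asymmetry in these two helpers follows from how arm64 encodes dirty state when hardware dirty-bit management (DBM) is active; a summary sketch of the encoding the fix preserves:

    /*
     * PTE_WRITE (== PTE_DBM)  PTE_RDONLY  meaning
     * ----------------------  ----------  -----------------------------
     *           0                  1      read-only, clean
     *           1                  1      writable, clean (hardware
     *                                     clears RDONLY on first write)
     *           1                  0      writable, dirty
     *
     * Hence pte_mkclean() must also set PTE_RDONLY, and pte_mkdirty()
     * must clear PTE_RDONLY on writable ptes; touching PTE_DIRTY alone
     * leaves the pte in an inconsistent clean/dirty state.
     */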
@@ -207,9 +217,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 	}
 }
 
-struct mm_struct;
-struct vm_area_struct;
-
 extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 
 /*
@@ -238,7 +245,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	 * hardware updates of the pte (ptep_set_access_flags safely changes
 	 * valid ptes without going through an invalid entry).
 	 */
-	if (pte_valid(*ptep) && pte_valid(pte)) {
+	if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(*ptep) && pte_valid(pte) &&
+	    (mm == current->active_mm || atomic_read(&mm->mm_users) > 1)) {
 		VM_WARN_ONCE(!pte_young(pte),
 			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
 			     __func__, pte_val(*ptep), pte_val(pte));
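The extra mm test is what removes the false positives: hardware can only race with this update if some CPU may still hold the old pte in its TLB. An illustrative reading of the new condition:

    /*
     * Warn only when the mm could be live in hardware:
     *   mm == current->active_mm        - this CPU is using the mm now
     *   atomic_read(&mm->mm_users) > 1  - other threads/CPUs share it
     * Otherwise no TLB can hold the old entry, so no hardware
     * access/dirty update can race with the store.
     */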
@@ -641,28 +649,23 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
- * ptep_set_wrprotect - mark read-only while preserving the hardware update of
- * the Access Flag.
+ * ptep_set_wrprotect - mark read-only while transferring potential hardware
+ * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
  */
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
 {
 	pte_t old_pte, pte;
 
-	/*
-	 * ptep_set_wrprotect() is only called on CoW mappings which are
-	 * private (!VM_SHARED) with the pte either read-only (!PTE_WRITE &&
-	 * PTE_RDONLY) or writable and software-dirty (PTE_WRITE &&
-	 * !PTE_RDONLY && PTE_DIRTY); see is_cow_mapping() and
-	 * protection_map[]. There is no race with the hardware update of the
-	 * dirty state: clearing of PTE_RDONLY when PTE_WRITE (a.k.a. PTE_DBM)
-	 * is set.
-	 */
-	VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(*ptep),
-		     "%s: potential race with hardware DBM", __func__);
 	pte = READ_ONCE(*ptep);
 	do {
 		old_pte = pte;
+		/*
+		 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
+		 * clear), set the PTE_DIRTY bit.
+		 */
+		if (pte_hw_dirty(pte))
+			pte = pte_mkdirty(pte);
 		pte = pte_wrprotect(pte);
 		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
 					       pte_val(old_pte), pte_val(pte));
--- a/arch/arm64/kernel/cpu-reset.S
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -37,6 +37,7 @@ ENTRY(__cpu_soft_restart)
 	mrs	x12, sctlr_el1
 	ldr	x13, =SCTLR_ELx_FLAGS
 	bic	x12, x12, x13
+	pre_disable_mmu_workaround
 	msr	sctlr_el1, x12
 	isb
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -145,7 +145,8 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
 	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
 	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -96,6 +96,7 @@ ENTRY(entry)
 	mrs	x0, sctlr_el2
 	bic	x0, x0, #1 << 0		// clear SCTLR.M
 	bic	x0, x0, #1 << 2		// clear SCTLR.C
+	pre_disable_mmu_workaround
 	msr	sctlr_el2, x0
 	isb
 	b	2f
@@ -103,6 +104,7 @@ ENTRY(entry)
 	mrs	x0, sctlr_el1
 	bic	x0, x0, #1 << 0		// clear SCTLR.M
 	bic	x0, x0, #1 << 2		// clear SCTLR.C
+	pre_disable_mmu_workaround
 	msr	sctlr_el1, x0
 	isb
 2:
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1043,7 +1043,7 @@ void fpsimd_update_current_state(struct fpsimd_state *state)
 
 	local_bh_disable();
 
-	current->thread.fpsimd_state = *state;
+	current->thread.fpsimd_state.user_fpsimd = state->user_fpsimd;
 	if (system_supports_sve() && test_thread_flag(TIF_SVE))
 		fpsimd_to_sve(current);
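The one-field copy is the entire fix: struct fpsimd_state carries more than the user-visible registers, and the previous whole-struct assignment also clobbered the field the lazy context-switch logic uses to track where the state was last loaded. A sketch of the layout from this era of the arm64 tree:

    struct fpsimd_state {
    	union {
    		struct user_fpsimd_state user_fpsimd;	/* V0-V31, FPSR, FPCR */
    		struct {
    			__uint128_t vregs[32];
    			u32 fpsr;
    			u32 fpcr;
    		};
    	};
    	/* id of the last cpu to have restored this state; overwriting
    	 * it when copying registers from a signal frame corrupts the
    	 * context-switch bookkeeping. */
    	unsigned int cpu;
    };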
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -750,6 +750,7 @@ __primary_switch:
 	 * to take into account by discarding the current kernel mapping and
 	 * creating a new one.
 	 */
+	pre_disable_mmu_workaround
 	msr	sctlr_el1, x20			// disable the MMU
 	isb
 	bl	__create_page_tables		// recreate kernel mapping
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -28,6 +28,7 @@
 #include <linux/perf_event.h>
 #include <linux/ptrace.h>
 #include <linux/smp.h>
+#include <linux/uaccess.h>
 
 #include <asm/compat.h>
 #include <asm/current.h>
@@ -36,7 +37,6 @@
 #include <asm/traps.h>
 #include <asm/cputype.h>
 #include <asm/system_misc.h>
-#include <asm/uaccess.h>
 
 /* Breakpoint currently in use for each BRP. */
 static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -45,6 +45,7 @@ ENTRY(arm64_relocate_new_kernel)
 	mrs	x0, sctlr_el2
 	ldr	x1, =SCTLR_ELx_FLAGS
 	bic	x0, x0, x1
+	pre_disable_mmu_workaround
 	msr	sctlr_el2, x0
 	isb
 1:
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -151,6 +151,7 @@ reset:
 	mrs	x5, sctlr_el2
 	ldr	x6, =SCTLR_ELx_FLAGS
 	bic	x5, x5, x6		// Clear SCTLR_ELx.M etc.
+	pre_disable_mmu_workaround
 	msr	sctlr_el2, x5
 	isb
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -389,7 +389,7 @@ void ptdump_check_wx(void)
 		.check_wx = true,
 	};
 
-	walk_pgd(&st, &init_mm, 0);
+	walk_pgd(&st, &init_mm, VA_START);
 	note_page(&st, 0, 0, 0);
 	if (st.wx_pages || st.uxn_pages)
 		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
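The start address matters because kernel virtual addresses begin at VA_START, not 0, so walking from 0 made CONFIG_DEBUG_WX report addresses without that offset. A sketch of the arithmetic, assuming 48-bit virtual addresses:

    #define VA_BITS		48
    #define VA_START	(0xffffffffffffffffUL - (1UL << VA_BITS) + 1)
    /* VA_START == 0xffff000000000000UL: the first kernel VA, and hence
     * the correct starting point for walking init_mm's page tables. */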
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -574,7 +574,6 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 {
 	struct siginfo info;
 	const struct fault_info *inf;
-	int ret = 0;
 
 	inf = esr_to_fault_info(esr);
 	pr_err("Synchronous External Abort: %s (0x%08x) at 0x%016lx\n",
@@ -589,7 +588,7 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 	if (interrupts_enabled(regs))
 		nmi_enter();
 
-	ret = ghes_notify_sea();
+	ghes_notify_sea();
 
 	if (interrupts_enabled(regs))
 		nmi_exit();
@@ -604,7 +603,7 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 	info.si_addr  = (void __user *)addr;
 	arm64_notify_die("", regs, &info, esr);
 
-	return ret;
+	return 0;
 }
 
 static const struct fault_info fault_info[] = {
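The return-value change is what stops the duplicate signal: do_sea() already reports the abort via arm64_notify_die(), but its caller raises a second signal whenever the handler returns non-zero, and ghes_notify_sea() returns non-zero when no GHES error source claims the abort. A simplified sketch of the caller's logic (illustrative, not the exact code):

    /* do_mem_abort(), simplified: */
    if (!inf->fn(addr, esr, regs))	/* do_sea() formerly returned the ghes status */
    	return;				/* handled: nothing more to do */

    /* falling through raises another SIGBUS for the same abort */
    arm64_notify_die("unhandled fault", regs, &info, esr);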
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -476,6 +476,8 @@ void __init arm64_memblock_init(void)
 
 	reserve_elfcorehdr();
 
+	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
+
 	dma_contiguous_reserve(arm64_dma_phys_limit);
 
 	memblock_allow_resize();
@@ -502,7 +504,6 @@ void __init bootmem_init(void)
 	sparse_init();
 	zone_sizes_init(min, max);
 
-	high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
 	memblock_dump_all();
 }
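Both assignments compute the same value, the virtual address one byte past the end of DRAM; the fix only moves it earlier in boot, before dma_contiguous_reserve() (which consults high_memory via cma_declare_contiguous()) runs. A worked sketch with assumed example values (4 KiB pages, DRAM ending at 0x8_0000_0000):

    /* max is the maximum pfn, so shifting by PAGE_SHIFT recovers the
     * physical end of DRAM that memblock reports:
     *   max << 12  == 0x800000000 == memblock_end_of_DRAM()
     * and either expression yields:
     *   high_memory = __va(0x7ffffffff) + 1
     * Only the point in boot at which it becomes valid changes. */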