Commit 3f35db4e authored by Will Deacon

Merge branch 'for-next/cpufeature' into for-next/core

* for-next/cpufeature:
  arm64: Align boot cpucap handling with system cpucap handling
  arm64: Cleanup system cpucap handling
  arm64: Kconfig: drop KAISER reference from KPTI option description
  arm64: mm: Only map KPTI trampoline if it is going to be used
  arm64: Get rid of ARM64_HAS_NO_HW_PREFETCH
parents 2cc14f52 eb15d707
@@ -1549,7 +1549,7 @@ config ARCH_FORCE_MAX_ORDER
 	  Don't change if unsure.
 
 config UNMAP_KERNEL_AT_EL0
-	bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT
+	bool "Unmap kernel when running in userspace (KPTI)" if EXPERT
 	default y
 	help
 	  Speculation attacks against some high-performance processors can

@@ -617,6 +617,7 @@ static inline bool id_aa64pfr1_mte(u64 pfr1)
 	return val >= ID_AA64PFR1_EL1_MTE_MTE2;
 }
 
+void __init setup_boot_cpu_features(void);
 void __init setup_system_features(void);
 void __init setup_user_features(void);
 

@@ -1081,25 +1081,6 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
 	if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
 		init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);
 
-	/*
-	 * Initialize the indirect array of CPU capabilities pointers before we
-	 * handle the boot CPU below.
-	 */
-	init_cpucap_indirect_list();
-
-	/*
-	 * Detect broken pseudo-NMI. Must be called _before_ the call to
-	 * setup_boot_cpu_capabilities() since it interacts with
-	 * can_use_gic_priorities().
-	 */
-	detect_system_supports_pseudo_nmi();
-
-	/*
-	 * Detect and enable early CPU capabilities based on the boot CPU,
-	 * after we have initialised the CPU feature infrastructure.
-	 */
-	setup_boot_cpu_capabilities();
 }
 
 static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
@@ -1584,16 +1565,6 @@ static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry,
 	return has_sre;
 }
 
-static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
-{
-	u32 midr = read_cpuid_id();
-
-	/* Cavium ThunderX pass 1.x and 2.x */
-	return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
-		MIDR_CPU_VAR_REV(0, 0),
-		MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
-}
-
 static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
 			  int scope)
 {
@@ -2321,12 +2292,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, ATOMIC, IMP)
 	},
 #endif /* CONFIG_ARM64_LSE_ATOMICS */
-	{
-		.desc = "Software prefetching using PRFM",
-		.capability = ARM64_HAS_NO_HW_PREFETCH,
-		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
-		.matches = has_no_hw_prefetch,
-	},
 	{
 		.desc = "Virtualization Host Extensions",
 		.capability = ARM64_HAS_VIRT_HOST_EXTN,
@@ -3271,14 +3236,6 @@ void check_local_cpu_capabilities(void)
 		verify_local_cpu_capabilities();
 }
 
-static void __init setup_boot_cpu_capabilities(void)
-{
-	/* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */
-	update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
-
-	/* Enable the SCOPE_BOOT_CPU capabilities alone right away */
-	enable_cpu_capabilities(SCOPE_BOOT_CPU);
-}
-
 bool this_cpu_has_cap(unsigned int n)
 {
 	if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
@@ -3334,37 +3291,52 @@ unsigned long cpu_get_elf_hwcap2(void)
 	return elf_hwcap[1];
 }
 
-void __init setup_system_features(void)
-{
-	int i;
-	/*
-	 * The system-wide safe feature feature register values have been
-	 * finalized. Finalize and log the available system capabilities.
-	 */
-	update_cpu_capabilities(SCOPE_SYSTEM);
-	if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
-	    !cpus_have_cap(ARM64_HAS_PAN))
-		pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
-
-	/*
-	 * Enable all the available capabilities which have not been enabled
-	 * already.
-	 */
-	enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
-
-	kpti_install_ng_mappings();
-
-	sve_setup();
-	sme_setup();
-
-	/*
-	 * Check for sane CTR_EL0.CWG value.
-	 */
-	if (!cache_type_cwg())
-		pr_warn("No Cache Writeback Granule information, assuming %d\n",
-			ARCH_DMA_MINALIGN);
-
-	for (i = 0; i < ARM64_NCAPS; i++) {
+static void __init setup_boot_cpu_capabilities(void)
+{
+	/*
+	 * The boot CPU's feature register values have been recorded. Detect
+	 * boot cpucaps and local cpucaps for the boot CPU, then enable and
+	 * patch alternatives for the available boot cpucaps.
+	 */
+	update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
+	enable_cpu_capabilities(SCOPE_BOOT_CPU);
+	apply_boot_alternatives();
+}
+
+void __init setup_boot_cpu_features(void)
+{
+	/*
+	 * Initialize the indirect array of CPU capabilities pointers before we
+	 * handle the boot CPU.
+	 */
+	init_cpucap_indirect_list();
+
+	/*
+	 * Detect broken pseudo-NMI. Must be called _before_ the call to
+	 * setup_boot_cpu_capabilities() since it interacts with
+	 * can_use_gic_priorities().
+	 */
+	detect_system_supports_pseudo_nmi();
+
+	setup_boot_cpu_capabilities();
+}
+
+static void __init setup_system_capabilities(void)
+{
+	/*
+	 * The system-wide safe feature register values have been finalized.
+	 * Detect, enable, and patch alternatives for the available system
+	 * cpucaps.
+	 */
+	update_cpu_capabilities(SCOPE_SYSTEM);
+	enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
+	apply_alternatives_all();
+
+	/*
+	 * Log any cpucaps with a cpumask as these aren't logged by
+	 * update_cpu_capabilities().
+	 */
+	for (int i = 0; i < ARM64_NCAPS; i++) {
 		const struct arm64_cpu_capabilities *caps = cpucap_ptrs[i];
 
 		if (caps && caps->cpus && caps->desc &&
@@ -3372,6 +3344,29 @@ void __init setup_system_features(void)
 		    pr_info("detected: %s on CPU%*pbl\n",
 			    caps->desc, cpumask_pr_args(caps->cpus));
 	}
+
+	/*
+	 * TTBR0 PAN doesn't have its own cpucap, so log it manually.
+	 */
+	if (system_uses_ttbr0_pan())
+		pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
+}
+
+void __init setup_system_features(void)
+{
+	setup_system_capabilities();
+
+	kpti_install_ng_mappings();
+
+	sve_setup();
+	sme_setup();
+
+	/*
+	 * Check for sane CTR_EL0.CWG value.
+	 */
+	if (!cache_type_cwg())
+		pr_warn("No Cache Writeback Granule information, assuming %d\n",
+			ARCH_DMA_MINALIGN);
 }
 
 void __init setup_user_features(void)

@@ -1171,7 +1171,7 @@ void __init sve_setup(void)
 	unsigned long b;
 	int max_bit;
 
-	if (!cpus_have_cap(ARM64_SVE))
+	if (!system_supports_sve())
 		return;
 
 	/*
@@ -1301,7 +1301,7 @@ void __init sme_setup(void)
 	struct vl_info *info = &vl_info[ARM64_VEC_SME];
 	int min_bit, max_bit;
 
-	if (!cpus_have_cap(ARM64_SME))
+	if (!system_supports_sme())
 		return;
 
 	/*

@@ -439,9 +439,8 @@ static void __init hyp_mode_check(void)
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
-	setup_system_features();
 	hyp_mode_check();
-	apply_alternatives_all();
+	setup_system_features();
 	setup_user_features();
 	mark_linear_text_alias_ro();
 }
@@ -454,14 +453,9 @@ void __init smp_prepare_boot_cpu(void)
 	 * freed shortly, so we must move over to the runtime per-cpu area.
 	 */
 	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-	cpuinfo_store_boot_cpu();
 
-	/*
-	 * We now know enough about the boot CPU to apply the
-	 * alternatives that cannot wait until interrupt handling
-	 * and/or scheduling is enabled.
-	 */
-	apply_boot_alternatives();
+	cpuinfo_store_boot_cpu();
+	setup_boot_cpu_features();
 
 	/* Conditionally switch to GIC PMR for interrupt masking */
 	if (system_uses_irq_prio_masking())

@@ -18,13 +18,6 @@
  *	x1 - src
  */
 SYM_FUNC_START(__pi_copy_page)
-alternative_if ARM64_HAS_NO_HW_PREFETCH
-	// Prefetch three cache lines ahead.
-	prfm	pldl1strm, [x1, #128]
-	prfm	pldl1strm, [x1, #256]
-	prfm	pldl1strm, [x1, #384]
-alternative_else_nop_endif
-
 	ldp	x2, x3, [x1]
 	ldp	x4, x5, [x1, #16]
 	ldp	x6, x7, [x1, #32]
@@ -39,10 +32,6 @@ alternative_else_nop_endif
 1:
 	tst	x0, #(PAGE_SIZE - 1)
 
-alternative_if ARM64_HAS_NO_HW_PREFETCH
-	prfm	pldl1strm, [x1, #384]
-alternative_else_nop_endif
-
 	stnp	x2, x3, [x0, #-256]
 	ldp	x2, x3, [x1]
 	stnp	x4, x5, [x0, #16 - 256]

@@ -674,6 +674,9 @@ static int __init map_entry_trampoline(void)
 {
 	int i;
 
+	if (!arm64_kernel_unmapped_at_el0())
+		return 0;
+
 	pgprot_t prot = kernel_exec_prot();
 	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

@@ -40,7 +40,6 @@ HAS_LDAPR
 HAS_LSE_ATOMICS
 HAS_MOPS
 HAS_NESTED_VIRT
-HAS_NO_HW_PREFETCH
 HAS_PAN
 HAS_S1PIE
 HAS_RAS_EXTN