Commit 43f0b562 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Minor fixes all over, ranging from typos to tests to errata
  workarounds:

   - Fix possible memory hotplug failure with KASLR

   - Fix FFR value in SVE kselftest

   - Fix backtraces reported in /proc/$pid/stack

   - Disable broken CnP implementation on NVIDIA Carmel

   - Typo fixes and ACPI documentation clarification

   - Fix some W=1 warnings"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: kernel: disable CNP on Carmel
  arm64/process.c: fix Wmissing-prototypes build warnings
  kselftest/arm64: sve: Do not use non-canonical FFR register value
  arm64: mm: correct the inside linear map range during hotplug check
  arm64: kdump: update ppos when reading elfcorehdr
  arm64: cpuinfo: Fix a typo
  Documentation: arm64/acpi : clarify arm64 support of IBFT
  arm64: stacktrace: don't trace arch_stack_walk()
  arm64: csum: cast to the proper type
parents 7aae5432 20109a85
@@ -17,12 +17,12 @@ For ACPI on arm64, tables also fall into the following categories:

   - Recommended: BERT, EINJ, ERST, HEST, PCCT, SSDT

-  - Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IORT,
-    MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT, STAO,
-    TCPA, TPM2, UEFI, XENV
+  - Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IBFT,
+    IORT, MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT,
+    STAO, TCPA, TPM2, UEFI, XENV

-  - Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IBFT, IVRS, LPIT,
-    MSDM, OEMx, PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
+  - Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IVRS, LPIT, MSDM, OEMx,
+    PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT

 ====== ========================================================================
 Table  Usage for ARMv8 Linux
...
@@ -130,6 +130,9 @@ stable kernels.
 | Marvell        | ARM-MMU-500     | #582743         | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
+| NVIDIA         | Carmel Core     | N/A             | NVIDIA_CARMEL_CNP_ERRATUM   |
++----------------+-----------------+-----------------+-----------------------------+
++----------------+-----------------+-----------------+-----------------------------+
 | Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
...
@@ -810,6 +810,16 @@ config QCOM_FALKOR_ERRATUM_E1041

 	  If unsure, say Y.

+config NVIDIA_CARMEL_CNP_ERRATUM
+	bool "NVIDIA Carmel CNP: CNP on Carmel semantically different than ARM cores"
+	default y
+	help
+	  If CNP is enabled on Carmel cores, non-sharable TLBIs on a core will not
+	  invalidate shared TLB entries installed by a different core, as it would
+	  on standard ARM cores.
+
+	  If unsure, say Y.
+
 config SOCIONEXT_SYNQUACER_PREITS
 	bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
 	default y
...
@@ -37,7 +37,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 	} while (--n > 0);

 	sum += ((sum >> 32) | (sum << 32));
-	return csum_fold((__force u32)(sum >> 32));
+	return csum_fold((__force __wsum)(sum >> 32));
 }
 #define ip_fast_csum ip_fast_csum
...
@@ -66,7 +66,8 @@
 #define ARM64_WORKAROUND_1508412		58
 #define ARM64_HAS_LDAPR				59
 #define ARM64_KVM_PROTECTED_MODE		60
+#define ARM64_WORKAROUND_NVIDIA_CARMEL_CNP	61

-#define ARM64_NCAPS				61
+#define ARM64_NCAPS				62

 #endif /* __ASM_CPUCAPS_H */
@@ -251,6 +251,8 @@ unsigned long get_wchan(struct task_struct *p);
 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
 					 struct task_struct *next);

+asmlinkage void arm64_preempt_schedule_irq(void);
+
 #define task_pt_regs(p) \
 	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
...
@@ -55,6 +55,8 @@ void arch_setup_new_exec(void);
 #define arch_setup_new_exec	arch_setup_new_exec

 void arch_release_task_struct(struct task_struct *tsk);
+int arch_dup_task_struct(struct task_struct *dst,
+			 struct task_struct *src);

 #endif
...
@@ -525,6 +525,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 			       0, 0,
 			       1, 0),
 	},
+#endif
+#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
+	{
+		/* NVIDIA Carmel */
+		.desc = "NVIDIA Carmel CNP erratum",
+		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
+		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+	},
 #endif
 	{
 	}
...
@@ -1324,6 +1324,9 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
 	if (is_kdump_kernel())
 		return false;

+	if (cpus_have_const_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
+		return false;
+
 	return has_cpuid_feature(entry, scope);
 }
...
@@ -353,7 +353,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 	 * with the CLIDR_EL1 fields to avoid triggering false warnings
 	 * when there is a mismatch across the CPUs. Keep track of the
 	 * effective value of the CTR_EL0 in our internal records for
-	 * acurate sanity check and feature enablement.
+	 * accurate sanity check and feature enablement.
 	 */
 	info->reg_ctr = read_cpuid_effective_cachetype();
 	info->reg_dczid = read_cpuid(DCZID_EL0);
...
@@ -64,5 +64,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 {
 	memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
+	*ppos += count;
+
 	return count;
 }
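The one-line fix above matters because elfcorehdr_read() takes the read offset by pointer: any caller that reads the ELF core header in more than one call relies on that offset moving forward. A minimal user-space sketch of the contract, with made-up data and a hypothetical read_chunk() helper standing in for the kernel function:

/*
 * Illustration only (not kernel code): why elfcorehdr_read() must advance
 * *ppos. Without "*ppos += count", every call would return the first bytes
 * of the header again and the loop below would never terminate.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const char elfcorehdr[64] = "pretend this is the ELF core header";

/* Mirrors the fixed behaviour: copy the chunk, then move the offset. */
static size_t read_chunk(char *buf, size_t count, uint64_t *ppos)
{
	memcpy(buf, elfcorehdr + *ppos, count);
	*ppos += count;		/* the line the fix adds */
	return count;
}

int main(void)
{
	char out[64];
	uint64_t pos = 0;

	while (pos < sizeof(out))	/* terminates only because pos advances */
		read_chunk(out + pos, 16, &pos);

	printf("read %llu bytes\n", (unsigned long long)pos);
	return 0;
}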
@@ -57,6 +57,8 @@
 #include <asm/processor.h>
 #include <asm/pointer_auth.h>
 #include <asm/stacktrace.h>
+#include <asm/switch_to.h>
+#include <asm/system_misc.h>

 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 #include <linux/stackprotector.h>
...
@@ -194,8 +194,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)

 #ifdef CONFIG_STACKTRACE

-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
-		     struct task_struct *task, struct pt_regs *regs)
+noinline void arch_stack_walk(stack_trace_consume_fn consume_entry,
+			      void *cookie, struct task_struct *task,
+			      struct pt_regs *regs)
 {
 	struct stackframe frame;

@@ -203,8 +204,8 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
 		start_backtrace(&frame, regs->regs[29], regs->pc);
 	else if (task == current)
 		start_backtrace(&frame,
-				(unsigned long)__builtin_frame_address(0),
-				(unsigned long)arch_stack_walk);
+				(unsigned long)__builtin_frame_address(1),
+				(unsigned long)__builtin_return_address(0));
 	else
 		start_backtrace(&frame, thread_saved_fp(task),
 				thread_saved_pc(task));
...
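The function is now marked noinline and seeds the unwind with its caller's frame pointer and its own return address, so arch_stack_walk() itself no longer appears in backtraces such as /proc/$pid/stack. A stand-alone sketch of the two GCC builtins involved, assuming a frame-pointer build; the function names below are invented for illustration:

/*
 * Illustration of the builtins used by the fix, outside the kernel.
 *   __builtin_frame_address(1)  - frame pointer of the caller
 *   __builtin_return_address(0) - address this function will return to
 * Build with frame pointers, e.g. gcc -fno-omit-frame-pointer demo.c
 */
#include <stdio.h>

/* noinline, like arch_stack_walk(), so a real caller frame exists. */
__attribute__((noinline)) static void start_of_walk(void)
{
	void *caller_fp = __builtin_frame_address(1);
	void *return_pc = __builtin_return_address(0);

	/* An unwind seeded with these values begins at the caller. */
	printf("caller fp=%p, return pc=%p\n", caller_fp, return_pc);
}

int main(void)
{
	start_of_walk();
	return 0;
}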
@@ -1448,6 +1448,22 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
 struct range arch_get_mappable_range(void)
 {
 	struct range mhp_range;
+	u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
+	u64 end_linear_pa = __pa(PAGE_END - 1);
+
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+		/*
+		 * Check for a wrap, it is possible because of randomized linear
+		 * mapping the start physical address is actually bigger than
+		 * the end physical address. In this case set start to zero
+		 * because [0, end_linear_pa] range must still be able to cover
+		 * all addressable physical addresses.
+		 */
+		if (start_linear_pa > end_linear_pa)
+			start_linear_pa = 0;
+	}
+
+	WARN_ON(start_linear_pa > end_linear_pa);

 	/*
 	 * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
@@ -1455,8 +1471,9 @@ struct range arch_get_mappable_range(void)
 	 * range which can be mapped inside this linear mapping range, must
 	 * also be derived from its end points.
 	 */
-	mhp_range.start = __pa(_PAGE_OFFSET(vabits_actual));
-	mhp_range.end =  __pa(PAGE_END - 1);
+	mhp_range.start = start_linear_pa;
+	mhp_range.end =  end_linear_pa;
+
 	return mhp_range;
 }
...
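The new check covers the case where KASLR randomizes the linear map so that the physical address of its start ends up above the physical address of its end; clamping the start to zero keeps the hotplug range valid. A small numeric sketch of just the clamp, using arbitrary example addresses rather than real kernel values:

/*
 * Illustration only: the clamp applied when __pa(linear map start)
 * has wrapped above __pa(linear map end) under CONFIG_RANDOMIZE_BASE.
 * The two addresses below are made-up example values.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start_linear_pa = 0xfff0000000000000ULL;	/* wrapped past the end */
	uint64_t end_linear_pa   = 0x0000007fffffffffULL;

	if (start_linear_pa > end_linear_pa)
		start_linear_pa = 0;	/* [0, end] still covers every usable PA */

	printf("hotplug range: [%#llx, %#llx]\n",
	       (unsigned long long)start_linear_pa,
	       (unsigned long long)end_linear_pa);
	return 0;
}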
@@ -284,16 +284,28 @@ endfunction
 // Set up test pattern in the FFR
 // x0: pid
 // x2: generation
+//
+// We need to generate a canonical FFR value, which consists of a number of
+// low "1" bits, followed by a number of zeros. This gives us 17 unique values
+// per 16 bits of FFR, so we create a 4 bit signature out of the PID and
+// generation, and use that as the initial number of ones in the pattern.
+// We fill the upper lanes of FFR with zeros.
 // Beware: corrupts P0.
 function setup_ffr
 	mov	x4, x30

-	bl	pattern
+	and	w0, w0, #0x3
+	bfi	w0, w2, #2, #2
+	mov	w1, #1
+	lsl	w1, w1, w0
+	sub	w1, w1, #1
+
 	ldr	x0, =ffrref
-	ldr	x1, =scratch
-	rdvl	x2, #1
-	lsr	x2, x2, #3
-	bl	memcpy
+	strh	w1, [x0], 2
+	rdvl	x1, #1
+	lsr	x1, x1, #3
+	sub	x1, x1, #2
+	bl	memclr

 	mov	x0, #0
 	ldr	x1, =ffrref
...
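The assembly above builds the pattern with plain integer arithmetic: two low bits of the PID and two low bits of the generation form a 4-bit signature, which is used as the count of low set bits in the first 16-bit chunk of FFR. A C rendering of the same computation, with an illustrative function name rather than anything from the selftest:

/*
 * C equivalent of the FFR pattern built by setup_ffr above:
 * sig = 2 low bits of pid | (2 low bits of generation << 2),
 * pattern = sig low "1" bits followed by zeros, stored in the first
 * 16-bit chunk of FFR (the remaining chunks are cleared to zero).
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t ffr_pattern(uint32_t pid, uint32_t generation)
{
	uint32_t sig = (pid & 0x3) | ((generation & 0x3) << 2);	/* 0..15 */

	return (uint16_t)((1u << sig) - 1);	/* sig ones in the low bits */
}

int main(void)
{
	/* e.g. pid=1, gen=0 -> 0x1; pid=3, gen=3 -> 0x7fff */
	printf("%#x %#x\n", ffr_pattern(1, 0), ffr_pattern(3, 3));
	return 0;
}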