Commit f4c8824c authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "An unfortunately large collection of arm64 fixes for -rc5.

  Some of this is absolutely trivial, but the alternatives, vDSO and CPU
  errata workaround fixes are significant. At least people are finding
  and fixing these things, I suppose.

   - Fix workaround for CPU erratum #1418040 to disable the compat vDSO

   - Fix Oops when single-stepping with KGDB

   - Fix memory attributes for hypervisor device mappings at EL2

   - Fix memory leak in PSCI and remove useless variable assignment

   - Fix up some comments and asm labels in our entry code

   - Fix broken register table formatting in our generated html docs

   - Fix missing NULL sentinel in CPU errata workaround list

   - Fix patching of branches in alternative instruction sections"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64/alternatives: don't patch up internal branches
  arm64: Add missing sentinel to erratum_1463225
  arm64: Documentation: Fix broken table in generated HTML
  arm64: kgdb: Fix single-step exception handling oops
  arm64: entry: Tidy up block comments and label numbers
  arm64: Rework ARM_ERRATUM_1414080 handling
  arm64: arch_timer: Disable the compat vdso for cores affected by ARM64_WORKAROUND_1418040
  arm64: arch_timer: Allow an workaround descriptor to disable compat vdso
  arm64: Introduce a way to disable the 32bit vdso
  arm64: entry: Fix the typo in the comment of el1_dbg()
  drivers/firmware/psci: Assign @err directly in hotplug_tests()
  drivers/firmware/psci: Fix memory leakage in alloc_init_cpu_groups()
  KVM: arm64: Fix definition of PAGE_HYP_DEVICE
parents e8749d06 5679b281
@@ -171,6 +171,7 @@ infrastructure:

 3) ID_AA64PFR1_EL1 - Processor Feature Register 1
+
      +------------------------------+---------+---------+
      | Name                         |  bits   | visible |
      +------------------------------+---------+---------+
@@ -181,6 +182,7 @@ infrastructure:

 4) MIDR_EL1 - Main ID Register
+
      +------------------------------+---------+---------+
      | Name                         |  bits   | visible |
      +------------------------------+---------+---------+
......
@@ -58,6 +58,7 @@ struct arch_timer_erratum_workaround {
 	u64 (*read_cntvct_el0)(void);
 	int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
 	int (*set_next_event_virt)(unsigned long, struct clock_event_device *);
+	bool disable_compat_vdso;
 };

 DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
......
@@ -67,7 +67,7 @@ extern bool arm64_use_ng_mappings;
 #define PAGE_HYP		__pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
 #define PAGE_HYP_EXEC		__pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
 #define PAGE_HYP_RO		__pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
-#define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
+#define PAGE_HYP_DEVICE		__pgprot(_PROT_DEFAULT | PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_HYP | PTE_HYP_XN)

 #define PAGE_S2_MEMATTR(attr)						\
 	({								\
......
@@ -3,6 +3,9 @@
 #define __ASM_VDSOCLOCKSOURCE_H

 #define VDSO_ARCH_CLOCKMODES					\
-	VDSO_CLOCKMODE_ARCHTIMER
+	/* vdso clocksource for both 32 and 64bit tasks */	\
+	VDSO_CLOCKMODE_ARCHTIMER,				\
+	/* vdso clocksource for 64bit tasks only */		\
+	VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT

 #endif
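The new mode slots into the generic vDSO machinery because VDSO_ARCH_CLOCKMODES is pasted straight into the clock-mode enum in include/vdso/clocksource.h. With the arm64 definition above, that enum expands roughly to the following (a simplified sketch; config guards and unrelated entries omitted):

	enum vdso_clock_mode {
		VDSO_CLOCKMODE_NONE,
		/* vdso clocksource for both 32 and 64bit tasks */
		VDSO_CLOCKMODE_ARCHTIMER,
		/* vdso clocksource for 64bit tasks only */
		VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT,
		VDSO_CLOCKMODE_MAX,
	};

Making _NOCOMPAT a distinct enumerator lets the timer driver publish a mode that only the 64-bit vDSO treats as usable.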
@@ -111,7 +111,7 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
 	 * update. Return something. Core will do another round and then
 	 * see the mode change and fallback to the syscall.
 	 */
-	if (clock_mode == VDSO_CLOCKMODE_NONE)
+	if (clock_mode != VDSO_CLOCKMODE_ARCHTIMER)
 		return 0;

 	/*
@@ -152,6 +152,12 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
 	return ret;
 }

+static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
+{
+	return vd->clock_mode == VDSO_CLOCKMODE_ARCHTIMER;
+}
+#define vdso_clocksource_ok	vdso_clocksource_ok
+
 #endif /* !__ASSEMBLY__ */

 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
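This hunk patches the compat (32-bit) vDSO's copy of the gettimeofday header: its vdso_clocksource_ok() override accepts only plain VDSO_CLOCKMODE_ARCHTIMER, while the 64-bit vDSO keeps the generic check, under which anything other than VDSO_CLOCKMODE_NONE is usable. A standalone model of the resulting split (hypothetical names, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	enum mode { MODE_NONE, MODE_ARCHTIMER, MODE_ARCHTIMER_NOCOMPAT };

	/* Generic fallback check, kept by the 64-bit vDSO. */
	static bool ok_native(enum mode m) { return m != MODE_NONE; }

	/* The compat override above: only plain ARCHTIMER is usable. */
	static bool ok_compat(enum mode m) { return m == MODE_ARCHTIMER; }

	int main(void)
	{
		enum mode m = MODE_ARCHTIMER_NOCOMPAT;

		/* Prints "native: 1 compat: 0". */
		printf("native: %d compat: %d\n", ok_native(m), ok_compat(m));
		return 0;
	}

When the driver publishes _NOCOMPAT, 32-bit tasks fail the check and fall back to the clock_gettime() syscall, which is exactly the erratum workaround.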
@@ -43,20 +43,8 @@ bool alternative_is_applied(u16 cpufeature)
  */
 static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
 {
-	unsigned long replptr;
-
-	if (kernel_text_address(pc))
-		return true;
-
-	replptr = (unsigned long)ALT_REPL_PTR(alt);
-	if (pc >= replptr && pc <= (replptr + alt->alt_len))
-		return false;
-
-	/*
-	 * Branching into *another* alternate sequence is doomed, and
-	 * we're not even trying to fix it up.
-	 */
-	BUG();
+	unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
+	return !(pc >= replptr && pc <= (replptr + alt->alt_len));
 }

 #define align_down(x, a)	((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
......
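The rewrite inverts the old logic: rather than BUG()-ing on unexpected targets, a branch is now fixed up unless it points back into this alternative's own replacement sequence, since that sequence is copied as-is and its internal relative branches stay correct. A standalone model of the new predicate (illustrative values, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	/* Mirror of the new branch_insn_requires_update() test. */
	static bool requires_update(unsigned long target,
				    unsigned long repl, unsigned long len)
	{
		return !(target >= repl && target <= repl + len);
	}

	int main(void)
	{
		unsigned long repl = 0x1000;	/* hypothetical replacement start */

		/* Target inside the sequence: leave the branch alone (0). */
		printf("internal: %d\n", requires_update(0x1004, repl, 8));
		/* Target outside: retarget it at the original code (1). */
		printf("external: %d\n", requires_update(0x2000, repl, 8));
		return 0;
	}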
@@ -782,6 +782,7 @@ static const struct midr_range erratum_1463225[] = {
 	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
 	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
 	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
+	{},
 };
 #endif
......
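The added {} matters because the midr_range list walkers stop at the first zeroed entry; without a sentinel the walk runs off the end of erratum_1463225[] into adjacent memory. A standalone sketch of the pattern (hypothetical values, modelled loosely on is_midr_in_range_list()):

	#include <stdbool.h>
	#include <stdio.h>

	struct midr_range { unsigned int model, rv_min, rv_max; };

	static bool in_range_list(unsigned int midr, const struct midr_range *r)
	{
		for (; r->model; r++)	/* relies on the zeroed sentinel */
			if (midr == r->model)
				return true;
		return false;
	}

	int main(void)
	{
		static const struct midr_range list[] = {
			{ 0x410fd0b0, 0, 1 },	/* hypothetical Cortex-A76 entry */
			{ 0 },			/* the patch's {} sentinel */
		};

		printf("%d\n", in_range_list(0x410fd0b0, list));
		return 0;
	}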
@@ -57,7 +57,7 @@ static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
 	/*
 	 * The CPU masked interrupts, and we are leaving them masked during
 	 * do_debug_exception(). Update PMR as if we had called
-	 * local_mask_daif().
+	 * local_daif_mask().
 	 */
 	if (system_uses_irq_prio_masking())
 		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
......
@@ -126,8 +126,10 @@ alternative_else_nop_endif
 	add	\dst, \dst, #(\sym - .entry.tramp.text)
 	.endm

-	// This macro corrupts x0-x3. It is the caller's duty
-	// to save/restore them if required.
+	/*
+	 * This macro corrupts x0-x3. It is the caller's duty to save/restore
+	 * them if required.
+	 */
 	.macro	apply_ssbd, state, tmp1, tmp2
 #ifdef CONFIG_ARM64_SSBD
 alternative_cb	arm64_enable_wa2_handling
@@ -167,13 +169,28 @@ alternative_cb_end
 	stp	x28, x29, [sp, #16 * 14]

 	.if	\el == 0
+	.if	\regsize == 32
+	/*
+	 * If we're returning from a 32-bit task on a system affected by
+	 * 1418040 then re-enable userspace access to the virtual counter.
+	 */
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+alternative_if ARM64_WORKAROUND_1418040
+	mrs	x0, cntkctl_el1
+	orr	x0, x0, #2			// ARCH_TIMER_USR_VCT_ACCESS_EN
+	msr	cntkctl_el1, x0
+alternative_else_nop_endif
+#endif
+	.endif
+
 	clear_gp_regs
 	mrs	x21, sp_el0
 	ldr_this_cpu	tsk, __entry_task, x20
 	msr	sp_el0, tsk

-	// Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
-	// when scheduling.
+	/*
+	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
+	 * when scheduling.
+	 */
 	ldr	x19, [tsk, #TSK_TI_FLAGS]
 	disable_step_tsk x19, x20
@@ -320,6 +337,14 @@ alternative_else_nop_endif
 	tst	x22, #PSR_MODE32_BIT		// native task?
 	b.eq	3f

+#ifdef CONFIG_ARM64_ERRATUM_1418040
+alternative_if ARM64_WORKAROUND_1418040
+	mrs	x0, cntkctl_el1
+	bic	x0, x0, #2			// ARCH_TIMER_USR_VCT_ACCESS_EN
+	msr	cntkctl_el1, x0
+alternative_else_nop_endif
+#endif
+
 #ifdef CONFIG_ARM64_ERRATUM_845719
 alternative_if ARM64_WORKAROUND_845719
 #ifdef CONFIG_PID_IN_CONTEXTIDR
@@ -331,21 +356,6 @@ alternative_if ARM64_WORKAROUND_845719
 alternative_else_nop_endif
 #endif
 3:
-#ifdef CONFIG_ARM64_ERRATUM_1418040
-alternative_if_not ARM64_WORKAROUND_1418040
-	b	4f
-alternative_else_nop_endif
-	/*
-	 * if (x22.mode32 == cntkctl_el1.el0vcten)
-	 *     cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten
-	 */
-	mrs	x1, cntkctl_el1
-	eon	x0, x1, x22, lsr #3
-	tbz	x0, #1, 4f
-	eor	x1, x1, #2			// ARCH_TIMER_USR_VCT_ACCESS_EN
-	msr	cntkctl_el1, x1
-4:
-#endif
 	scs_save tsk, x0

 	/* No kernel C function calls after this as user keys are set. */
@@ -377,11 +387,11 @@ alternative_else_nop_endif
 	.if	\el == 0
 alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-	bne	5f
+	bne	4f
 	msr	far_el1, x30
 	tramp_alias	x30, tramp_exit_native
 	br	x30
-5:
+4:
 	tramp_alias	x30, tramp_exit_compat
 	br	x30
 #endif
......
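The rework trades the old exit-path XOR trick (flip CNTKCTL_EL1.EL0VCTEN whenever it agrees with the 32-bit flag of the task being resumed) for explicit set/clear operations: clear the bit on the way out to a 32-bit task, set it again on entry from one, so 64-bit tasks always keep counter access. A C model of both schemes, using the real bit positions (EL0VCTEN is bit 1, PSR_MODE32_BIT is bit 4) but otherwise illustrative only:

	#include <stdio.h>

	#define VCTEN	(1u << 1)	/* CNTKCTL_EL1.EL0VCTEN */
	#define MODE32	(1u << 4)	/* PSR_MODE32_BIT, from x22/spsr */

	/* Old kernel_exit: eon x0, x1, x22, lsr #3 ; tbz x0, #1 ; eor. */
	static unsigned int old_exit(unsigned int cntkctl, unsigned int spsr)
	{
		unsigned int x0 = ~(cntkctl ^ (spsr >> 3));

		if (x0 & VCTEN)		/* el0vcten == mode32: flip it */
			cntkctl ^= VCTEN;
		return cntkctl;
	}

	/* New kernel_exit: unconditionally trap 32-bit tasks... */
	static unsigned int new_exit(unsigned int cntkctl, unsigned int spsr)
	{
		if (spsr & MODE32)
			cntkctl &= ~VCTEN;	/* bic x0, x0, #2 */
		return cntkctl;
	}

	/* ...and new kernel_entry: restore access when one traps in. */
	static unsigned int new_entry(unsigned int cntkctl, unsigned int spsr)
	{
		if (spsr & MODE32)
			cntkctl |= VCTEN;	/* orr x0, x0, #2 */
		return cntkctl;
	}

	int main(void)
	{
		/* Returning to a 32-bit task: both schemes clear EL0VCTEN. */
		printf("old %#x new %#x entry %#x\n",
		       old_exit(VCTEN, MODE32), new_exit(VCTEN, MODE32),
		       new_entry(0, MODE32));
		return 0;
	}

Removing the old mid-exit block also frees local label 4, hence the 5f/5: to 4f/4: renumbering in the trampoline exit path.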
@@ -252,7 +252,7 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
 	if (!kgdb_single_step)
 		return DBG_HOOK_ERROR;

-	kgdb_handle_exception(1, SIGTRAP, 0, regs);
+	kgdb_handle_exception(0, SIGTRAP, 0, regs);
 	return DBG_HOOK_HANDLED;
 }
 NOKPROBE_SYMBOL(kgdb_step_brk_fn);
......
@@ -480,6 +480,14 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
 		.set_next_event_virt = erratum_set_next_event_tval_virt,
 	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+	{
+		.match_type = ate_match_local_cap_id,
+		.id = (void *)ARM64_WORKAROUND_1418040,
+		.desc = "ARM erratum 1418040",
+		.disable_compat_vdso = true,
+	},
+#endif
 };

 typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
@@ -566,6 +574,9 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
 	if (wa->read_cntvct_el0) {
 		clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
 		vdso_default = VDSO_CLOCKMODE_NONE;
+	} else if (wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) {
+		vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT;
+		clocksource_counter.vdso_clock_mode = vdso_default;
 	}
 }
......
@@ -157,8 +157,10 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
 	cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups),
 			     GFP_KERNEL);
-	if (!cpu_groups)
+	if (!cpu_groups) {
+		free_cpumask_var(tmp);
 		return -ENOMEM;
+	}

 	cpumask_copy(tmp, cpu_online_mask);
@@ -167,6 +169,7 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
 			topology_core_cpumask(cpumask_any(tmp));

 		if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
+			free_cpumask_var(tmp);
 			free_cpu_groups(num_groups, &cpu_groups);
 			return -ENOMEM;
 		}
@@ -196,13 +199,12 @@ static int hotplug_tests(void)
 	if (!page_buf)
 		goto out_free_cpu_groups;

-	err = 0;
 	/*
 	 * Of course the last CPU cannot be powered down and cpu_down() should
 	 * refuse doing that.
 	 */
 	pr_info("Trying to turn off and on again all CPUs\n");
-	err += down_and_up_cpus(cpu_online_mask, offlined_cpus);
+	err = down_and_up_cpus(cpu_online_mask, offlined_cpus);

 	/*
 	 * Take down CPUs by cpu group this time. When the last CPU is turned
......
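Both psci_checker fixes apply the usual cleanup discipline: every early-exit path must free what the function has already allocated (the scratch mask 'tmp' here), and 'err' is assigned where it is first produced instead of being pre-initialised and accumulated with '+='. A minimal standalone sketch of the fixed shape (hypothetical allocations, not the kernel's code):

	#include <stdlib.h>

	static int alloc_groups(void ***out)
	{
		void *tmp = malloc(64);	/* scratch, like alloc_cpumask_var(&tmp) */
		if (!tmp)
			return -1;	/* -ENOMEM */

		void **groups = calloc(4, sizeof(*groups));
		if (!groups) {
			free(tmp);	/* leaked here before the fix */
			return -1;
		}

		groups[0] = malloc(16);
		if (!groups[0]) {
			free(tmp);	/* and on this inner path too */
			free(groups);
			return -1;
		}

		free(tmp);		/* scratch is freed on success as well */
		*out = groups;
		return 0;
	}

	int main(void)
	{
		void **g;

		if (alloc_groups(&g))
			return 1;
		free(g[0]);
		free(g);
		return 0;
	}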