Commit a956f4e2 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Three arm64 fixes for -rc8/final.

  The MTE and stolen time fixes have been doing the rounds for a little
  while, but review and testing feedback was ongoing until earlier this
  week. The kexec fix showed up on Monday and addresses a failure
  observed under Qemu.

  Summary:

   - Add missing write barrier to publish MTE tags before a pte update

   - Fix kexec relocation clobbering its own data structures

   - Fix stolen time crash if a timer IRQ fires during CPU hotplug"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: mte: Ensure the cleared tags are visible before setting the PTE
  arm64: kexec: load from kimage prior to clobbering
  arm64: paravirt: Use RCU read locks to guard stolen_time
parents 3d7285a3 1d0cb4c8
@@ -76,6 +76,9 @@ void mte_sync_tags(pte_t old_pte, pte_t pte)
 			mte_sync_page_tags(page, old_pte, check_swap,
 					   pte_is_tagged);
 	}
+
+	/* ensure the tags are visible before the PTE is set */
+	smp_wmb();
 }
 
 int memcmp_pages(struct page *page1, struct page *page2)
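The added smp_wmb() orders the tag stores before the subsequent PTE store, so any observer that sees the valid PTE is guaranteed to also see the cleared tags. As a rough analogue of that publish/consume shape, here is a hypothetical standalone C11 sketch (names invented for illustration; in the kernel the "acquire" side is provided by the page-table walk's dependency, not an explicit atomic load):

#include <stdatomic.h>
#include <stdio.h>

static int tags;              /* stands in for the page's MTE tags */
static atomic_int pte_valid;  /* stands in for the PTE becoming valid */

/* Writer: clear the tags, then publish the mapping. */
static void set_page(void)
{
	tags = 0;	/* analogue of clearing/synchronising the tags */
	/* release ordering plays the role of smp_wmb() + PTE store */
	atomic_store_explicit(&pte_valid, 1, memory_order_release);
}

/* Reader: only touches the tags once the "PTE" is visible. */
static int access_page(void)
{
	if (atomic_load_explicit(&pte_valid, memory_order_acquire))
		return tags;	/* ordered: sees the cleared tags */
	return -1;		/* mapping not published yet */
}

int main(void)
{
	set_page();
	printf("%d\n", access_page());
	return 0;
}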
@@ -35,7 +35,7 @@ static u64 native_steal_clock(int cpu)
 DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
 
 struct pv_time_stolen_time_region {
-	struct pvclock_vcpu_stolen_time *kaddr;
+	struct pvclock_vcpu_stolen_time __rcu *kaddr;
 };
 
 static DEFINE_PER_CPU(struct pv_time_stolen_time_region, stolen_time_region);
@@ -52,7 +52,9 @@ early_param("no-steal-acc", parse_no_stealacc);
 /* return stolen time in ns by asking the hypervisor */
 static u64 para_steal_clock(int cpu)
 {
+	struct pvclock_vcpu_stolen_time *kaddr = NULL;
 	struct pv_time_stolen_time_region *reg;
+	u64 ret = 0;
 
 	reg = per_cpu_ptr(&stolen_time_region, cpu);
@@ -61,28 +63,37 @@ static u64 para_steal_clock(int cpu)
 	 * online notification callback runs. Until the callback
 	 * has run we just return zero.
 	 */
-	if (!reg->kaddr)
+	rcu_read_lock();
+	kaddr = rcu_dereference(reg->kaddr);
+	if (!kaddr) {
+		rcu_read_unlock();
 		return 0;
+	}
 
-	return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time));
+	ret = le64_to_cpu(READ_ONCE(kaddr->stolen_time));
+	rcu_read_unlock();
+	return ret;
 }
 
 static int stolen_time_cpu_down_prepare(unsigned int cpu)
 {
+	struct pvclock_vcpu_stolen_time *kaddr = NULL;
 	struct pv_time_stolen_time_region *reg;
 
 	reg = this_cpu_ptr(&stolen_time_region);
 	if (!reg->kaddr)
 		return 0;
 
-	memunmap(reg->kaddr);
-	memset(reg, 0, sizeof(*reg));
+	kaddr = rcu_replace_pointer(reg->kaddr, NULL, true);
+	synchronize_rcu();
+	memunmap(kaddr);
 
 	return 0;
 }
 
 static int stolen_time_cpu_online(unsigned int cpu)
 {
+	struct pvclock_vcpu_stolen_time *kaddr = NULL;
 	struct pv_time_stolen_time_region *reg;
 	struct arm_smccc_res res;
@@ -93,17 +104,19 @@ static int stolen_time_cpu_online(unsigned int cpu)
 	if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
 		return -EINVAL;
 
-	reg->kaddr = memremap(res.a0,
+	kaddr = memremap(res.a0,
 			      sizeof(struct pvclock_vcpu_stolen_time),
 			      MEMREMAP_WB);
 
+	rcu_assign_pointer(reg->kaddr, kaddr);
+
 	if (!reg->kaddr) {
 		pr_warn("Failed to map stolen time data structure\n");
 		return -ENOMEM;
 	}
 
-	if (le32_to_cpu(reg->kaddr->revision) != 0 ||
-	    le32_to_cpu(reg->kaddr->attributes) != 0) {
+	if (le32_to_cpu(kaddr->revision) != 0 ||
+	    le32_to_cpu(kaddr->attributes) != 0) {
 		pr_warn_once("Unexpected revision or attributes in stolen time data\n");
 		return -ENXIO;
 	}
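The diff follows the classic RCU lifecycle: the CPU-online path publishes the mapping with rcu_assign_pointer(), readers sample it under rcu_read_lock()/rcu_dereference(), and the hotplug-down path swaps the pointer to NULL and waits in synchronize_rcu() before memunmap(), so a timer IRQ racing with hotplug can no longer dereference a stale mapping. A minimal userspace sketch of the same pattern, assuming liburcu (build with -lurcu; the struct and helper names here are hypothetical, not the kernel's):

#include <urcu.h>	/* userspace RCU: https://liburcu.org */
#include <stdio.h>
#include <stdlib.h>

struct stolen_time { long ns; };

static struct stolen_time *region;	/* RCU-protected pointer */

/* Reader (para_steal_clock() analogue): lockless, guarded by RCU. */
static long read_stolen(void)
{
	struct stolen_time *p;
	long ret = 0;

	rcu_read_lock();
	p = rcu_dereference(region);
	if (p)
		ret = p->ns;
	rcu_read_unlock();
	return ret;
}

/* Online-path analogue: allocate/map, then publish. */
static void publish(long ns)
{
	struct stolen_time *p = malloc(sizeof(*p));

	p->ns = ns;
	rcu_assign_pointer(region, p);	/* pairs with rcu_dereference() */
}

/* Hotplug-down analogue: unpublish, wait out readers, then free. */
static void retire(void)
{
	struct stolen_time *old = rcu_xchg_pointer(&region, NULL);

	synchronize_rcu();	/* no reader can still hold 'old' */
	free(old);
}

int main(void)
{
	rcu_register_thread();
	publish(42);
	printf("stolen: %ld\n", read_stolen());
	retire();
	rcu_unregister_thread();
	return 0;
}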
@@ -37,6 +37,15 @@
  * safe memory that has been set up to be preserved during the copy operation.
  */
 SYM_CODE_START(arm64_relocate_new_kernel)
+	/*
+	 * The kimage structure isn't allocated specially and may be clobbered
+	 * during relocation. We must load any values we need from it prior to
+	 * any relocation occurring.
+	 */
+	ldr	x28, [x0, #KIMAGE_START]
+	ldr	x27, [x0, #KIMAGE_ARCH_EL2_VECTORS]
+	ldr	x26, [x0, #KIMAGE_ARCH_DTB_MEM]
+
 	/* Setup the list loop variables. */
 	ldr	x18, [x0, #KIMAGE_ARCH_ZERO_PAGE]	/* x18 = zero page for BBM */
 	ldr	x17, [x0, #KIMAGE_ARCH_TTBR1]		/* x17 = linear map copy */
@@ -72,21 +81,20 @@ SYM_CODE_START(arm64_relocate_new_kernel)
 	ic	iallu
 	dsb	nsh
 	isb
-	ldr	x4, [x0, #KIMAGE_START]			/* relocation start */
-	ldr	x1, [x0, #KIMAGE_ARCH_EL2_VECTORS]	/* relocation start */
-	ldr	x0, [x0, #KIMAGE_ARCH_DTB_MEM]		/* dtb address */
 	turn_off_mmu x12, x13
 
 	/* Start new image. */
-	cbz	x1, .Lel1
-	mov	x1, x4				/* relocation start */
-	mov	x2, x0				/* dtb address */
+	cbz	x27, .Lel1
+	mov	x1, x28				/* kernel entry point */
+	mov	x2, x26				/* dtb address */
 	mov	x3, xzr
 	mov	x4, xzr
 	mov	x0, #HVC_SOFT_RESTART
 	hvc	#0				/* Jumps from el2 */
 .Lel1:
+	mov	x0, x26				/* dtb address */
 	mov	x1, xzr
 	mov	x2, xzr
 	mov	x3, xzr
-	br	x4				/* Jumps from el1 */
+	br	x28				/* Jumps from el1 */
 SYM_CODE_END(arm64_relocate_new_kernel)
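The bug here was that arm64_relocate_new_kernel read the entry point, EL2 vectors and DTB address out of the kimage structure after the copy loop may already have overwritten it; the fix snapshots them into x28/x27/x26 before any relocation happens. The same hazard in miniature, as a hypothetical C sketch (all names invented for illustration):

#include <stdint.h>
#include <string.h>

/* Stand-ins for the kimage fields loaded into x28/x27/x26. */
struct boot_args {
	uint64_t entry;		/* KIMAGE_START */
	uint64_t el2_vectors;	/* 0 means enter the new kernel at EL1 */
	uint64_t dtb;		/* KIMAGE_ARCH_DTB_MEM */
};

static void start_new_kernel(const struct boot_args *a)
{
	(void)a;	/* placeholder for the hvc/br sequence */
}

static void relocate(struct boot_args *args,
		     void *dst, const void *src, size_t len)
{
	/*
	 * The args block is not specially allocated and may sit inside the
	 * destination range. Snapshot every needed field BEFORE the copy,
	 * exactly as the assembly now loads x28/x27/x26 first.
	 */
	struct boot_args saved = *args;

	memmove(dst, src, len);		/* may clobber *args */

	start_new_kernel(&saved);	/* reads the snapshot, not *args */
}

int main(void)
{
	char buf[2][64] = { { 0 } };
	struct boot_args args = { .entry = 1, .el2_vectors = 0, .dtb = 2 };

	relocate(&args, buf[0], buf[1], sizeof(buf[0]));
	return 0;
}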