Commit 9b971e77 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Apologies for this being so late, but we've uncovered a few nasty
  issues on arm64 which didn't settle down until yesterday and the fixes
  all look suitable for 4.3.  Of the four patches, three of them are
  Cc'd to stable, with the remaining patch fixing an issue that only
  took effect during the merge window.

  Summary:

   - Fix corruption in SWP emulation when STXR fails due to contention
   - Fix MMU re-initialisation when resuming from a low-power state
   - Fix stack unwinding code to match what ftrace expects
   - Fix relocation code in the EFI stub when DRAM base is not 2MB aligned"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64/efi: do not assume DRAM base is aligned to 2 MB
  Revert "ARM64: unwind: Fix PC calculation"
  arm64: kernel: fix tcr_el1.t0sz restore on systems with extended idmap
  arm64: compat: fix stxr failure case in SWP emulation
parents 7c0f488f 73effccb
...@@ -284,21 +284,23 @@ static void register_insn_emulation_sysctl(struct ctl_table *table) ...@@ -284,21 +284,23 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
__asm__ __volatile__( \ __asm__ __volatile__( \
ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \ ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \ CONFIG_ARM64_PAN) \
" mov %w2, %w1\n" \ "0: ldxr"B" %w2, [%3]\n" \
"0: ldxr"B" %w1, [%3]\n" \ "1: stxr"B" %w0, %w1, [%3]\n" \
"1: stxr"B" %w0, %w2, [%3]\n" \
" cbz %w0, 2f\n" \ " cbz %w0, 2f\n" \
" mov %w0, %w4\n" \ " mov %w0, %w4\n" \
" b 3f\n" \
"2:\n" \ "2:\n" \
" mov %w1, %w2\n" \
"3:\n" \
" .pushsection .fixup,\"ax\"\n" \ " .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \ " .align 2\n" \
"3: mov %w0, %w5\n" \ "4: mov %w0, %w5\n" \
" b 2b\n" \ " b 3b\n" \
" .popsection" \ " .popsection" \
" .pushsection __ex_table,\"a\"\n" \ " .pushsection __ex_table,\"a\"\n" \
" .align 3\n" \ " .align 3\n" \
" .quad 0b, 3b\n" \ " .quad 0b, 4b\n" \
" .quad 1b, 3b\n" \ " .quad 1b, 4b\n" \
" .popsection\n" \ " .popsection\n" \
ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \ ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \ CONFIG_ARM64_PAN) \
......
...@@ -25,10 +25,20 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg, ...@@ -25,10 +25,20 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
unsigned long kernel_size, kernel_memsize = 0; unsigned long kernel_size, kernel_memsize = 0;
unsigned long nr_pages; unsigned long nr_pages;
void *old_image_addr = (void *)*image_addr; void *old_image_addr = (void *)*image_addr;
unsigned long preferred_offset;
/*
* The preferred offset of the kernel Image is TEXT_OFFSET bytes beyond
* a 2 MB aligned base, which itself may be lower than dram_base, as
* long as the resulting offset equals or exceeds it.
*/
preferred_offset = round_down(dram_base, SZ_2M) + TEXT_OFFSET;
if (preferred_offset < dram_base)
preferred_offset += SZ_2M;
/* Relocate the image, if required. */ /* Relocate the image, if required. */
kernel_size = _edata - _text; kernel_size = _edata - _text;
if (*image_addr != (dram_base + TEXT_OFFSET)) { if (*image_addr != preferred_offset) {
kernel_memsize = kernel_size + (_end - _edata); kernel_memsize = kernel_size + (_end - _edata);
/* /*
...@@ -42,7 +52,7 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg, ...@@ -42,7 +52,7 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
* Mustang), we can still place the kernel at the address * Mustang), we can still place the kernel at the address
* 'dram_base + TEXT_OFFSET'. * 'dram_base + TEXT_OFFSET'.
*/ */
*image_addr = *reserve_addr = dram_base + TEXT_OFFSET; *image_addr = *reserve_addr = preferred_offset;
nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) / nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) /
EFI_PAGE_SIZE; EFI_PAGE_SIZE;
status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS, status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
......
...@@ -48,11 +48,7 @@ int notrace unwind_frame(struct stackframe *frame) ...@@ -48,11 +48,7 @@ int notrace unwind_frame(struct stackframe *frame)
frame->sp = fp + 0x10; frame->sp = fp + 0x10;
frame->fp = *(unsigned long *)(fp); frame->fp = *(unsigned long *)(fp);
/* frame->pc = *(unsigned long *)(fp + 8);
* -4 here because we care about the PC at time of bl,
* not where the return will go.
*/
frame->pc = *(unsigned long *)(fp + 8) - 4;
return 0; return 0;
} }
......
...@@ -80,17 +80,21 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) ...@@ -80,17 +80,21 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
if (ret == 0) { if (ret == 0) {
/* /*
* We are resuming from reset with TTBR0_EL1 set to the * We are resuming from reset with TTBR0_EL1 set to the
* idmap to enable the MMU; restore the active_mm mappings in * idmap to enable the MMU; set the TTBR0 to the reserved
* TTBR0_EL1 unless the active_mm == &init_mm, in which case * page tables to prevent speculative TLB allocations, flush
* the thread entered cpu_suspend with TTBR0_EL1 set to * the local tlb and set the default tcr_el1.t0sz so that
* reserved TTBR0 page tables and should be restored as such. * the TTBR0 address space set-up is properly restored.
* If the current active_mm != &init_mm we entered cpu_suspend
* with mappings in TTBR0 that must be restored, so we switch
* them back to complete the address space configuration
* restoration before returning.
*/ */
if (mm == &init_mm)
cpu_set_reserved_ttbr0(); cpu_set_reserved_ttbr0();
else
cpu_switch_mm(mm->pgd, mm);
flush_tlb_all(); flush_tlb_all();
cpu_set_default_tcr_t0sz();
if (mm != &init_mm)
cpu_switch_mm(mm->pgd, mm);
/* /*
* Restore per-cpu offset before any kernel * Restore per-cpu offset before any kernel
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment