Commit cf3488fa authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "It's a lot smaller than last week, with the star of the show being a
  couple of fixes to head.S addressing a boot regression introduced by
  the recent overhaul of that code in non-default configurations (i.e.
  KASLR disabled).

  The first of those two resolves the issue reported (and bisected) by
  Mikulas in the wait_on_bit() thread.

  Summary:

   - Fix two boot issues caused by the recent head.S rework when !KASLR

   - Fix calculation of crashkernel memory reservation

   - Fix bogus error check in PMU IRQ probing code"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: mm: Reserve enough pages for the initial ID map
  perf/arm_pmu_platform: fix tests for platform_get_irq() failure
  arm64: head: Ignore bogus KASLR displacement on non-relocatable kernels
  arm64/kexec: Fix missing extra range for crashkres_low.
parents 42e66b1c 5fbc49ce
...@@ -64,28 +64,28 @@ ...@@ -64,28 +64,28 @@
#define EARLY_KASLR (0) #define EARLY_KASLR (0)
#endif #endif
#define EARLY_ENTRIES(vstart, vend, shift) \ #define EARLY_ENTRIES(vstart, vend, shift, add) \
((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR) ((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + add)
#define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT)) #define EARLY_PGDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT, add))
#if SWAPPER_PGTABLE_LEVELS > 3 #if SWAPPER_PGTABLE_LEVELS > 3
#define EARLY_PUDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT)) #define EARLY_PUDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT, add))
#else #else
#define EARLY_PUDS(vstart, vend) (0) #define EARLY_PUDS(vstart, vend, add) (0)
#endif #endif
#if SWAPPER_PGTABLE_LEVELS > 2 #if SWAPPER_PGTABLE_LEVELS > 2
#define EARLY_PMDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT)) #define EARLY_PMDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT, add))
#else #else
#define EARLY_PMDS(vstart, vend) (0) #define EARLY_PMDS(vstart, vend, add) (0)
#endif #endif
#define EARLY_PAGES(vstart, vend) ( 1 /* PGDIR page */ \ #define EARLY_PAGES(vstart, vend, add) ( 1 /* PGDIR page */ \
+ EARLY_PGDS((vstart), (vend)) /* each PGDIR needs a next level page table */ \ + EARLY_PGDS((vstart), (vend), add) /* each PGDIR needs a next level page table */ \
+ EARLY_PUDS((vstart), (vend)) /* each PUD needs a next level page table */ \ + EARLY_PUDS((vstart), (vend), add) /* each PUD needs a next level page table */ \
+ EARLY_PMDS((vstart), (vend))) /* each PMD needs a next level page table */ + EARLY_PMDS((vstart), (vend), add)) /* each PMD needs a next level page table */
#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end)) #define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end, EARLY_KASLR))
/* the initial ID map may need two extra pages if it needs to be extended */ /* the initial ID map may need two extra pages if it needs to be extended */
#if VA_BITS < 48 #if VA_BITS < 48
...@@ -93,7 +93,7 @@ ...@@ -93,7 +93,7 @@
#else #else
#define INIT_IDMAP_DIR_SIZE (INIT_IDMAP_DIR_PAGES * PAGE_SIZE) #define INIT_IDMAP_DIR_SIZE (INIT_IDMAP_DIR_PAGES * PAGE_SIZE)
#endif #endif
#define INIT_IDMAP_DIR_PAGES EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE) #define INIT_IDMAP_DIR_PAGES EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE, 1)
/* Initial memory map size */ /* Initial memory map size */
#if ARM64_KERNEL_USES_PMD_MAPS #if ARM64_KERNEL_USES_PMD_MAPS
......
...@@ -371,7 +371,9 @@ SYM_FUNC_END(create_idmap) ...@@ -371,7 +371,9 @@ SYM_FUNC_END(create_idmap)
SYM_FUNC_START_LOCAL(create_kernel_mapping) SYM_FUNC_START_LOCAL(create_kernel_mapping)
adrp x0, init_pg_dir adrp x0, init_pg_dir
mov_q x5, KIMAGE_VADDR // compile time __va(_text) mov_q x5, KIMAGE_VADDR // compile time __va(_text)
#ifdef CONFIG_RELOCATABLE
add x5, x5, x23 // add KASLR displacement add x5, x5, x23 // add KASLR displacement
#endif
adrp x6, _end // runtime __pa(_end) adrp x6, _end // runtime __pa(_end)
adrp x3, _text // runtime __pa(_text) adrp x3, _text // runtime __pa(_text)
sub x6, x6, x3 // _end - _text sub x6, x6, x3 // _end - _text
......
...@@ -47,7 +47,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz) ...@@ -47,7 +47,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
u64 i; u64 i;
phys_addr_t start, end; phys_addr_t start, end;
nr_ranges = 1; /* for exclusion of crashkernel region */ nr_ranges = 2; /* for exclusion of crashkernel region */
for_each_mem_range(i, &start, &end) for_each_mem_range(i, &start, &end)
nr_ranges++; nr_ranges++;
......
...@@ -117,7 +117,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu) ...@@ -117,7 +117,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
if (num_irqs == 1) { if (num_irqs == 1) {
int irq = platform_get_irq(pdev, 0); int irq = platform_get_irq(pdev, 0);
if (irq && irq_is_percpu_devid(irq)) if ((irq > 0) && irq_is_percpu_devid(irq))
return pmu_parse_percpu_irq(pmu, irq); return pmu_parse_percpu_irq(pmu, irq);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment