Commit 55d728b2 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "arm64 fixes that came in during the merge window.

  There will probably be more to come, but it doesn't seem like it's
  worth me sitting on these in the meantime.

   - Fix SCS debug check to report max stack usage in bytes as advertised

   - Fix typo: CONFIG_FTRACE_WITH_REGS => CONFIG_DYNAMIC_FTRACE_WITH_REGS

   - Fix incorrect mask in HiSilicon L3C perf PMU driver

   - Fix compat vDSO compilation under some toolchain configurations

   - Fix false UBSAN warning from ACPI IORT parsing code

   - Fix booting under bootloaders that ignore TEXT_OFFSET

   - Annotate debug initcall function with '__init'"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: warn on incorrect placement of the kernel by the bootloader
  arm64: acpi: fix UBSAN warning
  arm64: vdso32: add CONFIG_THUMB2_COMPAT_VDSO
  drivers/perf: hisi: Fix wrong value for all counters enable
  arm64: ftrace: Change CONFIG_FTRACE_WITH_REGS to CONFIG_DYNAMIC_FTRACE_WITH_REGS
  arm64: debug: mark a function as __init to save some memory
  scs: Report SCS usage in bytes rather than number of entries
parents d3ea6934 dd4bc607
@@ -1299,6 +1299,14 @@ config COMPAT_VDSO
	  You must have a 32-bit build of glibc 2.22 or later for programs
	  to seamlessly take advantage of this.

+config THUMB2_COMPAT_VDSO
+	bool "Compile the 32-bit vDSO for Thumb-2 mode" if EXPERT
+	depends on COMPAT_VDSO
+	default y
+	help
+	  Compile the compat vDSO with '-mthumb -fomit-frame-pointer' if y,
+	  otherwise with '-marm'.
+
 menuconfig ARMV8_DEPRECATED
	bool "Emulate deprecated/obsolete ARMv8 instructions"
	depends on SYSCTL
@@ -1740,8 +1748,9 @@ config ARM64_DEBUG_PRIORITY_MASKING
 endif

 config RELOCATABLE
-	bool
+	bool "Build a relocatable kernel image" if EXPERT
	select ARCH_HAS_RELR
+	default y
	help
	  This builds the kernel as a Position Independent Executable (PIE),
	  which retains all relocation metadata required to relocate the
...
@@ -12,6 +12,7 @@
 #include <linux/efi.h>
 #include <linux/memblock.h>
 #include <linux/psci.h>
+#include <linux/stddef.h>

 #include <asm/cputype.h>
 #include <asm/io.h>
@@ -31,14 +32,14 @@
  * is therefore used to delimit the MADT GICC structure minimum length
  * appropriately.
  */
-#define ACPI_MADT_GICC_MIN_LENGTH   ACPI_OFFSET(  \
+#define ACPI_MADT_GICC_MIN_LENGTH   offsetof(  \
	struct acpi_madt_generic_interrupt, efficiency_class)

 #define BAD_MADT_GICC_ENTRY(entry, end)					\
	(!(entry) || (entry)->header.length < ACPI_MADT_GICC_MIN_LENGTH || \
	 (unsigned long)(entry) + (entry)->header.length > (end))

-#define ACPI_MADT_GICC_SPE  (ACPI_OFFSET(struct acpi_madt_generic_interrupt, \
+#define ACPI_MADT_GICC_SPE  (offsetof(struct acpi_madt_generic_interrupt, \
	spe_interrupt) + sizeof(u16))

 /* Basic configuration for ACPI */
...
@@ -130,7 +130,7 @@ static int clear_os_lock(unsigned int cpu)
	return 0;
 }

-static int debug_monitors_init(void)
+static int __init debug_monitors_init(void)
 {
	return cpuhp_setup_state(CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
				 "arm64/debug_monitors:starting",
...
@@ -69,7 +69,8 @@ static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
	if (addr == FTRACE_ADDR)
		return &plt[FTRACE_PLT_IDX];
-	if (addr == FTRACE_REGS_ADDR && IS_ENABLED(CONFIG_FTRACE_WITH_REGS))
+	if (addr == FTRACE_REGS_ADDR &&
+	    IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return &plt[FTRACE_REGS_PLT_IDX];
 #endif
	return NULL;
...
@@ -319,6 +319,10 @@ void __init setup_arch(char **cmdline_p)
	xen_early_init();
	efi_init();

+	if (!efi_enabled(EFI_BOOT) && ((u64)_text % MIN_KIMG_ALIGN) != 0)
+	     pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!");
+
	arm64_memblock_init();

	paging_init();
...
@@ -105,6 +105,14 @@ VDSO_CFLAGS += -D__uint128_t='void*'
 VDSO_CFLAGS += $(call cc32-disable-warning,shift-count-overflow)
 VDSO_CFLAGS += -Wno-int-to-pointer-cast

+# Compile as THUMB2 or ARM. Unwinding via frame-pointers in THUMB2 is
+# unreliable.
+ifeq ($(CONFIG_THUMB2_COMPAT_VDSO), y)
+VDSO_CFLAGS += -mthumb -fomit-frame-pointer
+else
+VDSO_CFLAGS += -marm
+endif
+
 VDSO_AFLAGS := $(VDSO_CAFLAGS)
 VDSO_AFLAGS += -D__ASSEMBLY__
...
@@ -35,7 +35,7 @@
 /* L3C has 8-counters */
 #define L3C_NR_COUNTERS		0x8

-#define L3C_PERF_CTRL_EN	0x20000
+#define L3C_PERF_CTRL_EN	0x10000
 #define L3C_EVTYPE_NONE		0xff

 /*
...
@@ -74,7 +74,7 @@ static void scs_check_usage(struct task_struct *tsk)
	for (p = task_scs(tsk); p < __scs_magic(tsk); ++p) {
		if (!READ_ONCE_NOCHECK(*p))
			break;
-		used++;
+		used += sizeof(*p);
	}

	while (used > curr) {
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment