Commit 2d634994 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'fixes' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:

 - omit EFI memory map sorting, which was recently introduced, but
   caused problems with the decompressor due to additional sections
   being emitted.

 - avoid unaligned load fault-generating instructions in the
   decompressor by switching to a private unaligned implementation.

 - add a symbol into the decompressor to further debug non-boot
   situations (ld's documentation is extremely poor on how "." works;
   ld doesn't seem to follow its own documentation!)

 - pass endianness information to sparse

* 'fixes' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: add debug ".edata_real" symbol
  ARM: 8716/1: pass endianness info to sparse
  efi/libstub: arm: omit sorting of the UEFI memory map
  ARM: 8715/1: add a private asm/unaligned.h
parents f0a32ee4 dad46753
...@@ -44,10 +44,12 @@ endif ...@@ -44,10 +44,12 @@ endif
ifeq ($(CONFIG_CPU_BIG_ENDIAN),y) ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
KBUILD_CPPFLAGS += -mbig-endian KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__ARMEB__
AS += -EB AS += -EB
LD += -EB LD += -EB
else else
KBUILD_CPPFLAGS += -mlittle-endian KBUILD_CPPFLAGS += -mlittle-endian
CHECKFLAGS += -D__ARMEL__
AS += -EL AS += -EL
LD += -EL LD += -EL
endif endif
......
...@@ -85,6 +85,15 @@ SECTIONS ...@@ -85,6 +85,15 @@ SECTIONS
_edata = .; _edata = .;
/*
* The image_end section appears after any additional loadable sections
* that the linker may decide to insert in the binary image. Having
* this symbol allows further debug in the near future.
*/
.image_end (NOLOAD) : {
_edata_real = .;
}
_magic_sig = ZIMAGE_MAGIC(0x016f2818); _magic_sig = ZIMAGE_MAGIC(0x016f2818);
_magic_start = ZIMAGE_MAGIC(_start); _magic_start = ZIMAGE_MAGIC(_start);
_magic_end = ZIMAGE_MAGIC(_edata); _magic_end = ZIMAGE_MAGIC(_edata);
......
...@@ -20,7 +20,6 @@ generic-y += simd.h ...@@ -20,7 +20,6 @@ generic-y += simd.h
generic-y += sizes.h generic-y += sizes.h
generic-y += timex.h generic-y += timex.h
generic-y += trace_clock.h generic-y += trace_clock.h
generic-y += unaligned.h
generated-y += mach-types.h generated-y += mach-types.h
generated-y += unistd-nr.h generated-y += unistd-nr.h
#ifndef __ASM_ARM_UNALIGNED_H
#define __ASM_ARM_UNALIGNED_H

/*
 * Private ARM implementation of <asm/unaligned.h>.
 *
 * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+,
 * but we don't want to use linux/unaligned/access_ok.h since that can lead
 * to traps on unaligned stm/ldm or strd/ldrd.
 *
 * Instead, native-endian accesses go through the packed-struct helpers
 * (which the compiler lowers to safe single-word loads/stores), and
 * opposite-endian accesses go through the byteshift helpers.
 */
#include <asm/byteorder.h>

#if defined(__LITTLE_ENDIAN)
# include <linux/unaligned/le_struct.h>
# include <linux/unaligned/be_byteshift.h>
# include <linux/unaligned/generic.h>
# define get_unaligned	__get_unaligned_le
# define put_unaligned	__put_unaligned_le
#elif defined(__BIG_ENDIAN)
# include <linux/unaligned/be_struct.h>
# include <linux/unaligned/le_byteshift.h>
# include <linux/unaligned/generic.h>
# define get_unaligned	__get_unaligned_be
# define put_unaligned	__put_unaligned_be
#else
/* <asm/byteorder.h> must define exactly one of the two endianness macros. */
# error need to define endianness
#endif

#endif /* __ASM_ARM_UNALIGNED_H */
...@@ -34,13 +34,14 @@ lib-y := efi-stub-helper.o gop.o secureboot.o ...@@ -34,13 +34,14 @@ lib-y := efi-stub-helper.o gop.o secureboot.o
lib-$(CONFIG_RESET_ATTACK_MITIGATION) += tpm.o lib-$(CONFIG_RESET_ATTACK_MITIGATION) += tpm.o
# include the stub's generic dependencies from lib/ when building for ARM/arm64 # include the stub's generic dependencies from lib/ when building for ARM/arm64
arm-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c sort.c arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
arm-deps-$(CONFIG_ARM64) += sort.c
$(obj)/lib-%.o: $(srctree)/lib/%.c FORCE $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
$(call if_changed_rule,cc_o_c) $(call if_changed_rule,cc_o_c)
lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o random.o \ lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o random.o \
$(patsubst %.c,lib-%.o,$(arm-deps)) $(patsubst %.c,lib-%.o,$(arm-deps-y))
lib-$(CONFIG_ARM) += arm32-stub.o lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += arm64-stub.o lib-$(CONFIG_ARM64) += arm64-stub.o
...@@ -91,5 +92,4 @@ quiet_cmd_stubcopy = STUBCPY $@ ...@@ -91,5 +92,4 @@ quiet_cmd_stubcopy = STUBCPY $@
# explicitly by the decompressor linker script. # explicitly by the decompressor linker script.
# #
STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub
STUBCOPY_RM-$(CONFIG_ARM) += -R ___ksymtab+sort -R ___kcrctab+sort
STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
...@@ -350,7 +350,9 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, ...@@ -350,7 +350,9 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
* The easiest way to find adjacent regions is to sort the memory map * The easiest way to find adjacent regions is to sort the memory map
* before traversing it. * before traversing it.
*/ */
sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL); if (IS_ENABLED(CONFIG_ARM64))
sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc,
NULL);
for (l = 0; l < map_size; l += desc_size, prev = in) { for (l = 0; l < map_size; l += desc_size, prev = in) {
u64 paddr, size; u64 paddr, size;
...@@ -367,7 +369,8 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, ...@@ -367,7 +369,8 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
* a 4k page size kernel to kexec a 64k page size kernel and * a 4k page size kernel to kexec a 64k page size kernel and
* vice versa. * vice versa.
*/ */
if (!regions_are_adjacent(prev, in) || if ((IS_ENABLED(CONFIG_ARM64) &&
!regions_are_adjacent(prev, in)) ||
!regions_have_compatible_memory_type_attrs(prev, in)) { !regions_have_compatible_memory_type_attrs(prev, in)) {
paddr = round_down(in->phys_addr, SZ_64K); paddr = round_down(in->phys_addr, SZ_64K);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment