Commit 714d8e7e authored by Linus Torvalds

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Will Deacon:
 "Here are the core arm64 updates for 4.1.

  Highlights include a significant rework to head.S (allowing us to boot
  on machines with physical memory at a really high address), an AES
  performance boost on Cortex-A57 and the ability to run a 32-bit
  userspace with 64k pages (although this requires said userspace to be
  built with a recent binutils).

  The head.S rework spilt over into KVM, so there are some changes under
  arch/arm/ which have been acked by Marc Zyngier (KVM co-maintainer).
  In particular, the linker script changes caused us some issues in
  -next, so there are a few merge commits where we had to apply fixes on
  top of a stable branch.

  Other changes include:

   - AES performance boost for Cortex-A57
   - AArch32 (compat) userspace with 64k pages
   - Cortex-A53 erratum workaround for #845719
   - defconfig updates (new platforms, PCI, ...)"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (39 commits)
  arm64: fix midr range for Cortex-A57 erratum 832075
  arm64: errata: add workaround for cortex-a53 erratum #845719
  arm64: Use bool function return values of true/false not 1/0
  arm64: defconfig: updates for 4.1
  arm64: Extract feature parsing code from cpu_errata.c
  arm64: alternative: Allow immediate branch as alternative instruction
  arm64: insn: Add aarch64_insn_decode_immediate
  ARM: kvm: round HYP section to page size instead of log2 upper bound
  ARM: kvm: assert on HYP section boundaries not actual code size
  arm64: head.S: ensure idmap_t0sz is visible
  arm64: pmu: add support for interrupt-affinity property
  dt: pmu: extend ARM PMU binding to allow for explicit interrupt affinity
  arm64: head.S: ensure visibility of page tables
  arm64: KVM: use ID map with increased VA range if required
  arm64: mm: increase VA range of identity map
  ARM: kvm: implement replacement for ld's LOG2CEIL()
  arm64: proc: remove unused cpu_get_pgd macro
  arm64: enforce x1|x2|x3 == 0 upon kernel entry as per boot protocol
  arm64: remove __calc_phys_offset
  arm64: merge __enable_mmu and __turn_mmu_on
  ...
parents d19d5efd 6d1966df
...@@ -26,6 +26,13 @@ Required properties: ...@@ -26,6 +26,13 @@ Required properties:
Optional properties: Optional properties:
- interrupt-affinity : Valid only when using SPIs, specifies a list of phandles
to CPU nodes corresponding directly to the affinity of
the SPIs listed in the interrupts property.
This property should be present when there is more than
a single SPI.
- qcom,no-pc-write : Indicates that this PMU doesn't support the 0xc and 0xd - qcom,no-pc-write : Indicates that this PMU doesn't support the 0xc and 0xd
events. events.
......
...@@ -269,6 +269,16 @@ static inline void __kvm_flush_dcache_pud(pud_t pud) ...@@ -269,6 +269,16 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
void kvm_set_way_flush(struct kvm_vcpu *vcpu); void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled); void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
static inline bool __kvm_cpu_uses_extended_idmap(void)
{
return false;
}
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
pgd_t *hyp_pgd,
pgd_t *merged_hyp_pgd,
unsigned long hyp_idmap_start) { }
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#endif /* __ARM_KVM_MMU_H__ */ #endif /* __ARM_KVM_MMU_H__ */
...@@ -23,7 +23,7 @@ ...@@ -23,7 +23,7 @@
VMLINUX_SYMBOL(__idmap_text_start) = .; \ VMLINUX_SYMBOL(__idmap_text_start) = .; \
*(.idmap.text) \ *(.idmap.text) \
VMLINUX_SYMBOL(__idmap_text_end) = .; \ VMLINUX_SYMBOL(__idmap_text_end) = .; \
. = ALIGN(32); \ . = ALIGN(PAGE_SIZE); \
VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \ VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
*(.hyp.idmap.text) \ *(.hyp.idmap.text) \
VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
...@@ -343,8 +343,11 @@ SECTIONS ...@@ -343,8 +343,11 @@ SECTIONS
*/ */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support") ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined") ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
/* /*
* The HYP init code can't be more than a page long. * The HYP init code can't be more than a page long,
* and should not cross a page boundary.
* The above comment applies as well. * The above comment applies as well.
*/ */
ASSERT(((__hyp_idmap_text_end - __hyp_idmap_text_start) <= PAGE_SIZE), "HYP init code too big") ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
"HYP init code too big or misaligned")
...@@ -35,9 +35,9 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; ...@@ -35,9 +35,9 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
static pgd_t *boot_hyp_pgd; static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd; static pgd_t *hyp_pgd;
static pgd_t *merged_hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex); static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
static void *init_bounce_page;
static unsigned long hyp_idmap_start; static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end; static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector; static phys_addr_t hyp_idmap_vector;
...@@ -405,9 +405,6 @@ void free_boot_hyp_pgd(void) ...@@ -405,9 +405,6 @@ void free_boot_hyp_pgd(void)
if (hyp_pgd) if (hyp_pgd)
unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
free_page((unsigned long)init_bounce_page);
init_bounce_page = NULL;
mutex_unlock(&kvm_hyp_pgd_mutex); mutex_unlock(&kvm_hyp_pgd_mutex);
} }
...@@ -438,6 +435,11 @@ void free_hyp_pgds(void) ...@@ -438,6 +435,11 @@ void free_hyp_pgds(void)
free_pages((unsigned long)hyp_pgd, hyp_pgd_order); free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
hyp_pgd = NULL; hyp_pgd = NULL;
} }
if (merged_hyp_pgd) {
clear_page(merged_hyp_pgd);
free_page((unsigned long)merged_hyp_pgd);
merged_hyp_pgd = NULL;
}
mutex_unlock(&kvm_hyp_pgd_mutex); mutex_unlock(&kvm_hyp_pgd_mutex);
} }
...@@ -1622,11 +1624,17 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) ...@@ -1622,11 +1624,17 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
phys_addr_t kvm_mmu_get_httbr(void) phys_addr_t kvm_mmu_get_httbr(void)
{ {
if (__kvm_cpu_uses_extended_idmap())
return virt_to_phys(merged_hyp_pgd);
else
return virt_to_phys(hyp_pgd); return virt_to_phys(hyp_pgd);
} }
phys_addr_t kvm_mmu_get_boot_httbr(void) phys_addr_t kvm_mmu_get_boot_httbr(void)
{ {
if (__kvm_cpu_uses_extended_idmap())
return virt_to_phys(merged_hyp_pgd);
else
return virt_to_phys(boot_hyp_pgd); return virt_to_phys(boot_hyp_pgd);
} }
...@@ -1643,39 +1651,11 @@ int kvm_mmu_init(void) ...@@ -1643,39 +1651,11 @@ int kvm_mmu_init(void)
hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end); hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init); hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
/* /*
* Our init code is crossing a page boundary. Allocate * We rely on the linker script to ensure at build time that the HYP
* a bounce page, copy the code over and use that. * init code does not cross a page boundary.
*/ */
size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start; BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
phys_addr_t phys_base;
init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
if (!init_bounce_page) {
kvm_err("Couldn't allocate HYP init bounce page\n");
err = -ENOMEM;
goto out;
}
memcpy(init_bounce_page, __hyp_idmap_text_start, len);
/*
* Warning: the code we just copied to the bounce page
* must be flushed to the point of coherency.
* Otherwise, the data may be sitting in L2, and HYP
* mode won't be able to observe it as it runs with
* caches off at that point.
*/
kvm_flush_dcache_to_poc(init_bounce_page, len);
phys_base = kvm_virt_to_phys(init_bounce_page);
hyp_idmap_vector += phys_base - hyp_idmap_start;
hyp_idmap_start = phys_base;
hyp_idmap_end = phys_base + len;
kvm_info("Using HYP init bounce page @%lx\n",
(unsigned long)phys_base);
}
hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order); hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order); boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
...@@ -1698,6 +1678,17 @@ int kvm_mmu_init(void) ...@@ -1698,6 +1678,17 @@ int kvm_mmu_init(void)
goto out; goto out;
} }
if (__kvm_cpu_uses_extended_idmap()) {
merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
if (!merged_hyp_pgd) {
kvm_err("Failed to allocate extra HYP pgd\n");
goto out;
}
__kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
hyp_idmap_start);
return 0;
}
/* Map the very same page at the trampoline VA */ /* Map the very same page at the trampoline VA */
err = __create_hyp_mappings(boot_hyp_pgd, err = __create_hyp_mappings(boot_hyp_pgd,
TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE, TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
......
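A side note on the idiom used in the new BUG_ON() above: two addresses share a page exactly when their page-number bits agree, so XOR-ing them and masking off the in-page bits gives zero. A small standalone illustration, not kernel code, with a 4 KB page size assumed:

#include <assert.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Non-zero when start and (end - 1) fall on different pages. */
static int crosses_page(unsigned long start, unsigned long end)
{
	return ((start ^ (end - 1)) & PAGE_MASK) != 0;
}

int main(void)
{
	assert(!crosses_page(0x1000, 0x1800));	/* fits inside one 4 KB page */
	assert(crosses_page(0x1f00, 0x2100));	/* spills into the next page */
	return 0;
}

This is the same check the removed bounce-page code used to perform at runtime; after this series it is guaranteed at link time instead, so the BUG_ON() only documents the invariant.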
...@@ -368,6 +368,27 @@ config ARM64_ERRATUM_832075 ...@@ -368,6 +368,27 @@ config ARM64_ERRATUM_832075
If unsure, say Y. If unsure, say Y.
config ARM64_ERRATUM_845719
bool "Cortex-A53: 845719: a load might read incorrect data"
depends on COMPAT
default y
help
This option adds an alternative code sequence to work around ARM
erratum 845719 on Cortex-A53 parts up to r0p4.
When running a compat (AArch32) userspace on an affected Cortex-A53
part, a load at EL0 from a virtual address that matches the bottom 32
bits of the virtual address used by a recent load at (AArch64) EL1
might return incorrect data.
The workaround is to write the contextidr_el1 register on exception
return to a 32-bit task.
Please note that this does not necessarily enable the workaround,
as it depends on the alternative framework, which will only patch
the kernel if an affected CPU is detected.
If unsure, say Y.
endmenu endmenu
...@@ -455,8 +476,8 @@ config SCHED_SMT ...@@ -455,8 +476,8 @@ config SCHED_SMT
places. If unsure say N here. places. If unsure say N here.
config NR_CPUS config NR_CPUS
int "Maximum number of CPUs (2-64)" int "Maximum number of CPUs (2-4096)"
range 2 64 range 2 4096
depends on SMP depends on SMP
# These have to remain sorted largest to smallest # These have to remain sorted largest to smallest
default "64" default "64"
...@@ -470,6 +491,10 @@ config HOTPLUG_CPU ...@@ -470,6 +491,10 @@ config HOTPLUG_CPU
source kernel/Kconfig.preempt source kernel/Kconfig.preempt
config UP_LATE_INIT
def_bool y
depends on !SMP
config HZ config HZ
int int
default 100 default 100
...@@ -670,7 +695,7 @@ source "fs/Kconfig.binfmt" ...@@ -670,7 +695,7 @@ source "fs/Kconfig.binfmt"
config COMPAT config COMPAT
bool "Kernel support for 32-bit EL0" bool "Kernel support for 32-bit EL0"
depends on !ARM64_64K_PAGES depends on !ARM64_64K_PAGES || EXPERT
select COMPAT_BINFMT_ELF select COMPAT_BINFMT_ELF
select HAVE_UID16 select HAVE_UID16
select OLD_SIGSUSPEND3 select OLD_SIGSUSPEND3
...@@ -681,6 +706,10 @@ config COMPAT ...@@ -681,6 +706,10 @@ config COMPAT
the user helper functions, VFP support and the ptrace interface are the user helper functions, VFP support and the ptrace interface are
handled appropriately by the kernel. handled appropriately by the kernel.
If you also enabled CONFIG_ARM64_64K_PAGES, please be aware that you
will only be able to execute AArch32 binaries that were compiled with
64k aligned segments.
If you want to execute 32-bit userspace applications, say Y. If you want to execute 32-bit userspace applications, say Y.
config SYSVIPC_COMPAT config SYSVIPC_COMPAT
......
...@@ -48,7 +48,7 @@ core-$(CONFIG_KVM) += arch/arm64/kvm/ ...@@ -48,7 +48,7 @@ core-$(CONFIG_KVM) += arch/arm64/kvm/
core-$(CONFIG_XEN) += arch/arm64/xen/ core-$(CONFIG_XEN) += arch/arm64/xen/
core-$(CONFIG_CRYPTO) += arch/arm64/crypto/ core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
libs-y := arch/arm64/lib/ $(libs-y) libs-y := arch/arm64/lib/ $(libs-y)
libs-$(CONFIG_EFI_STUB) += drivers/firmware/efi/libstub/ core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
# Default target when executing plain make # Default target when executing plain make
KBUILD_IMAGE := Image.gz KBUILD_IMAGE := Image.gz
......
...@@ -31,8 +31,12 @@ CONFIG_MODULES=y ...@@ -31,8 +31,12 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set # CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set # CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_EXYNOS7=y
CONFIG_ARCH_FSL_LS2085A=y CONFIG_ARCH_FSL_LS2085A=y
CONFIG_ARCH_MEDIATEK=y CONFIG_ARCH_MEDIATEK=y
CONFIG_ARCH_SEATTLE=y
CONFIG_ARCH_TEGRA=y
CONFIG_ARCH_TEGRA_132_SOC=y
CONFIG_ARCH_THUNDER=y CONFIG_ARCH_THUNDER=y
CONFIG_ARCH_VEXPRESS=y CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_XGENE=y CONFIG_ARCH_XGENE=y
...@@ -62,6 +66,7 @@ CONFIG_BPF_JIT=y ...@@ -62,6 +66,7 @@ CONFIG_BPF_JIT=y
# CONFIG_WIRELESS is not set # CONFIG_WIRELESS is not set
CONFIG_NET_9P=y CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y CONFIG_NET_9P_VIRTIO=y
# CONFIG_TEGRA_AHB is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y CONFIG_DEVTMPFS_MOUNT=y
...@@ -81,6 +86,7 @@ CONFIG_NETDEVICES=y ...@@ -81,6 +86,7 @@ CONFIG_NETDEVICES=y
CONFIG_TUN=y CONFIG_TUN=y
CONFIG_VIRTIO_NET=y CONFIG_VIRTIO_NET=y
CONFIG_NET_XGENE=y CONFIG_NET_XGENE=y
CONFIG_SKY2=y
CONFIG_SMC91X=y CONFIG_SMC91X=y
CONFIG_SMSC911X=y CONFIG_SMSC911X=y
# CONFIG_WLAN is not set # CONFIG_WLAN is not set
...@@ -100,6 +106,8 @@ CONFIG_SPI=y ...@@ -100,6 +106,8 @@ CONFIG_SPI=y
CONFIG_SPI_PL022=y CONFIG_SPI_PL022=y
CONFIG_GPIO_PL061=y CONFIG_GPIO_PL061=y
CONFIG_GPIO_XGENE=y CONFIG_GPIO_XGENE=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set # CONFIG_HWMON is not set
CONFIG_REGULATOR=y CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_FIXED_VOLTAGE=y
...@@ -112,10 +120,10 @@ CONFIG_LOGO=y ...@@ -112,10 +120,10 @@ CONFIG_LOGO=y
CONFIG_USB=y CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_ISP1760_HCD=y
CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y CONFIG_USB_STORAGE=y
CONFIG_USB_ISP1760=y
CONFIG_USB_ULPI=y CONFIG_USB_ULPI=y
CONFIG_MMC=y CONFIG_MMC=y
CONFIG_MMC_ARMMMCI=y CONFIG_MMC_ARMMMCI=y
...@@ -125,6 +133,7 @@ CONFIG_MMC_SPI=y ...@@ -125,6 +133,7 @@ CONFIG_MMC_SPI=y
CONFIG_RTC_CLASS=y CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_EFI=y CONFIG_RTC_DRV_EFI=y
CONFIG_RTC_DRV_XGENE=y CONFIG_RTC_DRV_XGENE=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_MMIO=y CONFIG_VIRTIO_MMIO=y
# CONFIG_IOMMU_SUPPORT is not set # CONFIG_IOMMU_SUPPORT is not set
...@@ -143,8 +152,10 @@ CONFIG_CUSE=y ...@@ -143,8 +152,10 @@ CONFIG_CUSE=y
CONFIG_VFAT_FS=y CONFIG_VFAT_FS=y
CONFIG_TMPFS=y CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y CONFIG_HUGETLBFS=y
CONFIG_EFIVAR_FS=y
# CONFIG_MISC_FILESYSTEMS is not set # CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NFS_FS=y CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y CONFIG_ROOT_NFS=y
CONFIG_9P_FS=y CONFIG_9P_FS=y
CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_437=y
...@@ -159,7 +170,6 @@ CONFIG_LOCKUP_DETECTOR=y ...@@ -159,7 +170,6 @@ CONFIG_LOCKUP_DETECTOR=y
# CONFIG_SCHED_DEBUG is not set # CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set # CONFIG_DEBUG_PREEMPT is not set
# CONFIG_FTRACE is not set # CONFIG_FTRACE is not set
CONFIG_KEYS=y
CONFIG_SECURITY=y CONFIG_SECURITY=y
CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_ARM64_CRYPTO=y CONFIG_ARM64_CRYPTO=y
......
...@@ -101,19 +101,19 @@ ENTRY(ce_aes_ccm_final) ...@@ -101,19 +101,19 @@ ENTRY(ce_aes_ccm_final)
0: mov v4.16b, v3.16b 0: mov v4.16b, v3.16b
1: ld1 {v5.2d}, [x2], #16 /* load next round key */ 1: ld1 {v5.2d}, [x2], #16 /* load next round key */
aese v0.16b, v4.16b aese v0.16b, v4.16b
aese v1.16b, v4.16b
aesmc v0.16b, v0.16b aesmc v0.16b, v0.16b
aese v1.16b, v4.16b
aesmc v1.16b, v1.16b aesmc v1.16b, v1.16b
2: ld1 {v3.2d}, [x2], #16 /* load next round key */ 2: ld1 {v3.2d}, [x2], #16 /* load next round key */
aese v0.16b, v5.16b aese v0.16b, v5.16b
aese v1.16b, v5.16b
aesmc v0.16b, v0.16b aesmc v0.16b, v0.16b
aese v1.16b, v5.16b
aesmc v1.16b, v1.16b aesmc v1.16b, v1.16b
3: ld1 {v4.2d}, [x2], #16 /* load next round key */ 3: ld1 {v4.2d}, [x2], #16 /* load next round key */
subs w3, w3, #3 subs w3, w3, #3
aese v0.16b, v3.16b aese v0.16b, v3.16b
aese v1.16b, v3.16b
aesmc v0.16b, v0.16b aesmc v0.16b, v0.16b
aese v1.16b, v3.16b
aesmc v1.16b, v1.16b aesmc v1.16b, v1.16b
bpl 1b bpl 1b
aese v0.16b, v4.16b aese v0.16b, v4.16b
...@@ -146,19 +146,19 @@ ENDPROC(ce_aes_ccm_final) ...@@ -146,19 +146,19 @@ ENDPROC(ce_aes_ccm_final)
ld1 {v5.2d}, [x10], #16 /* load 2nd round key */ ld1 {v5.2d}, [x10], #16 /* load 2nd round key */
2: /* inner loop: 3 rounds, 2x interleaved */ 2: /* inner loop: 3 rounds, 2x interleaved */
aese v0.16b, v4.16b aese v0.16b, v4.16b
aese v1.16b, v4.16b
aesmc v0.16b, v0.16b aesmc v0.16b, v0.16b
aese v1.16b, v4.16b
aesmc v1.16b, v1.16b aesmc v1.16b, v1.16b
3: ld1 {v3.2d}, [x10], #16 /* load next round key */ 3: ld1 {v3.2d}, [x10], #16 /* load next round key */
aese v0.16b, v5.16b aese v0.16b, v5.16b
aese v1.16b, v5.16b
aesmc v0.16b, v0.16b aesmc v0.16b, v0.16b
aese v1.16b, v5.16b
aesmc v1.16b, v1.16b aesmc v1.16b, v1.16b
4: ld1 {v4.2d}, [x10], #16 /* load next round key */ 4: ld1 {v4.2d}, [x10], #16 /* load next round key */
subs w7, w7, #3 subs w7, w7, #3
aese v0.16b, v3.16b aese v0.16b, v3.16b
aese v1.16b, v3.16b
aesmc v0.16b, v0.16b aesmc v0.16b, v0.16b
aese v1.16b, v3.16b
aesmc v1.16b, v1.16b aesmc v1.16b, v1.16b
ld1 {v5.2d}, [x10], #16 /* load next round key */ ld1 {v5.2d}, [x10], #16 /* load next round key */
bpl 2b bpl 2b
......
...@@ -45,18 +45,14 @@ ...@@ -45,18 +45,14 @@
.macro do_enc_Nx, de, mc, k, i0, i1, i2, i3 .macro do_enc_Nx, de, mc, k, i0, i1, i2, i3
aes\de \i0\().16b, \k\().16b aes\de \i0\().16b, \k\().16b
.ifnb \i1
aes\de \i1\().16b, \k\().16b
.ifnb \i3
aes\de \i2\().16b, \k\().16b
aes\de \i3\().16b, \k\().16b
.endif
.endif
aes\mc \i0\().16b, \i0\().16b aes\mc \i0\().16b, \i0\().16b
.ifnb \i1 .ifnb \i1
aes\de \i1\().16b, \k\().16b
aes\mc \i1\().16b, \i1\().16b aes\mc \i1\().16b, \i1\().16b
.ifnb \i3 .ifnb \i3
aes\de \i2\().16b, \k\().16b
aes\mc \i2\().16b, \i2\().16b aes\mc \i2\().16b, \i2\().16b
aes\de \i3\().16b, \k\().16b
aes\mc \i3\().16b, \i3\().16b aes\mc \i3\().16b, \i3\().16b
.endif .endif
.endif .endif
......
...@@ -159,4 +159,52 @@ lr .req x30 // link register ...@@ -159,4 +159,52 @@ lr .req x30 // link register
orr \rd, \lbits, \hbits, lsl #32 orr \rd, \lbits, \hbits, lsl #32
.endm .endm
/*
* Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
* <symbol> is within the range +/- 4 GB of the PC.
*/
/*
* @dst: destination register (64 bit wide)
* @sym: name of the symbol
* @tmp: optional scratch register to be used if <dst> == sp, which
* is not allowed in an adrp instruction
*/
.macro adr_l, dst, sym, tmp=
.ifb \tmp
adrp \dst, \sym
add \dst, \dst, :lo12:\sym
.else
adrp \tmp, \sym
add \dst, \tmp, :lo12:\sym
.endif
.endm
/*
* @dst: destination register (32 or 64 bit wide)
* @sym: name of the symbol
* @tmp: optional 64-bit scratch register to be used if <dst> is a
* 32-bit wide register, in which case it cannot be used to hold
* the address
*/
.macro ldr_l, dst, sym, tmp=
.ifb \tmp
adrp \dst, \sym
ldr \dst, [\dst, :lo12:\sym]
.else
adrp \tmp, \sym
ldr \dst, [\tmp, :lo12:\sym]
.endif
.endm
/*
* @src: source register (32 or 64 bit wide)
* @sym: name of the symbol
* @tmp: mandatory 64-bit scratch register to calculate the address
* while <src> needs to be preserved.
*/
.macro str_l, src, sym, tmp
adrp \tmp, \sym
str \src, [\tmp, :lo12:\sym]
.endm
#endif /* __ASM_ASSEMBLER_H */ #endif /* __ASM_ASSEMBLER_H */
...@@ -23,11 +23,24 @@ ...@@ -23,11 +23,24 @@
#define ARM64_WORKAROUND_CLEAN_CACHE 0 #define ARM64_WORKAROUND_CLEAN_CACHE 0
#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1 #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
#define ARM64_WORKAROUND_845719 2
#define ARM64_NCAPS 2 #define ARM64_NCAPS 3
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
struct arm64_cpu_capabilities {
const char *desc;
u16 capability;
bool (*matches)(const struct arm64_cpu_capabilities *);
union {
struct { /* To be used for erratum handling only */
u32 midr_model;
u32 midr_range_min, midr_range_max;
};
};
};
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
static inline bool cpu_have_feature(unsigned int num) static inline bool cpu_have_feature(unsigned int num)
...@@ -51,7 +64,10 @@ static inline void cpus_set_cap(unsigned int num) ...@@ -51,7 +64,10 @@ static inline void cpus_set_cap(unsigned int num)
__set_bit(num, cpu_hwcaps); __set_bit(num, cpu_hwcaps);
} }
void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
const char *info);
void check_local_cpu_errata(void); void check_local_cpu_errata(void);
void check_local_cpu_features(void);
bool cpu_supports_mixed_endian_el0(void); bool cpu_supports_mixed_endian_el0(void);
bool system_supports_mixed_endian_el0(void); bool system_supports_mixed_endian_el0(void);
......
/*
* arch/arm64/include/asm/cputable.h
*
* Copyright (C) 2012 ARM Ltd.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ASM_CPUTABLE_H
#define __ASM_CPUTABLE_H
struct cpu_info {
unsigned int cpu_id_val;
unsigned int cpu_id_mask;
const char *cpu_name;
unsigned long (*cpu_setup)(void);
};
extern struct cpu_info *lookup_processor_type(unsigned int);
#endif
...@@ -97,7 +97,7 @@ static inline int dma_set_mask(struct device *dev, u64 mask) ...@@ -97,7 +97,7 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{ {
if (!dev->dma_mask) if (!dev->dma_mask)
return 0; return false;
return addr + size - 1 <= *dev->dma_mask; return addr + size - 1 <= *dev->dma_mask;
} }
......
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
enum fixed_addresses { enum fixed_addresses {
FIX_HOLE, FIX_HOLE,
FIX_EARLYCON_MEM_BASE, FIX_EARLYCON_MEM_BASE,
FIX_TEXT_POKE0,
__end_of_permanent_fixed_addresses, __end_of_permanent_fixed_addresses,
/* /*
...@@ -49,7 +50,6 @@ enum fixed_addresses { ...@@ -49,7 +50,6 @@ enum fixed_addresses {
FIX_BTMAP_END = __end_of_permanent_fixed_addresses, FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1, FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
FIX_TEXT_POKE0,
__end_of_fixed_addresses __end_of_fixed_addresses
}; };
......
...@@ -285,6 +285,7 @@ bool aarch64_insn_is_nop(u32 insn); ...@@ -285,6 +285,7 @@ bool aarch64_insn_is_nop(u32 insn);
int aarch64_insn_read(void *addr, u32 *insnp); int aarch64_insn_read(void *addr, u32 *insnp);
int aarch64_insn_write(void *addr, u32 insn); int aarch64_insn_write(void *addr, u32 insn);
enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn); enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn);
u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
u32 insn, u64 imm); u32 insn, u64 imm);
u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
......
...@@ -68,6 +68,8 @@ ...@@ -68,6 +68,8 @@
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/cachetype.h> #include <asm/cachetype.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET) #define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
...@@ -269,5 +271,36 @@ static inline void __kvm_flush_dcache_pud(pud_t pud) ...@@ -269,5 +271,36 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
void kvm_set_way_flush(struct kvm_vcpu *vcpu); void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled); void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
static inline bool __kvm_cpu_uses_extended_idmap(void)
{
return __cpu_uses_extended_idmap();
}
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
pgd_t *hyp_pgd,
pgd_t *merged_hyp_pgd,
unsigned long hyp_idmap_start)
{
int idmap_idx;
/*
* Use the first entry to access the HYP mappings. It is
* guaranteed to be free, otherwise we wouldn't use an
* extended idmap.
*/
VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);
/*
* Create another extended level entry that points to the boot HYP map,
* which contains an ID mapping of the HYP init code. We essentially
* merge the boot and runtime HYP maps by doing so, but they don't
* overlap anyway, so this is fine.
*/
idmap_idx = hyp_idmap_start >> VA_BITS;
VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
}
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */ #endif /* __ARM64_KVM_MMU_H__ */
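To make the idmap_idx computation in __kvm_extend_hypmap() concrete, here is a minimal sketch; the physical address is made up and VA_BITS = 39 is assumed. With a 39-bit VA range each entry of the merged pgd covers 512 GB, so HYP init code sitting above that boundary lands in a non-zero entry and entry 0 stays free for the runtime HYP mappings, as the comment requires.

#include <stdio.h>

#define VA_BITS	39	/* assumed kernel configuration */

int main(void)
{
	unsigned long long hyp_idmap_start = 0x8000000000ULL;	/* 512 GB, illustrative */

	printf("idmap_idx = %llu\n", hyp_idmap_start >> VA_BITS);	/* prints 1 */
	return 0;
}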
...@@ -64,6 +64,49 @@ static inline void cpu_set_reserved_ttbr0(void) ...@@ -64,6 +64,49 @@ static inline void cpu_set_reserved_ttbr0(void)
: "r" (ttbr)); : "r" (ttbr));
} }
/*
* TCR.T0SZ value to use when the ID map is active. Usually equals
* TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
* physical memory, in which case it will be smaller.
*/
extern u64 idmap_t0sz;
static inline bool __cpu_uses_extended_idmap(void)
{
return (!IS_ENABLED(CONFIG_ARM64_VA_BITS_48) &&
unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)));
}
static inline void __cpu_set_tcr_t0sz(u64 t0sz)
{
unsigned long tcr;
if (__cpu_uses_extended_idmap())
asm volatile (
" mrs %0, tcr_el1 ;"
" bfi %0, %1, %2, %3 ;"
" msr tcr_el1, %0 ;"
" isb"
: "=&r" (tcr)
: "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
}
/*
* Set TCR.T0SZ to the value appropriate for activating the identity map.
*/
static inline void cpu_set_idmap_tcr_t0sz(void)
{
__cpu_set_tcr_t0sz(idmap_t0sz);
}
/*
* Set TCR.T0SZ to its default value (based on VA_BITS)
*/
static inline void cpu_set_default_tcr_t0sz(void)
{
__cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS));
}
static inline void switch_new_context(struct mm_struct *mm) static inline void switch_new_context(struct mm_struct *mm)
{ {
unsigned long flags; unsigned long flags;
......
...@@ -33,7 +33,9 @@ ...@@ -33,7 +33,9 @@
* image. Both require pgd, pud (4 levels only) and pmd tables to (section) * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
* map the kernel. With the 64K page configuration, swapper and idmap need to * map the kernel. With the 64K page configuration, swapper and idmap need to
* map to pte level. The swapper also maps the FDT (see __create_page_tables * map to pte level. The swapper also maps the FDT (see __create_page_tables
* for more information). * for more information). Note that the number of ID map translation levels
* could be increased on the fly if system RAM is out of reach for the default
* VA range, so 3 pages are reserved in all cases.
*/ */
#ifdef CONFIG_ARM64_64K_PAGES #ifdef CONFIG_ARM64_64K_PAGES
#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS) #define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS)
...@@ -42,7 +44,7 @@ ...@@ -42,7 +44,7 @@
#endif #endif
#define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE) #define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
#define IDMAP_DIR_SIZE (SWAPPER_DIR_SIZE) #define IDMAP_DIR_SIZE (3 * PAGE_SIZE)
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
......
...@@ -143,7 +143,12 @@ ...@@ -143,7 +143,12 @@
/* /*
* TCR flags. * TCR flags.
*/ */
#define TCR_TxSZ(x) (((UL(64) - (x)) << 16) | ((UL(64) - (x)) << 0)) #define TCR_T0SZ_OFFSET 0
#define TCR_T1SZ_OFFSET 16
#define TCR_T0SZ(x) ((UL(64) - (x)) << TCR_T0SZ_OFFSET)
#define TCR_T1SZ(x) ((UL(64) - (x)) << TCR_T1SZ_OFFSET)
#define TCR_TxSZ(x) (TCR_T0SZ(x) | TCR_T1SZ(x))
#define TCR_TxSZ_WIDTH 6
#define TCR_IRGN_NC ((UL(0) << 8) | (UL(0) << 24)) #define TCR_IRGN_NC ((UL(0) << 8) | (UL(0) << 24))
#define TCR_IRGN_WBWA ((UL(1) << 8) | (UL(1) << 24)) #define TCR_IRGN_WBWA ((UL(1) << 8) | (UL(1) << 24))
#define TCR_IRGN_WT ((UL(2) << 8) | (UL(2) << 24)) #define TCR_IRGN_WT ((UL(2) << 8) | (UL(2) << 24))
......
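The TCR_T0SZ()/TCR_T1SZ() helpers above are plain arithmetic; a standalone sketch of the values involved, assuming 39-bit virtual addresses as in the default 4 KB page configuration:

#include <stdio.h>

/* Mirrors TCR_T0SZ(x) with TCR_T0SZ_OFFSET == 0: the field holds 64 - VA bits. */
static unsigned int t0sz(unsigned int va_bits)
{
	return 64 - va_bits;
}

int main(void)
{
	printf("TCR_T0SZ(39) = %u\n", t0sz(39));	/* 25: default idmap_t0sz   */
	printf("TCR_T0SZ(48) = %u\n", t0sz(48));	/* 16: extended ID map case */
	return 0;
}

When system RAM sits above the reach of the default VA range, head.S lowers idmap_t0sz towards the larger-range value, which is exactly the condition __cpu_uses_extended_idmap() tests for in the mmu_context.h hunk above.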
...@@ -44,6 +44,7 @@ struct pmu_hw_events { ...@@ -44,6 +44,7 @@ struct pmu_hw_events {
struct arm_pmu { struct arm_pmu {
struct pmu pmu; struct pmu pmu;
cpumask_t active_irqs; cpumask_t active_irqs;
int *irq_affinity;
const char *name; const char *name;
irqreturn_t (*handle_irq)(int irq_num, void *dev); irqreturn_t (*handle_irq)(int irq_num, void *dev);
void (*enable)(struct hw_perf_event *evt, int idx); void (*enable)(struct hw_perf_event *evt, int idx);
......
...@@ -45,15 +45,6 @@ do { \ ...@@ -45,15 +45,6 @@ do { \
cpu_do_switch_mm(virt_to_phys(pgd),mm); \ cpu_do_switch_mm(virt_to_phys(pgd),mm); \
} while (0) } while (0)
#define cpu_get_pgd() \
({ \
unsigned long pg; \
asm("mrs %0, ttbr0_el1\n" \
: "=r" (pg)); \
pg &= ~0xffff000000003ffful; \
(pgd_t *)phys_to_virt(pg); \
})
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* __ASM_PROCFNS_H */ #endif /* __ASM_PROCFNS_H */
...@@ -127,7 +127,11 @@ extern void release_thread(struct task_struct *); ...@@ -127,7 +127,11 @@ extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p); unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier() static inline void cpu_relax(void)
{
asm volatile("yield" ::: "memory");
}
#define cpu_relax_lowlatency() cpu_relax() #define cpu_relax_lowlatency() cpu_relax()
/* Thread switching */ /* Thread switching */
......
...@@ -40,4 +40,6 @@ static inline u32 mpidr_hash_size(void) ...@@ -40,4 +40,6 @@ static inline u32 mpidr_hash_size(void)
extern u64 __cpu_logical_map[NR_CPUS]; extern u64 __cpu_logical_map[NR_CPUS];
#define cpu_logical_map(cpu) __cpu_logical_map[cpu] #define cpu_logical_map(cpu) __cpu_logical_map[cpu]
void __init do_post_cpus_up_work(void);
#endif /* __ASM_SMP_PLAT_H */ #endif /* __ASM_SMP_PLAT_H */
...@@ -406,7 +406,7 @@ __SYSCALL(__NR_vfork, sys_vfork) ...@@ -406,7 +406,7 @@ __SYSCALL(__NR_vfork, sys_vfork)
#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ #define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
__SYSCALL(__NR_ugetrlimit, compat_sys_getrlimit) /* SuS compliant getrlimit */ __SYSCALL(__NR_ugetrlimit, compat_sys_getrlimit) /* SuS compliant getrlimit */
#define __NR_mmap2 192 #define __NR_mmap2 192
__SYSCALL(__NR_mmap2, sys_mmap_pgoff) __SYSCALL(__NR_mmap2, compat_sys_mmap2_wrapper)
#define __NR_truncate64 193 #define __NR_truncate64 193
__SYSCALL(__NR_truncate64, compat_sys_truncate64_wrapper) __SYSCALL(__NR_truncate64, compat_sys_truncate64_wrapper)
#define __NR_ftruncate64 194 #define __NR_ftruncate64 194
......
...@@ -12,12 +12,12 @@ CFLAGS_REMOVE_insn.o = -pg ...@@ -12,12 +12,12 @@ CFLAGS_REMOVE_insn.o = -pg
CFLAGS_REMOVE_return_address.o = -pg CFLAGS_REMOVE_return_address.o = -pg
# Object file lists. # Object file lists.
arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \ entry-fpsimd.o process.o ptrace.o setup.o signal.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \ sys.o stacktrace.o time.o traps.o io.o vdso.o \
hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o \ hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o \
return_address.o cpuinfo.o cpu_errata.o \ return_address.o cpuinfo.o cpu_errata.o \
alternative.o cacheinfo.o cpufeature.o alternative.o cacheinfo.o
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
sys_compat.o entry32.o \ sys_compat.o entry32.o \
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/alternative.h> #include <asm/alternative.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/insn.h>
#include <linux/stop_machine.h> #include <linux/stop_machine.h>
extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
...@@ -33,6 +34,48 @@ struct alt_region { ...@@ -33,6 +34,48 @@ struct alt_region {
struct alt_instr *end; struct alt_instr *end;
}; };
/*
* Decode the imm field of a b/bl instruction, and return the byte
* offset as a signed value (so it can be used when computing a new
* branch target).
*/
static s32 get_branch_offset(u32 insn)
{
s32 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
/* sign-extend the immediate before turning it into a byte offset */
return (imm << 6) >> 4;
}
static u32 get_alt_insn(u8 *insnptr, u8 *altinsnptr)
{
u32 insn;
aarch64_insn_read(altinsnptr, &insn);
/* Stop the world on instructions we don't support... */
BUG_ON(aarch64_insn_is_cbz(insn));
BUG_ON(aarch64_insn_is_cbnz(insn));
BUG_ON(aarch64_insn_is_bcond(insn));
/* ... and there is probably more. */
if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
enum aarch64_insn_branch_type type;
unsigned long target;
if (aarch64_insn_is_b(insn))
type = AARCH64_INSN_BRANCH_NOLINK;
else
type = AARCH64_INSN_BRANCH_LINK;
target = (unsigned long)altinsnptr + get_branch_offset(insn);
insn = aarch64_insn_gen_branch_imm((unsigned long)insnptr,
target, type);
}
return insn;
}
static int __apply_alternatives(void *alt_region) static int __apply_alternatives(void *alt_region)
{ {
struct alt_instr *alt; struct alt_instr *alt;
...@@ -40,16 +83,24 @@ static int __apply_alternatives(void *alt_region) ...@@ -40,16 +83,24 @@ static int __apply_alternatives(void *alt_region)
u8 *origptr, *replptr; u8 *origptr, *replptr;
for (alt = region->begin; alt < region->end; alt++) { for (alt = region->begin; alt < region->end; alt++) {
u32 insn;
int i;
if (!cpus_have_cap(alt->cpufeature)) if (!cpus_have_cap(alt->cpufeature))
continue; continue;
BUG_ON(alt->alt_len > alt->orig_len); BUG_ON(alt->alt_len != alt->orig_len);
pr_info_once("patching kernel code\n"); pr_info_once("patching kernel code\n");
origptr = (u8 *)&alt->orig_offset + alt->orig_offset; origptr = (u8 *)&alt->orig_offset + alt->orig_offset;
replptr = (u8 *)&alt->alt_offset + alt->alt_offset; replptr = (u8 *)&alt->alt_offset + alt->alt_offset;
memcpy(origptr, replptr, alt->alt_len);
for (i = 0; i < alt->alt_len; i += sizeof(insn)) {
insn = get_alt_insn(origptr + i, replptr + i);
aarch64_insn_write(origptr + i, insn);
}
flush_icache_range((uintptr_t)origptr, flush_icache_range((uintptr_t)origptr,
(uintptr_t)(origptr + alt->alt_len)); (uintptr_t)(origptr + alt->alt_len));
} }
......
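The "(imm << 6) >> 4" expression in get_branch_offset() packs two steps into one: it sign-extends the 26-bit immediate and converts it from instruction words to bytes. A quick standalone check of that trick (it relies on arithmetic right shift of signed values, as the kernel code does implicitly):

#include <assert.h>
#include <stdint.h>

static int32_t branch_offset(uint32_t imm26)
{
	/* bit 25 becomes bit 31, then >> 4 sign-extends and leaves a net << 2 */
	return (int32_t)(imm26 << 6) >> 4;
}

int main(void)
{
	assert(branch_offset(0x0000001) == 4);		/* one instruction forward  */
	assert(branch_offset(0x3ffffff) == -4);		/* one instruction backward */
	assert(branch_offset(0x2000000) == -(1 << 27));	/* -128 MB, the b/bl limit  */
	return 0;
}

get_alt_insn() then uses this byte offset to re-encode b/bl instructions relative to their new location, which is why only plain branches are accepted in alternative sequences for now and anything else hits the BUG_ON()s above.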
...@@ -24,7 +24,6 @@ ...@@ -24,7 +24,6 @@
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/memory.h> #include <asm/memory.h>
#include <asm/cputable.h>
#include <asm/smp_plat.h> #include <asm/smp_plat.h>
#include <asm/suspend.h> #include <asm/suspend.h>
#include <asm/vdso_datapage.h> #include <asm/vdso_datapage.h>
...@@ -70,9 +69,6 @@ int main(void) ...@@ -70,9 +69,6 @@ int main(void)
BLANK(); BLANK();
DEFINE(PAGE_SZ, PAGE_SIZE); DEFINE(PAGE_SZ, PAGE_SIZE);
BLANK(); BLANK();
DEFINE(CPU_INFO_SZ, sizeof(struct cpu_info));
DEFINE(CPU_INFO_SETUP, offsetof(struct cpu_info, cpu_setup));
BLANK();
DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
......
...@@ -16,8 +16,6 @@ ...@@ -16,8 +16,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#define pr_fmt(fmt) "alternatives: " fmt
#include <linux/types.h> #include <linux/types.h>
#include <asm/cpu.h> #include <asm/cpu.h>
#include <asm/cputype.h> #include <asm/cputype.h>
...@@ -26,27 +24,11 @@ ...@@ -26,27 +24,11 @@
#define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) #define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) #define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
/*
* Add a struct or another datatype to the union below if you need
* different means to detect an affected CPU.
*/
struct arm64_cpu_capabilities {
const char *desc;
u16 capability;
bool (*is_affected)(struct arm64_cpu_capabilities *);
union {
struct {
u32 midr_model;
u32 midr_range_min, midr_range_max;
};
};
};
#define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \ #define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
MIDR_ARCHITECTURE_MASK) MIDR_ARCHITECTURE_MASK)
static bool __maybe_unused static bool __maybe_unused
is_affected_midr_range(struct arm64_cpu_capabilities *entry) is_affected_midr_range(const struct arm64_cpu_capabilities *entry)
{ {
u32 midr = read_cpuid_id(); u32 midr = read_cpuid_id();
...@@ -59,12 +41,12 @@ is_affected_midr_range(struct arm64_cpu_capabilities *entry) ...@@ -59,12 +41,12 @@ is_affected_midr_range(struct arm64_cpu_capabilities *entry)
} }
#define MIDR_RANGE(model, min, max) \ #define MIDR_RANGE(model, min, max) \
.is_affected = is_affected_midr_range, \ .matches = is_affected_midr_range, \
.midr_model = model, \ .midr_model = model, \
.midr_range_min = min, \ .midr_range_min = min, \
.midr_range_max = max .midr_range_max = max
struct arm64_cpu_capabilities arm64_errata[] = { const struct arm64_cpu_capabilities arm64_errata[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \ #if defined(CONFIG_ARM64_ERRATUM_826319) || \
defined(CONFIG_ARM64_ERRATUM_827319) || \ defined(CONFIG_ARM64_ERRATUM_827319) || \
defined(CONFIG_ARM64_ERRATUM_824069) defined(CONFIG_ARM64_ERRATUM_824069)
...@@ -88,7 +70,16 @@ struct arm64_cpu_capabilities arm64_errata[] = { ...@@ -88,7 +70,16 @@ struct arm64_cpu_capabilities arm64_errata[] = {
/* Cortex-A57 r0p0 - r1p2 */ /* Cortex-A57 r0p0 - r1p2 */
.desc = "ARM erratum 832075", .desc = "ARM erratum 832075",
.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
MIDR_RANGE(MIDR_CORTEX_A57, 0x00, 0x12), MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
(1 << MIDR_VARIANT_SHIFT) | 2),
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
{
/* Cortex-A53 r0p[01234] */
.desc = "ARM erratum 845719",
.capability = ARM64_WORKAROUND_845719,
MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
}, },
#endif #endif
{ {
...@@ -97,15 +88,5 @@ struct arm64_cpu_capabilities arm64_errata[] = { ...@@ -97,15 +88,5 @@ struct arm64_cpu_capabilities arm64_errata[] = {
void check_local_cpu_errata(void) void check_local_cpu_errata(void)
{ {
struct arm64_cpu_capabilities *cpus = arm64_errata; check_cpu_capabilities(arm64_errata, "enabling workaround for");
int i;
for (i = 0; cpus[i].desc; i++) {
if (!cpus[i].is_affected(&cpus[i]))
continue;
if (!cpus_have_cap(cpus[i].capability))
pr_info("enabling workaround for %s\n", cpus[i].desc);
cpus_set_cap(cpus[i].capability);
}
} }
/* /*
* arch/arm64/kernel/cputable.c * Contains CPU feature definitions
* *
* Copyright (C) 2012 ARM Ltd. * Copyright (C) 2015 ARM Ltd.
* *
* This program is free software: you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation. * published by the Free Software Foundation.
* *
...@@ -16,18 +16,32 @@ ...@@ -16,18 +16,32 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include <linux/init.h> #define pr_fmt(fmt) "alternatives: " fmt
#include <asm/cputable.h> #include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
extern unsigned long __cpu_setup(void); static const struct arm64_cpu_capabilities arm64_features[] = {
{},
struct cpu_info cpu_table[] = {
{
.cpu_id_val = 0x000f0000,
.cpu_id_mask = 0x000f0000,
.cpu_name = "AArch64 Processor",
.cpu_setup = __cpu_setup,
},
{ /* Empty */ },
}; };
void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
const char *info)
{
int i;
for (i = 0; caps[i].desc; i++) {
if (!caps[i].matches(&caps[i]))
continue;
if (!cpus_have_cap(caps[i].capability))
pr_info("%s %s\n", info, caps[i].desc);
cpus_set_cap(caps[i].capability);
}
}
void check_local_cpu_features(void)
{
check_cpu_capabilities(arm64_features, "detected feature");
}
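For readers skimming the new API: check_cpu_capabilities() simply walks a table terminated by an entry with a NULL desc and calls each entry's .matches() hook. A self-contained userspace mock of the same pattern follows; the struct, the capability number and the MIDR value are simplified stand-ins, not the kernel's own definitions.

#include <stdbool.h>
#include <stdio.h>

struct cap_entry {
	const char *desc;
	unsigned int capability;
	bool (*matches)(const struct cap_entry *);
	unsigned int midr_model;
};

static unsigned long caps_bitmap;
static unsigned int mock_midr = 0x411fd072;	/* pretend Cortex-A57 r1p2 */

static bool is_model(const struct cap_entry *e)
{
	/* compare implementer/part/architecture, ignore variant and revision */
	return (mock_midr & 0xff0ffff0) == e->midr_model;
}

static const struct cap_entry mock_errata[] = {
	{ "mock erratum", 1, is_model, 0x410fd070 },
	{ /* sentinel: NULL desc terminates the walk */ },
};

static void check_caps(const struct cap_entry *caps, const char *info)
{
	for (int i = 0; caps[i].desc; i++) {
		if (!caps[i].matches(&caps[i]))
			continue;
		if (!(caps_bitmap & (1UL << caps[i].capability)))
			printf("%s %s\n", info, caps[i].desc);
		caps_bitmap |= 1UL << caps[i].capability;
	}
}

int main(void)
{
	check_caps(mock_errata, "enabling workaround for");
	return 0;
}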
...@@ -236,6 +236,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) ...@@ -236,6 +236,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
cpuinfo_detect_icache_policy(info); cpuinfo_detect_icache_policy(info);
check_local_cpu_errata(); check_local_cpu_errata();
check_local_cpu_features();
update_cpu_features(info); update_cpu_features(info);
} }
......
...@@ -21,8 +21,10 @@ ...@@ -21,8 +21,10 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/esr.h> #include <asm/esr.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
...@@ -120,6 +122,24 @@ ...@@ -120,6 +122,24 @@
ct_user_enter ct_user_enter
ldr x23, [sp, #S_SP] // load return stack pointer ldr x23, [sp, #S_SP] // load return stack pointer
msr sp_el0, x23 msr sp_el0, x23
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_insn \
"nop", \
"tbz x22, #4, 1f", \
ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
alternative_insn \
"nop; nop", \
"mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:", \
ARM64_WORKAROUND_845719
#else
alternative_insn \
"nop", \
"msr contextidr_el1, xzr; 1:", \
ARM64_WORKAROUND_845719
#endif
#endif
.endif .endif
msr elr_el1, x21 // set up the return data msr elr_el1, x21 // set up the return data
msr spsr_el1, x22 msr spsr_el1, x22
......
...@@ -19,9 +19,12 @@ ...@@ -19,9 +19,12 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/const.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/page.h>
/* /*
* System call wrappers for the AArch32 compatibility layer. * System call wrappers for the AArch32 compatibility layer.
...@@ -53,6 +56,21 @@ ENTRY(compat_sys_fstatfs64_wrapper) ...@@ -53,6 +56,21 @@ ENTRY(compat_sys_fstatfs64_wrapper)
b compat_sys_fstatfs64 b compat_sys_fstatfs64
ENDPROC(compat_sys_fstatfs64_wrapper) ENDPROC(compat_sys_fstatfs64_wrapper)
/*
* Note: off_4k (w5) is always in units of 4K. If we can't do the
* requested offset because it is not page-aligned, we return -EINVAL.
*/
ENTRY(compat_sys_mmap2_wrapper)
#if PAGE_SHIFT > 12
tst w5, #~PAGE_MASK >> 12
b.ne 1f
lsr w5, w5, #PAGE_SHIFT - 12
#endif
b sys_mmap_pgoff
1: mov x0, #-EINVAL
ret
ENDPROC(compat_sys_mmap2_wrapper)
/* /*
* Wrappers for AArch32 syscalls that either take 64-bit parameters * Wrappers for AArch32 syscalls that either take 64-bit parameters
* in registers or that take 32-bit parameters which require sign * in registers or that take 32-bit parameters which require sign
......
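The wrapper's arithmetic is easy to check in userspace terms; below is a hedged C sketch assuming 64 KB kernel pages (PAGE_SHIFT == 16), the only case in which the #if branch above is compiled in.

#include <errno.h>
#include <stdio.h>

#define PAGE_SHIFT	16	/* assumed 64 KB pages */

static long mmap2_off_to_pgoff(unsigned long off_4k)
{
	if (off_4k & ((1UL << (PAGE_SHIFT - 12)) - 1))
		return -EINVAL;			/* not 64 KB aligned */
	return off_4k >> (PAGE_SHIFT - 12);	/* now in PAGE_SIZE units */
}

int main(void)
{
	printf("%ld\n", mmap2_off_to_pgoff(32));	/* 2   (a 128 KB byte offset) */
	printf("%ld\n", mmap2_off_to_pgoff(3));		/* -22 (-EINVAL)              */
	return 0;
}

With 4 KB pages the shift is zero and the offset passes through unchanged, which is why the wrapper reduces to a plain branch to sys_mmap_pgoff in that configuration.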

...@@ -265,23 +265,13 @@ int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt) ...@@ -265,23 +265,13 @@ int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
return aarch64_insn_patch_text_sync(addrs, insns, cnt); return aarch64_insn_patch_text_sync(addrs, insns, cnt);
} }
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
u32 insn, u64 imm) u32 *maskp, int *shiftp)
{ {
u32 immlo, immhi, lomask, himask, mask; u32 mask;
int shift; int shift;
switch (type) { switch (type) {
case AARCH64_INSN_IMM_ADR:
lomask = 0x3;
himask = 0x7ffff;
immlo = imm & lomask;
imm >>= 2;
immhi = imm & himask;
imm = (immlo << 24) | (immhi);
mask = (lomask << 24) | (himask);
shift = 5;
break;
case AARCH64_INSN_IMM_26: case AARCH64_INSN_IMM_26:
mask = BIT(26) - 1; mask = BIT(26) - 1;
shift = 0; shift = 0;
...@@ -320,10 +310,69 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, ...@@ -320,10 +310,69 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
shift = 16; shift = 16;
break; break;
default: default:
return -EINVAL;
}
*maskp = mask;
*shiftp = shift;
return 0;
}
#define ADR_IMM_HILOSPLIT 2
#define ADR_IMM_SIZE SZ_2M
#define ADR_IMM_LOMASK ((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT 29
#define ADR_IMM_HISHIFT 5
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
u32 immlo, immhi, mask;
int shift;
switch (type) {
case AARCH64_INSN_IMM_ADR:
shift = 0;
immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
mask = ADR_IMM_SIZE - 1;
break;
default:
if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
type);
return 0;
}
}
return (insn >> shift) & mask;
}
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
u32 insn, u64 imm)
{
u32 immlo, immhi, mask;
int shift;
switch (type) {
case AARCH64_INSN_IMM_ADR:
shift = 0;
immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
imm >>= ADR_IMM_HILOSPLIT;
immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
imm = immlo | immhi;
mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
break;
default:
if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n", pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
type); type);
return 0; return 0;
} }
}
/* Update the immediate field. */ /* Update the immediate field. */
insn &= ~(mask << shift); insn &= ~(mask << shift);
......
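The ADR_IMM_* constants above describe how a 21-bit PC-relative immediate is split across the instruction word: immlo lives in bits 30:29 and immhi in bits 23:5. A standalone round-trip sketch using the same constants (the adrp opcode below is only illustrative):

#include <assert.h>
#include <stdint.h>

#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_LOMASK		((1u << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		(((1u << 21) >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5

static uint32_t adr_encode(uint32_t insn, uint32_t imm21)
{
	insn &= ~((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
		  (ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
	insn |= (imm21 & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
	insn |= ((imm21 >> ADR_IMM_HILOSPLIT) & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
	return insn;
}

static uint32_t adr_decode(uint32_t insn)
{
	uint32_t immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
	uint32_t immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;

	return (immhi << ADR_IMM_HILOSPLIT) | immlo;
}

int main(void)
{
	uint32_t adrp = 0x90000000;	/* adrp x0, #0 (illustrative) */

	assert(adr_decode(adr_encode(adrp, 0x12345)) == 0x12345);
	return 0;
}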
...@@ -25,8 +25,10 @@ ...@@ -25,8 +25,10 @@
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/of.h>
#include <linux/perf_event.h> #include <linux/perf_event.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
...@@ -322,22 +324,31 @@ armpmu_add(struct perf_event *event, int flags) ...@@ -322,22 +324,31 @@ armpmu_add(struct perf_event *event, int flags)
} }
static int static int
validate_event(struct pmu_hw_events *hw_events, validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
struct perf_event *event) struct perf_event *event)
{ {
struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct arm_pmu *armpmu;
struct hw_perf_event fake_event = event->hw; struct hw_perf_event fake_event = event->hw;
struct pmu *leader_pmu = event->group_leader->pmu; struct pmu *leader_pmu = event->group_leader->pmu;
if (is_software_event(event)) if (is_software_event(event))
return 1; return 1;
/*
* Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
* core perf code won't check that the pmu->ctx == leader->ctx
* until after pmu->event_init(event).
*/
if (event->pmu != pmu)
return 0;
if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
return 1; return 1;
if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
return 1; return 1;
armpmu = to_arm_pmu(event->pmu);
return armpmu->get_event_idx(hw_events, &fake_event) >= 0; return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
} }
...@@ -355,15 +366,15 @@ validate_group(struct perf_event *event) ...@@ -355,15 +366,15 @@ validate_group(struct perf_event *event)
memset(fake_used_mask, 0, sizeof(fake_used_mask)); memset(fake_used_mask, 0, sizeof(fake_used_mask));
fake_pmu.used_mask = fake_used_mask; fake_pmu.used_mask = fake_used_mask;
if (!validate_event(&fake_pmu, leader)) if (!validate_event(event->pmu, &fake_pmu, leader))
return -EINVAL; return -EINVAL;
list_for_each_entry(sibling, &leader->sibling_list, group_entry) { list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
if (!validate_event(&fake_pmu, sibling)) if (!validate_event(event->pmu, &fake_pmu, sibling))
return -EINVAL; return -EINVAL;
} }
if (!validate_event(&fake_pmu, event)) if (!validate_event(event->pmu, &fake_pmu, event))
return -EINVAL; return -EINVAL;
return 0; return 0;
...@@ -396,7 +407,12 @@ armpmu_release_hardware(struct arm_pmu *armpmu) ...@@ -396,7 +407,12 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
free_percpu_irq(irq, &cpu_hw_events); free_percpu_irq(irq, &cpu_hw_events);
} else { } else {
for (i = 0; i < irqs; ++i) { for (i = 0; i < irqs; ++i) {
if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs)) int cpu = i;
if (armpmu->irq_affinity)
cpu = armpmu->irq_affinity[i];
if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
continue; continue;
irq = platform_get_irq(pmu_device, i); irq = platform_get_irq(pmu_device, i);
if (irq > 0) if (irq > 0)
...@@ -450,19 +466,24 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu) ...@@ -450,19 +466,24 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
on_each_cpu(armpmu_enable_percpu_irq, &irq, 1); on_each_cpu(armpmu_enable_percpu_irq, &irq, 1);
} else { } else {
for (i = 0; i < irqs; ++i) { for (i = 0; i < irqs; ++i) {
int cpu = i;
err = 0; err = 0;
irq = platform_get_irq(pmu_device, i); irq = platform_get_irq(pmu_device, i);
if (irq <= 0) if (irq <= 0)
continue; continue;
if (armpmu->irq_affinity)
cpu = armpmu->irq_affinity[i];
/* /*
* If we have a single PMU interrupt that we can't shift, * If we have a single PMU interrupt that we can't shift,
* assume that we're running on a uniprocessor machine and * assume that we're running on a uniprocessor machine and
* continue. Otherwise, continue without this interrupt. * continue. Otherwise, continue without this interrupt.
*/ */
if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
irq, i); irq, cpu);
continue; continue;
} }
...@@ -476,7 +497,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu) ...@@ -476,7 +497,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
return err; return err;
} }
cpumask_set_cpu(i, &armpmu->active_irqs); cpumask_set_cpu(cpu, &armpmu->active_irqs);
} }
} }
...@@ -1289,9 +1310,46 @@ static const struct of_device_id armpmu_of_device_ids[] = { ...@@ -1289,9 +1310,46 @@ static const struct of_device_id armpmu_of_device_ids[] = {
static int armpmu_device_probe(struct platform_device *pdev) static int armpmu_device_probe(struct platform_device *pdev)
{ {
int i, *irqs;
if (!cpu_pmu) if (!cpu_pmu)
return -ENODEV; return -ENODEV;
irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
if (!irqs)
return -ENOMEM;
for (i = 0; i < pdev->num_resources; ++i) {
struct device_node *dn;
int cpu;
dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
i);
if (!dn) {
pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
of_node_full_name(dn), i);
break;
}
for_each_possible_cpu(cpu)
if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
break;
of_node_put(dn);
if (cpu >= nr_cpu_ids) {
pr_warn("Failed to find logical CPU for %s\n",
dn->name);
break;
}
irqs[i] = cpu;
}
if (i == pdev->num_resources)
cpu_pmu->irq_affinity = irqs;
else
kfree(irqs);
cpu_pmu->plat_device = pdev; cpu_pmu->plat_device = pdev;
return 0; return 0;
} }
......
...@@ -50,7 +50,6 @@ ...@@ -50,7 +50,6 @@
#include <asm/cpu.h> #include <asm/cpu.h>
#include <asm/cputype.h> #include <asm/cputype.h>
#include <asm/elf.h> #include <asm/elf.h>
#include <asm/cputable.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/cpu_ops.h> #include <asm/cpu_ops.h>
#include <asm/sections.h> #include <asm/sections.h>
...@@ -62,9 +61,7 @@ ...@@ -62,9 +61,7 @@
#include <asm/memblock.h> #include <asm/memblock.h>
#include <asm/psci.h> #include <asm/psci.h>
#include <asm/efi.h> #include <asm/efi.h>
#include <asm/virt.h>
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned long elf_hwcap __read_mostly; unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap); EXPORT_SYMBOL_GPL(elf_hwcap);
...@@ -83,7 +80,6 @@ unsigned int compat_elf_hwcap2 __read_mostly; ...@@ -83,7 +80,6 @@ unsigned int compat_elf_hwcap2 __read_mostly;
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
static const char *cpu_name;
phys_addr_t __fdt_pointer __initdata; phys_addr_t __fdt_pointer __initdata;
/* /*
...@@ -119,6 +115,11 @@ void __init early_print(const char *str, ...) ...@@ -119,6 +115,11 @@ void __init early_print(const char *str, ...)
printk("%s", buf); printk("%s", buf);
} }
/*
* The recorded values of x0 .. x3 upon kernel entry.
*/
u64 __cacheline_aligned boot_args[4];
void __init smp_setup_processor_id(void) void __init smp_setup_processor_id(void)
{ {
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
...@@ -207,24 +208,38 @@ static void __init smp_build_mpidr_hash(void) ...@@ -207,24 +208,38 @@ static void __init smp_build_mpidr_hash(void)
} }
#endif #endif
static void __init hyp_mode_check(void)
{
if (is_hyp_mode_available())
pr_info("CPU: All CPU(s) started at EL2\n");
else if (is_hyp_mode_mismatched())
WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
"CPU: CPUs started in inconsistent modes");
else
pr_info("CPU: All CPU(s) started at EL1\n");
}
void __init do_post_cpus_up_work(void)
{
hyp_mode_check();
apply_alternatives_all();
}
#ifdef CONFIG_UP_LATE_INIT
void __init up_late_init(void)
{
do_post_cpus_up_work();
}
#endif /* CONFIG_UP_LATE_INIT */
static void __init setup_processor(void) static void __init setup_processor(void)
{ {
struct cpu_info *cpu_info;
u64 features, block; u64 features, block;
u32 cwg; u32 cwg;
int cls; int cls;
cpu_info = lookup_processor_type(read_cpuid_id()); printk("CPU: AArch64 Processor [%08x] revision %d\n",
if (!cpu_info) { read_cpuid_id(), read_cpuid_id() & 15);
printk("CPU configuration botched (ID %08x), unable to continue.\n",
read_cpuid_id());
while (1);
}
cpu_name = cpu_info->cpu_name;
printk("CPU: %s [%08x] revision %d\n",
cpu_name, read_cpuid_id(), read_cpuid_id() & 15);
sprintf(init_utsname()->machine, ELF_PLATFORM); sprintf(init_utsname()->machine, ELF_PLATFORM);
elf_hwcap = 0; elf_hwcap = 0;
...@@ -402,6 +417,12 @@ void __init setup_arch(char **cmdline_p) ...@@ -402,6 +417,12 @@ void __init setup_arch(char **cmdline_p)
conswitchp = &dummy_con; conswitchp = &dummy_con;
#endif #endif
#endif #endif
if (boot_args[1] || boot_args[2] || boot_args[3]) {
pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
"This indicates a broken bootloader or old kernel\n",
boot_args[1], boot_args[2], boot_args[3]);
}
} }
static int __init arm64_device_init(void) static int __init arm64_device_init(void)
......
...@@ -151,6 +151,7 @@ asmlinkage void secondary_start_kernel(void) ...@@ -151,6 +151,7 @@ asmlinkage void secondary_start_kernel(void)
*/ */
cpu_set_reserved_ttbr0(); cpu_set_reserved_ttbr0();
flush_tlb_all(); flush_tlb_all();
cpu_set_default_tcr_t0sz();
preempt_disable(); preempt_disable();
trace_hardirqs_off(); trace_hardirqs_off();
...@@ -309,7 +310,7 @@ void cpu_die(void) ...@@ -309,7 +310,7 @@ void cpu_die(void)
void __init smp_cpus_done(unsigned int max_cpus) void __init smp_cpus_done(unsigned int max_cpus)
{ {
pr_info("SMP: Total of %d processors activated.\n", num_online_cpus()); pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
apply_alternatives_all();
do_post_cpus_up_work();
} }
void __init smp_prepare_boot_cpu(void) void __init smp_prepare_boot_cpu(void)
......
...@@ -37,6 +37,7 @@ asmlinkage long compat_sys_readahead_wrapper(void); ...@@ -37,6 +37,7 @@ asmlinkage long compat_sys_readahead_wrapper(void);
asmlinkage long compat_sys_fadvise64_64_wrapper(void); asmlinkage long compat_sys_fadvise64_64_wrapper(void);
asmlinkage long compat_sys_sync_file_range2_wrapper(void); asmlinkage long compat_sys_sync_file_range2_wrapper(void);
asmlinkage long compat_sys_fallocate_wrapper(void); asmlinkage long compat_sys_fallocate_wrapper(void);
asmlinkage long compat_sys_mmap2_wrapper(void);
#undef __SYSCALL #undef __SYSCALL
#define __SYSCALL(nr, sym) [nr] = sym, #define __SYSCALL(nr, sym) [nr] = sym,
......
...@@ -23,10 +23,14 @@ jiffies = jiffies_64; ...@@ -23,10 +23,14 @@ jiffies = jiffies_64;
#define HYPERVISOR_TEXT \ #define HYPERVISOR_TEXT \
/* \ /* \
* Force the alignment to be compatible with \
* the vectors requirements \
* Align to 4 KB so that \
* a) the HYP vector table is at its minimum \
* alignment of 2048 bytes \
* b) the HYP init code will not cross a page \
* boundary if its size does not exceed \
* 4 KB (see related ASSERT() below) \
*/ \ */ \
. = ALIGN(2048); \
. = ALIGN(SZ_4K); \
VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \ VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
*(.hyp.idmap.text) \ *(.hyp.idmap.text) \
VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \ VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \
...@@ -163,10 +167,11 @@ SECTIONS ...@@ -163,10 +167,11 @@ SECTIONS
} }
/* /*
* The HYP init code can't be more than a page long.
* The HYP init code can't be more than a page long,
* and should not cross a page boundary.
*/ */
ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end),
"HYP init code too big")
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
"HYP init code too big or misaligned")
/* /*
* If padding is applied before .head.text, virt<->phys conversions will fail. * If padding is applied before .head.text, virt<->phys conversions will fail.
......
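For reference, the new ASSERT() boils down to a page-containment check: round the start of the HYP idmap text down to a 4 KB boundary and require that the end does not spill past that page. Below is a minimal C sketch of the same check; the function name, SZ_4K value and use of stdint types are illustrative, not taken from the kernel sources.

    #include <stdbool.h>
    #include <stdint.h>

    #define SZ_4K 0x1000UL

    /*
     * Return true when [start, end) stays inside the single 4 KB page that
     * contains the rounded-down start address, mirroring the linker ASSERT.
     */
    static bool hyp_idmap_fits_one_page(uint64_t start, uint64_t end)
    {
            uint64_t page_base = start & ~(SZ_4K - 1);  /* align start down to 4 KB */

            return end - page_base <= SZ_4K;
    }

Because the section is now aligned to SZ_4K by the linker, page_base equals __hyp_idmap_text_start, so the assertion effectively limits the HYP init code to one page and rules out a page-boundary crossing.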
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/kvm_arm.h> #include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h> #include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
.text .text
.pushsection .hyp.idmap.text, "ax" .pushsection .hyp.idmap.text, "ax"
...@@ -65,6 +66,25 @@ __do_hyp_init: ...@@ -65,6 +66,25 @@ __do_hyp_init:
and x4, x4, x5 and x4, x4, x5
ldr x5, =TCR_EL2_FLAGS ldr x5, =TCR_EL2_FLAGS
orr x4, x4, x5 orr x4, x4, x5
#ifndef CONFIG_ARM64_VA_BITS_48
/*
* If we are running with VA_BITS < 48, we may be running with an extra
* level of translation in the ID map. This is only the case if system
* RAM is out of range for the currently configured page size and number
* of translation levels, in which case we will also need the extra
* level for the HYP ID map, or we won't be able to enable the EL2 MMU.
*
* However, at EL2, there is only one TTBR register, and we can't switch
* between translation tables *and* update TCR_EL2.T0SZ at the same
* time. Bottom line: we need the extra level in *both* our translation
* tables.
*
* So use the same T0SZ value we use for the ID map.
*/
ldr_l x5, idmap_t0sz
bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
#endif
msr tcr_el2, x4 msr tcr_el2, x4
ldr x4, =VTCR_EL2_FLAGS ldr x4, =VTCR_EL2_FLAGS
...@@ -91,6 +111,10 @@ __do_hyp_init: ...@@ -91,6 +111,10 @@ __do_hyp_init:
msr sctlr_el2, x4 msr sctlr_el2, x4
isb isb
/* Skip the trampoline dance if we merged the boot and runtime PGDs */
cmp x0, x1
b.eq merged
/* MMU is now enabled. Get ready for the trampoline dance */ /* MMU is now enabled. Get ready for the trampoline dance */
ldr x4, =TRAMPOLINE_VA ldr x4, =TRAMPOLINE_VA
adr x5, target adr x5, target
...@@ -105,6 +129,7 @@ target: /* We're now in the trampoline code, switch page tables */ ...@@ -105,6 +129,7 @@ target: /* We're now in the trampoline code, switch page tables */
tlbi alle2 tlbi alle2
dsb sy dsb sy
merged:
/* Set the stack and new vectors */ /* Set the stack and new vectors */
kern_hyp_va x2 kern_hyp_va x2
mov sp, x2 mov sp, x2
......
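For clarity, the bfi above copies the T0SZ field of idmap_t0sz into the TCR_EL2 value so that EL2 uses the same input-address size as the kernel's ID map. A rough C equivalent of that bitfield insert is sketched below; the function name is illustrative, and the offset and width values are assumptions about the arm64 header definitions rather than something stated in this diff.

    #include <stdint.h>

    #define TCR_T0SZ_OFFSET 0   /* assumed: T0SZ lives in TCR bits [5:0] */
    #define TCR_TxSZ_WIDTH  6   /* assumed: the TxSZ fields are 6 bits wide */

    /*
     * Insert the low TCR_TxSZ_WIDTH bits of t0sz into tcr at TCR_T0SZ_OFFSET,
     * mimicking "bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH".
     */
    static uint64_t tcr_set_t0sz(uint64_t tcr, uint64_t t0sz)
    {
            uint64_t mask = ((1ULL << TCR_TxSZ_WIDTH) - 1) << TCR_T0SZ_OFFSET;

            return (tcr & ~mask) | ((t0sz << TCR_T0SZ_OFFSET) & mask);
    }

Since EL2 has only a single TTBR, reusing the ID-map T0SZ in both translation regimes is what lets the HYP tables and the ID map share the extra translation level described in the comment above.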
...@@ -40,6 +40,8 @@ ...@@ -40,6 +40,8 @@
#include "mm.h" #include "mm.h"
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
/* /*
* Empty_zero_page is a special page that is used for zero-initialized data * Empty_zero_page is a special page that is used for zero-initialized data
* and COW. * and COW.
...@@ -454,6 +456,7 @@ void __init paging_init(void) ...@@ -454,6 +456,7 @@ void __init paging_init(void)
*/ */
cpu_set_reserved_ttbr0(); cpu_set_reserved_ttbr0();
flush_tlb_all(); flush_tlb_all();
cpu_set_default_tcr_t0sz();
} }
/* /*
...@@ -461,8 +464,10 @@ void __init paging_init(void) ...@@ -461,8 +464,10 @@ void __init paging_init(void)
*/ */
void setup_mm_for_reboot(void) void setup_mm_for_reboot(void)
{ {
cpu_switch_mm(idmap_pg_dir, &init_mm);
flush_tlb_all();
cpu_set_reserved_ttbr0();
flush_tlb_all();
cpu_set_idmap_tcr_t0sz();
cpu_switch_mm(idmap_pg_dir, &init_mm);
} }
/* /*
...@@ -627,10 +632,7 @@ void __set_fixmap(enum fixed_addresses idx, ...@@ -627,10 +632,7 @@ void __set_fixmap(enum fixed_addresses idx,
unsigned long addr = __fix_to_virt(idx); unsigned long addr = __fix_to_virt(idx);
pte_t *pte; pte_t *pte;
if (idx >= __end_of_fixed_addresses) {
BUG();
return;
}
BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
pte = fixmap_pte(addr); pte = fixmap_pte(addr);
......
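idmap_t0sz defaults to the T0SZ matching the configured kernel VA size and is only lowered by head.S when system RAM lies outside that range. As a rough illustration of the arithmetic (the helper and the 64 - va_bits encoding are assumptions for the example, not code from this diff):

    #include <stdint.h>

    /* Assumed encoding: T0SZ = 64 - (number of input address bits). */
    static inline uint64_t t0sz_for(unsigned int va_bits)
    {
            return 64 - va_bits;
    }

    /*
     * Example: t0sz_for(39) == 25 covers 2^39 bytes (512 GB) of ID map;
     * t0sz_for(48) == 16 covers 2^48 bytes (256 TB), which with 4 KB pages
     * and a 39-bit kernel VA configuration needs one extra table level.
     */

The tcr_set_idmap_t0sz macro further down then programs whichever value head.S settled on, so every CPU walks the ID map with a consistent TCR.T0SZ.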
...@@ -73,7 +73,6 @@ int set_memory_ro(unsigned long addr, int numpages) ...@@ -73,7 +73,6 @@ int set_memory_ro(unsigned long addr, int numpages)
__pgprot(PTE_RDONLY), __pgprot(PTE_RDONLY),
__pgprot(PTE_WRITE)); __pgprot(PTE_WRITE));
} }
EXPORT_SYMBOL_GPL(set_memory_ro);
int set_memory_rw(unsigned long addr, int numpages) int set_memory_rw(unsigned long addr, int numpages)
{ {
...@@ -81,7 +80,6 @@ int set_memory_rw(unsigned long addr, int numpages) ...@@ -81,7 +80,6 @@ int set_memory_rw(unsigned long addr, int numpages)
__pgprot(PTE_WRITE), __pgprot(PTE_WRITE),
__pgprot(PTE_RDONLY)); __pgprot(PTE_RDONLY));
} }
EXPORT_SYMBOL_GPL(set_memory_rw);
int set_memory_nx(unsigned long addr, int numpages) int set_memory_nx(unsigned long addr, int numpages)
{ {
......
...@@ -52,3 +52,13 @@ ...@@ -52,3 +52,13 @@
mov \reg, #4 // bytes per word mov \reg, #4 // bytes per word
lsl \reg, \reg, \tmp // actual cache line size lsl \reg, \reg, \tmp // actual cache line size
.endm .endm
/*
* tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
*/
.macro tcr_set_idmap_t0sz, valreg, tmpreg
#ifndef CONFIG_ARM64_VA_BITS_48
ldr_l \tmpreg, idmap_t0sz
bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
#endif
.endm
...@@ -156,6 +156,7 @@ ENTRY(cpu_do_resume) ...@@ -156,6 +156,7 @@ ENTRY(cpu_do_resume)
msr cpacr_el1, x6 msr cpacr_el1, x6
msr ttbr0_el1, x1 msr ttbr0_el1, x1
msr ttbr1_el1, x7 msr ttbr1_el1, x7
tcr_set_idmap_t0sz x8, x7
msr tcr_el1, x8 msr tcr_el1, x8
msr vbar_el1, x9 msr vbar_el1, x9
msr mdscr_el1, x10 msr mdscr_el1, x10
...@@ -233,6 +234,8 @@ ENTRY(__cpu_setup) ...@@ -233,6 +234,8 @@ ENTRY(__cpu_setup)
*/ */
ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
tcr_set_idmap_t0sz x10, x9
/* /*
* Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
* TCR_EL1. * TCR_EL1.
......