Commit 63917f0b authored by Catalin Marinas

Merge branch 'kvm-arm64/kvm-for-3.11' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into upstream

* 'kvm-arm64/kvm-for-3.11' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms: (33 commits)
  arm64: KVM: document kernel object mappings in HYP
  arm64: KVM: MAINTAINERS update
  arm64: KVM: userspace API documentation
  arm64: KVM: enable initialization of a 32bit vcpu
  arm64: KVM: 32bit guest fault injection
  arm64: KVM: 32bit specific register world switch
  arm64: KVM: CPU specific 32bit coprocessor access
  arm64: KVM: 32bit handling of coprocessor traps
  arm64: KVM: 32bit conditional execution emulation
  arm64: KVM: 32bit GP register access
  arm64: KVM: define 32bit specific registers
  arm64: KVM: Build system integration
  arm64: KVM: PSCI implementation
  arm64: KVM: Plug the arch timer
  ARM: KVM: timer: allow DT matching for ARMv8 cores
  arm64: KVM: Plug the VGIC
  arm64: KVM: Exit handling
  arm64: KVM: HYP mode world switch implementation
  arm64: KVM: hypervisor initialization code
  arm64: KVM: guest one-reg interface
  ...

Conflicts:
	arch/arm64/Makefile
parents d822d2a1 aa4a73a0
@@ -73,3 +73,10 @@ Translation table lookup with 64KB pages:
| | +--------------------------> [41:29] L2 index (only 38:29 used)
| +-------------------------------> [47:42] L1 index (not used)
+-------------------------------------------------> [63] TTBR0/1
When using KVM, the hypervisor maps kernel pages in EL2, at a fixed
offset from the kernel VA (top 24bits of the kernel VA set to zero):
Start			End			Size		Use
-----------------------------------------------------------------------
0000004000000000	0000007fffffffff	 256GB		kernel objects mapped in HYP
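
For illustration, a minimal sketch of that conversion: masking off the upper
kernel-VA bits mirrors the HYP_PAGE_OFFSET_MASK/KERN_TO_HYP definitions added
in asm/kvm_mmu.h further down; the VA_BITS value here is an assumption for the
example, not part of the patch.

#include <stdio.h>

#define VA_BITS			39	/* assumed kernel VA size for this example */
#define HYP_PAGE_OFFSET_MASK	((1UL << VA_BITS) - 1)

/* Clearing the upper kernel-VA bits yields the EL2 (HYP) mapping address */
static unsigned long kern_to_hyp(unsigned long kva)
{
	return kva & HYP_PAGE_OFFSET_MASK;
}

int main(void)
{
	/* A kernel object address lands in the HYP window listed above */
	printf("%016lx\n", kern_to_hyp(0xffffffc000085000UL));
	return 0;
}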
@@ -280,7 +280,7 @@ kvm_run' (see below).
4.11 KVM_GET_REGS
Capability: basic
Architectures: all except ARM, arm64
Type: vcpu ioctl
Parameters: struct kvm_regs (out)
Returns: 0 on success, -1 on error
@@ -301,7 +301,7 @@ struct kvm_regs {
4.12 KVM_SET_REGS
Capability: basic
Architectures: all except ARM, arm64
Type: vcpu ioctl
Parameters: struct kvm_regs (in)
Returns: 0 on success, -1 on error
@@ -587,7 +587,7 @@ struct kvm_fpu {
4.24 KVM_CREATE_IRQCHIP
Capability: KVM_CAP_IRQCHIP
Architectures: x86, ia64, ARM, arm64
Type: vm ioctl
Parameters: none
Returns: 0 on success, -1 on error
@@ -595,14 +595,14 @@ Returns: 0 on success, -1 on error
Creates an interrupt controller model in the kernel. On x86, creates a virtual
ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a
local APIC. IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23
only go to the IOAPIC. On ia64, a IOSAPIC is created. On ARM/arm64, a GIC is
created.
4.25 KVM_IRQ_LINE
Capability: KVM_CAP_IRQCHIP
Architectures: x86, ia64, arm, arm64
Type: vm ioctl
Parameters: struct kvm_irq_level
Returns: 0 on success, -1 on error
@@ -612,9 +612,10 @@ On some architectures it is required that an interrupt controller model has
been previously created with KVM_CREATE_IRQCHIP. Note that edge-triggered
interrupts require the level to be set to 1 and then back to 0.
ARM/arm64 can signal an interrupt either at the CPU level, or at the
in-kernel irqchip (GIC), and for in-kernel irqchip can tell the GIC to
use PPIs designated for specific cpus. The irq field is interpreted
like this:
 bits: | 31 ... 24 | 23 ... 16 | 15 ... 0 |
field: | irq_type | vcpu_index | irq_id |
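
For illustration, a small sketch of how userspace could pack that field; the
shift and type values mirror the KVM_ARM_IRQ_* definitions added to
uapi/asm/kvm.h below.

#include <stdint.h>

#define KVM_ARM_IRQ_TYPE_SHIFT	24
#define KVM_ARM_IRQ_VCPU_SHIFT	16
#define KVM_ARM_IRQ_TYPE_PPI	2

/* bits: | 31..24 irq_type | 23..16 vcpu_index | 15..0 irq_id | */
static uint32_t kvm_arm_irq_field(uint32_t type, uint32_t vcpu, uint32_t irq_id)
{
	return (type << KVM_ARM_IRQ_TYPE_SHIFT) |
	       (vcpu << KVM_ARM_IRQ_VCPU_SHIFT) |
	       irq_id;
}

/* e.g. kvm_arm_irq_field(KVM_ARM_IRQ_TYPE_PPI, 1, 27) targets PPI 27 on vcpu 1 */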
@@ -1831,6 +1832,22 @@ ARM 32-bit VFP control registers have the following id bit patterns:
ARM 64-bit FP registers have the following id bit patterns:
0x4030 0000 0012 0 <regno:12>
arm64 registers are mapped using the lower 32 bits. The upper 16 bits of
that value give the register group type, or coprocessor number:
arm64 core/FP-SIMD registers have the following id bit patterns. Note
that the size of the access is variable, as the kvm_regs structure
contains elements ranging from 32 to 128 bits. The index is a 32bit
value in the kvm_regs structure seen as a 32bit array.
0x60x0 0000 0010 <index into the kvm_regs struct:16>
arm64 CCSIDR registers are demultiplexed by CSSELR value:
0x6020 0000 0011 00 <csselr:8>
arm64 system registers have the following id bit patterns:
0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
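
For illustration, a sketch that assembles such a system register index; the
field shifts match the KVM_REG_ARM64_SYSREG_* values added to uapi/asm/kvm.h
below, and the architecture/size bits are the standard uapi/linux/kvm.h values.

#include <stdint.h>

#define KVM_REG_ARM64		0x6000000000000000ULL	/* arch field */
#define KVM_REG_SIZE_U64	0x0030000000000000ULL	/* 64bit access */
#define KVM_REG_ARM64_SYSREG	(0x0013ULL << 16)	/* register group */

/* 0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3> */
static uint64_t arm64_sysreg_id(int op0, int op1, int crn, int crm, int op2)
{
	return KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG |
	       ((uint64_t)op0 << 14) | (op1 << 11) | (crn << 7) |
	       (crm << 3) | op2;
}

/* e.g. arm64_sysreg_id(3, 0, 1, 0, 0) encodes SCTLR_EL1 */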
4.69 KVM_GET_ONE_REG
Capability: KVM_CAP_ONE_REG
@@ -2264,7 +2281,7 @@ current state. "addr" is ignored.
4.77 KVM_ARM_VCPU_INIT
Capability: basic
Architectures: arm, arm64
Type: vcpu ioctl
Parameters: struct kvm_vcpu_init (in)
Returns: 0 on success; -1 on error
@@ -2283,12 +2300,14 @@ should be created before this ioctl is invoked.
Possible features:
- KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
Depends on KVM_CAP_ARM_PSCI.
- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
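
For illustration, a minimal userspace sketch of requesting these features; the
structure and feature constants are the uapi definitions added below, and the
target value is assumed here to be a Cortex-A57 host.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Start a vcpu powered off and in AArch32 (EL1 32bit) state */
static int vcpu_init_32bit(int vcpu_fd)
{
	struct kvm_vcpu_init init;

	memset(&init, 0, sizeof(init));
	init.target = KVM_ARM_TARGET_CORTEX_A57;	/* must match the host CPU */
	init.features[0] = (1 << KVM_ARM_VCPU_POWER_OFF) |
			   (1 << KVM_ARM_VCPU_EL1_32BIT);

	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}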
4.78 KVM_GET_REG_LIST
Capability: basic
Architectures: arm, arm64
Type: vcpu ioctl
Parameters: struct kvm_reg_list (in/out)
Returns: 0 on success; -1 on error Returns: 0 on success; -1 on error
@@ -2308,7 +2327,7 @@ KVM_GET_ONE_REG/KVM_SET_ONE_REG calls.
4.80 KVM_ARM_SET_DEVICE_ADDR
Capability: KVM_CAP_ARM_SET_DEVICE_ADDR
Architectures: arm, arm64
Type: vm ioctl
Parameters: struct kvm_arm_device_address (in)
Returns: 0 on success, -1 on error
@@ -2329,18 +2348,19 @@ can access emulated or directly exposed devices, which the host kernel needs
to know about. The id field is an architecture specific identifier for a
specific device.
ARM/arm64 divides the id field into two parts, a device id and an
address type id specific to the individual device.
 bits: | 63 ... 32 | 31 ... 16 | 15 ... 0 |
field: | 0x00000000 | device id | addr type id |
ARM/arm64 currently only require this when using the in-kernel GIC
support for the hardware VGIC features, using KVM_ARM_DEVICE_VGIC_V2
as the device id. When setting the base address for the guest's
mapping of the VGIC virtual CPU and distributor interface, the ioctl
must be called after calling KVM_CREATE_IRQCHIP, but before calling
KVM_RUN on any of the VCPUs. Calling this ioctl twice for any of the
base addresses will return -EEXIST.
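
For illustration, a sketch of building the id for the VGIC distributor base;
the constants are the KVM_ARM_DEVICE_* and KVM_VGIC_V2_* values added to
uapi/asm/kvm.h below.

#include <stdint.h>

#define KVM_ARM_DEVICE_ID_SHIFT		16
#define KVM_ARM_DEVICE_VGIC_V2		0
#define KVM_VGIC_V2_ADDR_TYPE_DIST	0

/* bits: | 63..32 zero | 31..16 device id | 15..0 addr type id | */
static uint64_t vgic_dist_id(void)
{
	return ((uint64_t)KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) |
	       KVM_VGIC_V2_ADDR_TYPE_DIST;
}

/* The id is passed together with the guest physical address in the ioctl argument */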
4.82 KVM_PPC_RTAS_DEFINE_TOKEN
......
@@ -4709,6 +4709,15 @@ F: arch/arm/include/uapi/asm/kvm*
F: arch/arm/include/asm/kvm*
F: arch/arm/kvm/
KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
M: Marc Zyngier <marc.zyngier@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
S: Maintained
F: arch/arm64/include/uapi/asm/kvm*
F: arch/arm64/include/asm/kvm*
F: arch/arm64/kvm/
KEXEC
M: Eric Biederman <ebiederm@xmission.com>
W: http://kernel.org/pub/linux/utils/kernel/kexec/
......
@@ -195,6 +195,7 @@ static struct notifier_block kvm_timer_cpu_nb = {
static const struct of_device_id arch_timer_of_match[] = {
{ .compatible = "arm,armv7-timer", },
{ .compatible = "arm,armv8-timer", },
{},
};
......
@@ -37,7 +37,8 @@ TEXT_OFFSET := 0x00080000
export TEXT_OFFSET GZFLAGS
core-y += arch/arm64/kernel/ arch/arm64/mm/
core-$(CONFIG_KVM) += arch/arm64/kvm/
core-$(CONFIG_XEN) += arch/arm64/xen/
libs-y := arch/arm64/lib/ $(libs-y)
libs-y += $(LIBGCC)
......
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ARM64_KVM_ARM_H__
#define __ARM64_KVM_ARM_H__
#include <asm/types.h>
/* Hyp Configuration Register (HCR) bits */
#define HCR_ID (UL(1) << 33)
#define HCR_CD (UL(1) << 32)
#define HCR_RW_SHIFT 31
#define HCR_RW (UL(1) << HCR_RW_SHIFT)
#define HCR_TRVM (UL(1) << 30)
#define HCR_HCD (UL(1) << 29)
#define HCR_TDZ (UL(1) << 28)
#define HCR_TGE (UL(1) << 27)
#define HCR_TVM (UL(1) << 26)
#define HCR_TTLB (UL(1) << 25)
#define HCR_TPU (UL(1) << 24)
#define HCR_TPC (UL(1) << 23)
#define HCR_TSW (UL(1) << 22)
#define HCR_TAC (UL(1) << 21)
#define HCR_TIDCP (UL(1) << 20)
#define HCR_TSC (UL(1) << 19)
#define HCR_TID3 (UL(1) << 18)
#define HCR_TID2 (UL(1) << 17)
#define HCR_TID1 (UL(1) << 16)
#define HCR_TID0 (UL(1) << 15)
#define HCR_TWE (UL(1) << 14)
#define HCR_TWI (UL(1) << 13)
#define HCR_DC (UL(1) << 12)
#define HCR_BSU (3 << 10)
#define HCR_BSU_IS (UL(1) << 10)
#define HCR_FB (UL(1) << 9)
#define HCR_VA (UL(1) << 8)
#define HCR_VI (UL(1) << 7)
#define HCR_VF (UL(1) << 6)
#define HCR_AMO (UL(1) << 5)
#define HCR_IMO (UL(1) << 4)
#define HCR_FMO (UL(1) << 3)
#define HCR_PTW (UL(1) << 2)
#define HCR_SWIO (UL(1) << 1)
#define HCR_VM (UL(1) << 0)
/*
* The bits we set in HCR:
* RW: 64bit by default, can be overridden for 32bit VMs
* TAC: Trap ACTLR
* TSC: Trap SMC
* TSW: Trap cache operations by set/way
* TWI: Trap WFI
* TIDCP: Trap L2CTLR/L2ECTLR
* BSU_IS: Upgrade barriers to the inner shareable domain
* FB: Force broadcast of all maintenance operations
* AMO: Override CPSR.A and enable signaling with VA
* IMO: Override CPSR.I and enable signaling with VI
* FMO: Override CPSR.F and enable signaling with VF
* SWIO: Turn set/way invalidates into set/way clean+invalidate
*/
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
HCR_SWIO | HCR_TIDCP | HCR_RW)
#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
/* Hyp System Control Register (SCTLR_EL2) bits */
#define SCTLR_EL2_EE (1 << 25)
#define SCTLR_EL2_WXN (1 << 19)
#define SCTLR_EL2_I (1 << 12)
#define SCTLR_EL2_SA (1 << 3)
#define SCTLR_EL2_C (1 << 2)
#define SCTLR_EL2_A (1 << 1)
#define SCTLR_EL2_M 1
#define SCTLR_EL2_FLAGS (SCTLR_EL2_M | SCTLR_EL2_A | SCTLR_EL2_C | \
SCTLR_EL2_SA | SCTLR_EL2_I)
/* TCR_EL2 Registers bits */
#define TCR_EL2_TBI (1 << 20)
#define TCR_EL2_PS (7 << 16)
#define TCR_EL2_PS_40B (2 << 16)
#define TCR_EL2_TG0 (1 << 14)
#define TCR_EL2_SH0 (3 << 12)
#define TCR_EL2_ORGN0 (3 << 10)
#define TCR_EL2_IRGN0 (3 << 8)
#define TCR_EL2_T0SZ 0x3f
#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \
TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
#define TCR_EL2_FLAGS (TCR_EL2_PS_40B)
/* VTCR_EL2 Registers bits */
#define VTCR_EL2_PS_MASK (7 << 16)
#define VTCR_EL2_PS_40B (2 << 16)
#define VTCR_EL2_TG0_MASK (1 << 14)
#define VTCR_EL2_TG0_4K (0 << 14)
#define VTCR_EL2_TG0_64K (1 << 14)
#define VTCR_EL2_SH0_MASK (3 << 12)
#define VTCR_EL2_SH0_INNER (3 << 12)
#define VTCR_EL2_ORGN0_MASK (3 << 10)
#define VTCR_EL2_ORGN0_WBWA (1 << 10)
#define VTCR_EL2_IRGN0_MASK (3 << 8)
#define VTCR_EL2_IRGN0_WBWA (1 << 8)
#define VTCR_EL2_SL0_MASK (3 << 6)
#define VTCR_EL2_SL0_LVL1 (1 << 6)
#define VTCR_EL2_T0SZ_MASK 0x3f
#define VTCR_EL2_T0SZ_40B 24
#ifdef CONFIG_ARM64_64K_PAGES
/*
* Stage2 translation configuration:
* 40bits output (PS = 2)
* 40bits input (T0SZ = 24)
* 64kB pages (TG0 = 1)
* 2 level page tables (SL = 1)
*/
#define VTCR_EL2_FLAGS (VTCR_EL2_PS_40B | VTCR_EL2_TG0_64K | \
VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \
VTCR_EL2_T0SZ_40B)
#define VTTBR_X (38 - VTCR_EL2_T0SZ_40B)
#else
/*
* Stage2 translation configuration:
* 40bits output (PS = 2)
* 40bits input (T0SZ = 24)
* 4kB pages (TG0 = 0)
* 3 level page tables (SL = 1)
*/
#define VTCR_EL2_FLAGS (VTCR_EL2_PS_40B | VTCR_EL2_TG0_4K | \
VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \
VTCR_EL2_T0SZ_40B)
#define VTTBR_X (37 - VTCR_EL2_T0SZ_40B)
#endif
#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
#define VTTBR_VMID_SHIFT (48LLU)
#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT)
/* Hyp System Trap Register */
#define HSTR_EL2_TTEE (1 << 16)
#define HSTR_EL2_T(x) (1 << x)
/* Hyp Coprocessor Trap Register */
#define CPTR_EL2_TCPAC (1 << 31)
#define CPTR_EL2_TTA (1 << 20)
#define CPTR_EL2_TFP (1 << 10)
/* Hyp Debug Configuration Register bits */
#define MDCR_EL2_TDRA (1 << 11)
#define MDCR_EL2_TDOSA (1 << 10)
#define MDCR_EL2_TDA (1 << 9)
#define MDCR_EL2_TDE (1 << 8)
#define MDCR_EL2_HPME (1 << 7)
#define MDCR_EL2_TPM (1 << 6)
#define MDCR_EL2_TPMCR (1 << 5)
#define MDCR_EL2_HPMN_MASK (0x1F)
/* Exception Syndrome Register (ESR) bits */
#define ESR_EL2_EC_SHIFT (26)
#define ESR_EL2_EC (0x3fU << ESR_EL2_EC_SHIFT)
#define ESR_EL2_IL (1U << 25)
#define ESR_EL2_ISS (ESR_EL2_IL - 1)
#define ESR_EL2_ISV_SHIFT (24)
#define ESR_EL2_ISV (1U << ESR_EL2_ISV_SHIFT)
#define ESR_EL2_SAS_SHIFT (22)
#define ESR_EL2_SAS (3U << ESR_EL2_SAS_SHIFT)
#define ESR_EL2_SSE (1 << 21)
#define ESR_EL2_SRT_SHIFT (16)
#define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT)
#define ESR_EL2_SF (1 << 15)
#define ESR_EL2_AR (1 << 14)
#define ESR_EL2_EA (1 << 9)
#define ESR_EL2_CM (1 << 8)
#define ESR_EL2_S1PTW (1 << 7)
#define ESR_EL2_WNR (1 << 6)
#define ESR_EL2_FSC (0x3f)
#define ESR_EL2_FSC_TYPE (0x3c)
#define ESR_EL2_CV_SHIFT (24)
#define ESR_EL2_CV (1U << ESR_EL2_CV_SHIFT)
#define ESR_EL2_COND_SHIFT (20)
#define ESR_EL2_COND (0xfU << ESR_EL2_COND_SHIFT)
#define FSC_FAULT (0x04)
#define FSC_PERM (0x0c)
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
#define HPFAR_MASK (~0xFUL)
#define ESR_EL2_EC_UNKNOWN (0x00)
#define ESR_EL2_EC_WFI (0x01)
#define ESR_EL2_EC_CP15_32 (0x03)
#define ESR_EL2_EC_CP15_64 (0x04)
#define ESR_EL2_EC_CP14_MR (0x05)
#define ESR_EL2_EC_CP14_LS (0x06)
#define ESR_EL2_EC_FP_ASIMD (0x07)
#define ESR_EL2_EC_CP10_ID (0x08)
#define ESR_EL2_EC_CP14_64 (0x0C)
#define ESR_EL2_EC_ILL_ISS (0x0E)
#define ESR_EL2_EC_SVC32 (0x11)
#define ESR_EL2_EC_HVC32 (0x12)
#define ESR_EL2_EC_SMC32 (0x13)
#define ESR_EL2_EC_SVC64 (0x15)
#define ESR_EL2_EC_HVC64 (0x16)
#define ESR_EL2_EC_SMC64 (0x17)
#define ESR_EL2_EC_SYS64 (0x18)
#define ESR_EL2_EC_IABT (0x20)
#define ESR_EL2_EC_IABT_HYP (0x21)
#define ESR_EL2_EC_PC_ALIGN (0x22)
#define ESR_EL2_EC_DABT (0x24)
#define ESR_EL2_EC_DABT_HYP (0x25)
#define ESR_EL2_EC_SP_ALIGN (0x26)
#define ESR_EL2_EC_FP_EXC32 (0x28)
#define ESR_EL2_EC_FP_EXC64 (0x2C)
#define ESR_EL2_EC_SERRROR (0x2F)
#define ESR_EL2_EC_BREAKPT (0x30)
#define ESR_EL2_EC_BREAKPT_HYP (0x31)
#define ESR_EL2_EC_SOFTSTP (0x32)
#define ESR_EL2_EC_SOFTSTP_HYP (0x33)
#define ESR_EL2_EC_WATCHPT (0x34)
#define ESR_EL2_EC_WATCHPT_HYP (0x35)
#define ESR_EL2_EC_BKPT32 (0x38)
#define ESR_EL2_EC_VECTOR32 (0x3A)
#define ESR_EL2_EC_BRK64 (0x3C)
#define ESR_EL2_EC_xABT_xFSR_EXTABT 0x10
#endif /* __ARM64_KVM_ARM_H__ */
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__
/*
* 0 is reserved as an invalid value.
* Order *must* be kept in sync with the hyp switch code.
*/
#define MPIDR_EL1 1 /* MultiProcessor Affinity Register */
#define CSSELR_EL1 2 /* Cache Size Selection Register */
#define SCTLR_EL1 3 /* System Control Register */
#define ACTLR_EL1 4 /* Auxiliary Control Register */
#define CPACR_EL1 5 /* Coprocessor Access Control */
#define TTBR0_EL1 6 /* Translation Table Base Register 0 */
#define TTBR1_EL1 7 /* Translation Table Base Register 1 */
#define TCR_EL1 8 /* Translation Control Register */
#define ESR_EL1 9 /* Exception Syndrome Register */
#define AFSR0_EL1 10 /* Auxiliary Fault Status Register 0 */
#define AFSR1_EL1 11 /* Auxiliary Fault Status Register 1 */
#define FAR_EL1 12 /* Fault Address Register */
#define MAIR_EL1 13 /* Memory Attribute Indirection Register */
#define VBAR_EL1 14 /* Vector Base Address Register */
#define CONTEXTIDR_EL1 15 /* Context ID Register */
#define TPIDR_EL0 16 /* Thread ID, User R/W */
#define TPIDRRO_EL0 17 /* Thread ID, User R/O */
#define TPIDR_EL1 18 /* Thread ID, Privileged */
#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */
#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
/* 32bit specific registers. Keep them at the end of the range */
#define DACR32_EL2 21 /* Domain Access Control Register */
#define IFSR32_EL2 22 /* Instruction Fault Status Register */
#define FPEXC32_EL2 23 /* Floating-Point Exception Control Register */
#define DBGVCR32_EL2 24 /* Debug Vector Catch Register */
#define TEECR32_EL1 25 /* ThumbEE Configuration Register */
#define TEEHBR32_EL1 26 /* ThumbEE Handler Base Register */
#define NR_SYS_REGS 27
/* 32bit mapping */
#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
#define c0_CSSELR (CSSELR_EL1 * 2)/* Cache Size Selection Register */
#define c1_SCTLR (SCTLR_EL1 * 2) /* System Control Register */
#define c1_ACTLR (ACTLR_EL1 * 2) /* Auxiliary Control Register */
#define c1_CPACR (CPACR_EL1 * 2) /* Coprocessor Access Control */
#define c2_TTBR0 (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */
#define c2_TTBR0_high (c2_TTBR0 + 1) /* TTBR0 top 32 bits */
#define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */
#define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */
#define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */
#define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */
#define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */
#define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */
#define c5_ADFSR (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */
#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */
#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */
#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */
#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */
#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
#define c13_CID (CONTEXTIDR_EL1 * 2) /* Context ID Register */
#define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */
#define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
#define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */
#define c10_AMAIR (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
#define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
#define NR_CP15_REGS (NR_SYS_REGS * 2)
#define ARM_EXCEPTION_IRQ 0
#define ARM_EXCEPTION_TRAP 1
#ifndef __ASSEMBLY__
struct kvm;
struct kvm_vcpu;
extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];
extern char __kvm_hyp_vector[];
extern char __kvm_hyp_code_start[];
extern char __kvm_hyp_code_end[];
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
#endif
#endif /* __ARM_KVM_ASM_H__ */
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* Derived from arch/arm/include/asm/kvm_coproc.h
* Copyright (C) 2012 Rusty Russell IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ARM64_KVM_COPROC_H__
#define __ARM64_KVM_COPROC_H__
#include <linux/kvm_host.h>
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
struct kvm_sys_reg_table {
const struct sys_reg_desc *table;
size_t num;
};
struct kvm_sys_reg_target_table {
struct kvm_sys_reg_table table64;
struct kvm_sys_reg_table table32;
};
void kvm_register_target_sys_reg_table(unsigned int target,
struct kvm_sys_reg_target_table *table);
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
#define kvm_coproc_table_init kvm_sys_reg_table_init
void kvm_sys_reg_table_init(void);
struct kvm_one_reg;
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
#endif /* __ARM64_KVM_COPROC_H__ */
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* Derived from arch/arm/include/kvm_emulate.h
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__
#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}
static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}
static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}
static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}
static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu))
return kvm_condition_valid32(vcpu);
return true;
}
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
if (vcpu_mode_is_32bit(vcpu))
kvm_skip_instr32(vcpu, is_wide_instr);
else
*vcpu_pc(vcpu) += 4;
}
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
}
static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
{
if (vcpu_mode_is_32bit(vcpu))
return vcpu_reg32(vcpu, reg_num);
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}
/* Get vcpu SPSR for current mode */
static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu))
return vcpu_spsr32(vcpu);
return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}
static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
if (vcpu_mode_is_32bit(vcpu))
return mode > COMPAT_PSR_MODE_USR;
return mode != PSR_MODE_EL0t;
}
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.esr_el2;
}
static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.far_el2;
}
static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV);
}
static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_WNR);
}
static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SSE);
}
static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
return (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SRT_MASK) >> ESR_EL2_SRT_SHIFT;
}
static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EA);
}
static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_S1PTW);
}
static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT);
}
/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_IL);
}
static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT;
}
static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_trap_get_class(vcpu) == ESR_EL2_EC_IABT;
}
static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
}
#endif /* __ARM64_KVM_EMULATE_H__ */
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* Derived from arch/arm/include/asm/kvm_host.h:
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#define KVM_MAX_VCPUS 4
#define KVM_USER_MEM_SLOTS 32
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#define KVM_VCPU_MAX_FEATURES 2
/* We don't currently support large pages. */
#define KVM_HPAGE_GFN_SHIFT(x) 0
#define KVM_NR_PAGE_SIZES 1
#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
struct kvm_vcpu;
int kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
int kvm_arch_dev_ioctl_check_extension(long ext);
struct kvm_arch {
/* The VMID generation used for the virt. memory system */
u64 vmid_gen;
u32 vmid;
/* 1-level 2nd stage table and lock */
spinlock_t pgd_lock;
pgd_t *pgd;
/* VTTBR value associated with above pgd and vmid */
u64 vttbr;
/* Interrupt controller */
struct vgic_dist vgic;
/* Timer */
struct arch_timer_kvm timer;
};
#define KVM_NR_MEM_OBJS 40
/*
* We don't want allocation failures within the mmu code, so we preallocate
* enough memory for a single page fault in a cache.
*/
struct kvm_mmu_memory_cache {
int nobjs;
void *objects[KVM_NR_MEM_OBJS];
};
struct kvm_vcpu_fault_info {
u32 esr_el2; /* Hyp Syndrome Register */
u64 far_el2; /* Hyp Fault Address Register */
u64 hpfar_el2; /* Hyp IPA Fault Address Register */
};
struct kvm_cpu_context {
struct kvm_regs gp_regs;
union {
u64 sys_regs[NR_SYS_REGS];
u32 cp15[NR_CP15_REGS];
};
};
typedef struct kvm_cpu_context kvm_cpu_context_t;
struct kvm_vcpu_arch {
struct kvm_cpu_context ctxt;
/* HYP configuration */
u64 hcr_el2;
/* Exception Information */
struct kvm_vcpu_fault_info fault;
/* Pointer to host CPU context */
kvm_cpu_context_t *host_cpu_context;
/* VGIC state */
struct vgic_cpu vgic_cpu;
struct arch_timer_cpu timer_cpu;
/*
* Anything that is not used directly from assembly code goes
* here.
*/
/* dcache set/way operation pending */
int last_pcpu;
cpumask_t require_dcache_flush;
/* Don't run the guest */
bool pause;
/* IO related fields */
struct kvm_decode mmio_decode;
/* Interrupt related fields */
u64 irq_lines; /* IRQ and FIQ levels */
/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
/* Target CPU and feature flags */
u32 target;
DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
/* Detect first run of a vcpu */
bool has_run_once;
};
#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
#define vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)])
#define vcpu_cp15(v,r) ((v)->arch.ctxt.cp15[(r)])
struct kvm_vm_stat {
u32 remote_tlb_flush;
};
struct kvm_vcpu_stat {
u32 halt_wakeup;
};
struct kvm_vcpu_init;
int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
struct kvm_one_reg;
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
#define KVM_ARCH_WANT_MMU_NOTIFIER
struct kvm;
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
/* We do not have shadow page tables, hence the empty hooks */
static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
return 0;
}
static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
return 0;
}
struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
u64 kvm_call_hyp(void *hypfn, ...);
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index);
int kvm_perf_init(void);
int kvm_perf_teardown(void);
static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
phys_addr_t pgd_ptr,
unsigned long hyp_stack_ptr,
unsigned long vector_ptr)
{
/*
* Call initialization code, and switch to the full blown
* HYP code.
*/
kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
hyp_stack_ptr, vector_ptr);
}
#endif /* __ARM64_KVM_HOST_H__ */
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ARM64_KVM_MMIO_H__
#define __ARM64_KVM_MMIO_H__
#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
/*
* This is annoying. The mmio code requires this, even if we don't
* need any decoding. To be fixed.
*/
struct kvm_decode {
unsigned long rt;
bool sign_extend;
};
/*
* The in-kernel MMIO emulation code wants to use a copy of run->mmio,
* which is an anonymous type. Use our own type instead.
*/
struct kvm_exit_mmio {
phys_addr_t phys_addr;
u8 data[8];
u32 len;
bool is_write;
};
static inline void kvm_prepare_mmio(struct kvm_run *run,
struct kvm_exit_mmio *mmio)
{
run->mmio.phys_addr = mmio->phys_addr;
run->mmio.len = mmio->len;
run->mmio.is_write = mmio->is_write;
memcpy(run->mmio.data, mmio->data, mmio->len);
run->exit_reason = KVM_EXIT_MMIO;
}
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa);
#endif /* __ARM64_KVM_MMIO_H__ */
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__
#include <asm/page.h>
#include <asm/memory.h>
/*
* As we only have the TTBR0_EL2 register, we cannot express
* "negative" addresses. This makes it impossible to directly share
* mappings with the kernel.
*
* Instead, give the HYP mode its own VA region at a fixed offset from
* the kernel by just masking the top bits (which are all ones for a
* kernel address).
*/
#define HYP_PAGE_OFFSET_SHIFT VA_BITS
#define HYP_PAGE_OFFSET_MASK ((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
#define HYP_PAGE_OFFSET (PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
/*
* Our virtual mapping for the idmap-ed MMU-enable code. Must be
* shared across all the page-tables. Conveniently, we use the last
* possible page, where no kernel mapping will ever exist.
*/
#define TRAMPOLINE_VA (HYP_PAGE_OFFSET_MASK & PAGE_MASK)
#ifdef __ASSEMBLY__
/*
* Convert a kernel VA into a HYP VA.
* reg: VA to be converted.
*/
.macro kern_hyp_va reg
and \reg, \reg, #HYP_PAGE_OFFSET_MASK
.endm
#else
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
/*
* Align KVM with the kernel's view of physical memory. Should be
* 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration.
*/
#define KVM_PHYS_SHIFT PHYS_MASK_SHIFT
#define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL)
/* Make sure we get the right size, and thus the right alignment */
#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT))
#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
phys_addr_t pa, unsigned long size);
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
#define kvm_set_pte(ptep, pte) set_pte(ptep, pte)
static inline bool kvm_is_write_fault(unsigned long esr)
{
unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT;
if (esr_ec == ESR_EL2_EC_IABT)
return false;
if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR))
return false;
return true;
}
static inline void kvm_clean_dcache_area(void *addr, size_t size) {}
static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}
static inline void kvm_set_s2pte_writable(pte_t *pte)
{
pte_val(*pte) |= PTE_S2_RDWR;
}
struct kvm;
static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
{
if (!icache_is_aliasing()) { /* PIPT */
unsigned long hva = gfn_to_hva(kvm, gfn);
flush_icache_range(hva, hva + PAGE_SIZE);
} else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */
/* any kind of VIPT cache */
__flush_icache_all();
}
}
#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ARM64_KVM_PSCI_H__
#define __ARM64_KVM_PSCI_H__
bool kvm_psci_call(struct kvm_vcpu *vcpu);
#endif /* __ARM64_KVM_PSCI_H__ */
@@ -90,6 +90,12 @@
#define MT_NORMAL_NC 3
#define MT_NORMAL 4
/*
* Memory types for Stage-2 translation
*/
#define MT_S2_NORMAL 0xf
#define MT_S2_DEVICE_nGnRE 0x1
#ifndef __ASSEMBLY__
extern phys_addr_t memstart_addr;
......
@@ -35,6 +35,7 @@
/*
* Section
*/
#define PMD_SECT_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
#define PMD_SECT_NG (_AT(pmdval_t, 1) << 11)
@@ -67,6 +68,24 @@
#define PTE_ATTRINDX(t) (_AT(pteval_t, (t)) << 2)
#define PTE_ATTRINDX_MASK (_AT(pteval_t, 7) << 2)
/*
* 2nd stage PTE definitions
*/
#define PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[2:1] */
#define PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
/*
* Memory Attribute override for Stage-2 (MemAttr[3:0])
*/
#define PTE_S2_MEMATTR(t) (_AT(pteval_t, (t)) << 2)
#define PTE_S2_MEMATTR_MASK (_AT(pteval_t, 0xf) << 2)
/*
* EL2/HYP PTE/PMD definitions
*/
#define PMD_HYP PMD_SECT_USER
#define PTE_HYP PTE_USER
/*
* 40-bit physical address supported.
*/
......
@@ -76,6 +76,12 @@ extern pgprot_t pgprot_default;
#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
#define PAGE_HYP _MOD_PROT(pgprot_default, PTE_HYP)
#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
#define PAGE_S2 __pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
#define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE)
#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
@@ -197,6 +203,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
#define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
PMD_TYPE_TABLE)
#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
PMD_TYPE_SECT)
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
*pmdp = pmd;
......
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* Derived from arch/arm/include/uapi/asm/kvm.h:
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ARM_KVM_H__
#define __ARM_KVM_H__
#define KVM_SPSR_EL1 0
#define KVM_SPSR_SVC KVM_SPSR_EL1
#define KVM_SPSR_ABT 1
#define KVM_SPSR_UND 2
#define KVM_SPSR_IRQ 3
#define KVM_SPSR_FIQ 4
#define KVM_NR_SPSR 5
#ifndef __ASSEMBLY__
#include <asm/types.h>
#include <asm/ptrace.h>
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
#define KVM_REG_SIZE(id) \
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
struct kvm_regs {
struct user_pt_regs regs; /* sp = sp_el0 */
__u64 sp_el1;
__u64 elr_el1;
__u64 spsr[KVM_NR_SPSR];
struct user_fpsimd_state fp_regs;
};
/* Supported Processor Types */
#define KVM_ARM_TARGET_AEM_V8 0
#define KVM_ARM_TARGET_FOUNDATION_V8 1
#define KVM_ARM_TARGET_CORTEX_A57 2
#define KVM_ARM_NUM_TARGETS 3
/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
#define KVM_ARM_DEVICE_TYPE_SHIFT 0
#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
#define KVM_ARM_DEVICE_ID_SHIFT 16
#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT)
/* Supported device IDs */
#define KVM_ARM_DEVICE_VGIC_V2 0
/* Supported VGIC address types */
#define KVM_VGIC_V2_ADDR_TYPE_DIST 0
#define KVM_VGIC_V2_ADDR_TYPE_CPU 1
#define KVM_VGIC_V2_DIST_SIZE 0x1000
#define KVM_VGIC_V2_CPU_SIZE 0x2000
#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
struct kvm_vcpu_init {
__u32 target;
__u32 features[7];
};
struct kvm_sregs {
};
struct kvm_fpu {
};
struct kvm_guest_debug_arch {
};
struct kvm_debug_exit_arch {
};
struct kvm_sync_regs {
};
struct kvm_arch_memory_slot {
};
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
#define KVM_REG_ARM_COPROC_SHIFT 16
/* Normal registers are mapped as coprocessor 16. */
#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT)
#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / sizeof(__u32))
/* Some registers need more space to represent values. */
#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT)
#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00
#define KVM_REG_ARM_DEMUX_ID_SHIFT 8
#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT)
#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF
#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0
/* AArch64 system registers */
#define KVM_REG_ARM64_SYSREG (0x0013 << KVM_REG_ARM_COPROC_SHIFT)
#define KVM_REG_ARM64_SYSREG_OP0_MASK 0x000000000000c000
#define KVM_REG_ARM64_SYSREG_OP0_SHIFT 14
#define KVM_REG_ARM64_SYSREG_OP1_MASK 0x0000000000003800
#define KVM_REG_ARM64_SYSREG_OP1_SHIFT 11
#define KVM_REG_ARM64_SYSREG_CRN_MASK 0x0000000000000780
#define KVM_REG_ARM64_SYSREG_CRN_SHIFT 7
#define KVM_REG_ARM64_SYSREG_CRM_MASK 0x0000000000000078
#define KVM_REG_ARM64_SYSREG_CRM_SHIFT 3
#define KVM_REG_ARM64_SYSREG_OP2_MASK 0x0000000000000007
#define KVM_REG_ARM64_SYSREG_OP2_SHIFT 0
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
#define KVM_ARM_IRQ_TYPE_MASK 0xff
#define KVM_ARM_IRQ_VCPU_SHIFT 16
#define KVM_ARM_IRQ_VCPU_MASK 0xff
#define KVM_ARM_IRQ_NUM_SHIFT 0
#define KVM_ARM_IRQ_NUM_MASK 0xffff
/* irq_type field */
#define KVM_ARM_IRQ_TYPE_CPU 0
#define KVM_ARM_IRQ_TYPE_SPI 1
#define KVM_ARM_IRQ_TYPE_PPI 2
/* out-of-kernel GIC cpu interrupt injection irq_number field */
#define KVM_ARM_IRQ_CPU_IRQ 0
#define KVM_ARM_IRQ_CPU_FIQ 1
/* Highest supported SPI, from VGIC_NR_IRQS */
#define KVM_ARM_IRQ_GIC_MAX 127
/* PSCI interface */
#define KVM_PSCI_FN_BASE 0x95c1ba5e
#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n))
#define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0)
#define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1)
#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2)
#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3)
#define KVM_PSCI_RET_SUCCESS 0
#define KVM_PSCI_RET_NI ((unsigned long)-1)
#define KVM_PSCI_RET_INVAL ((unsigned long)-2)
#define KVM_PSCI_RET_DENIED ((unsigned long)-3)
#endif
#endif /* __ARM_KVM_H__ */
@@ -104,5 +104,38 @@ int main(void)
BLANK();
DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
BLANK();
#ifdef CONFIG_KVM_ARM_HOST
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
DEFINE(CPU_SP_EL1, offsetof(struct kvm_regs, sp_el1));
DEFINE(CPU_ELR_EL1, offsetof(struct kvm_regs, elr_el1));
DEFINE(CPU_SPSR, offsetof(struct kvm_regs, spsr));
DEFINE(CPU_SYSREGS, offsetof(struct kvm_cpu_context, sys_regs));
DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2));
DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2));
DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff));
DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr));
DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr));
DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr));
DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr));
DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr));
DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr));
DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr));
DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
#endif
return 0;
}
@@ -17,6 +17,19 @@ ENTRY(stext)
jiffies = jiffies_64;
#define HYPERVISOR_TEXT \
/* \
* Force the alignment to be compatible with \
* the vectors requirements \
*/ \
. = ALIGN(2048); \
VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
*(.hyp.idmap.text) \
VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \
VMLINUX_SYMBOL(__hyp_text_start) = .; \
*(.hyp.text) \
VMLINUX_SYMBOL(__hyp_text_end) = .;
SECTIONS
{
/*
@@ -49,6 +62,7 @@ SECTIONS
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
HYPERVISOR_TEXT
*(.fixup)
*(.gnu.warning)
. = ALIGN(16);
@@ -116,3 +130,9 @@ SECTIONS
STABS_DEBUG
.comment 0 : { *(.comment) }
}
/*
* The HYP init code can't be more than a page long.
*/
ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end),
"HYP init code too big")
#
# Makefile for Kernel-based Virtual Machine module
#
ccflags-y += -Ivirt/kvm -Iarch/arm64/kvm
CFLAGS_arm.o := -I.
CFLAGS_mmu.o := -I.
KVM=../../../virt/kvm
ARM=../../../arch/arm/kvm
obj-$(CONFIG_KVM_ARM_HOST) += kvm.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
/*
* (not much of an) Emulation layer for 32bit guests.
*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* based on arch/arm/kvm/emulate.c
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
/*
* stolen from arch/arm/kernel/opcodes.c
*
* condition code lookup table
* index into the table is test code: EQ, NE, ... LT, GT, AL, NV
*
* bit position in short is condition code: NZCV
*/
static const unsigned short cc_map[16] = {
0xF0F0, /* EQ == Z set */
0x0F0F, /* NE */
0xCCCC, /* CS == C set */
0x3333, /* CC */
0xFF00, /* MI == N set */
0x00FF, /* PL */
0xAAAA, /* VS == V set */
0x5555, /* VC */
0x0C0C, /* HI == C set && Z clear */
0xF3F3, /* LS == C clear || Z set */
0xAA55, /* GE == (N==V) */
0x55AA, /* LT == (N!=V) */
0x0A05, /* GT == (!Z && (N==V)) */
0xF5FA, /* LE == (Z || (N!=V)) */
0xFFFF, /* AL always */
0 /* NV */
};
static int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
u32 esr = kvm_vcpu_get_hsr(vcpu);
if (esr & ESR_EL2_CV)
return (esr & ESR_EL2_COND) >> ESR_EL2_COND_SHIFT;
return -1;
}
/*
* Check if a trapped instruction should have been executed or not.
*/
bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
{
unsigned long cpsr;
u32 cpsr_cond;
int cond;
/* Top two bits non-zero? Unconditional. */
if (kvm_vcpu_get_hsr(vcpu) >> 30)
return true;
/* Is condition field valid? */
cond = kvm_vcpu_get_condition(vcpu);
if (cond == 0xE)
return true;
cpsr = *vcpu_cpsr(vcpu);
if (cond < 0) {
/* This can happen in Thumb mode: examine IT state. */
unsigned long it;
it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
/* it == 0 => unconditional. */
if (it == 0)
return true;
/* The cond for this insn works out as the top 4 bits. */
cond = (it >> 4);
}
cpsr_cond = cpsr >> 28;
if (!((cc_map[cond] >> cpsr_cond) & 1))
return false;
return true;
}
/**
* adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
* @vcpu: The VCPU pointer
*
* When exceptions occur while instructions are executed in Thumb IF-THEN
* blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
* to do this little bit of work manually. The fields map like this:
*
* IT[7:0] -> CPSR[26:25],CPSR[15:10]
*/
static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{
unsigned long itbits, cond;
unsigned long cpsr = *vcpu_cpsr(vcpu);
bool is_arm = !(cpsr & COMPAT_PSR_T_BIT);
BUG_ON(is_arm && (cpsr & COMPAT_PSR_IT_MASK));
if (!(cpsr & COMPAT_PSR_IT_MASK))
return;
cond = (cpsr & 0xe000) >> 13;
itbits = (cpsr & 0x1c00) >> (10 - 2);
itbits |= (cpsr & (0x3 << 25)) >> 25;
/* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
if ((itbits & 0x7) == 0)
itbits = cond = 0;
else
itbits = (itbits << 1) & 0x1f;
cpsr &= ~COMPAT_PSR_IT_MASK;
cpsr |= cond << 13;
cpsr |= (itbits & 0x1c) << (10 - 2);
cpsr |= (itbits & 0x3) << 25;
*vcpu_cpsr(vcpu) = cpsr;
}
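/*
 * Example of the advance step in kvm_adjust_itstate(): with
 * IT[4:0] == 0b00100, the low three bits are non-zero, so the field is
 * shifted left to 0b01000; on the next emulated instruction of the block
 * the low three bits are zero and the whole ITSTATE (cond and itbits)
 * is cleared.
 */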
/**
* kvm_skip_instr - skip a trapped instruction and proceed to the next
* @vcpu: The vcpu pointer
*/
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
bool is_thumb;
is_thumb = !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_T_BIT);
if (is_thumb && !is_wide_instr)
*vcpu_pc(vcpu) += 2;
else
*vcpu_pc(vcpu) += 4;
kvm_adjust_itstate(vcpu);
}
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* Derived from arch/arm/kvm/guest.c:
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <asm/cputype.h>
#include <asm/uaccess.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
return 0;
}
static u64 core_reg_offset_from_id(u64 id)
{
return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
/*
* Because the kvm_regs structure is a mix of 32, 64 and
* 128bit fields, we index it as if it was a 32bit
* array. Hence below, nr_regs is the number of entries, and
* off the index in the "array".
*/
__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
struct kvm_regs *regs = vcpu_gp_regs(vcpu);
int nr_regs = sizeof(*regs) / sizeof(__u32);
u32 off;
/* Our ID is an index into the kvm_regs struct. */
off = core_reg_offset_from_id(reg->id);
if (off >= nr_regs ||
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
}
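/*
 * For illustration: userspace addresses the guest PC with the id
 * KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
 * KVM_REG_ARM_CORE_REG(regs.pc); core_reg_offset_from_id() strips the
 * arch/size/coproc fields and leaves the __u32 index of regs.pc within
 * struct kvm_regs, which is what get_core_reg()/set_core_reg() use.
 */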
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
struct kvm_regs *regs = vcpu_gp_regs(vcpu);
int nr_regs = sizeof(*regs) / sizeof(__u32);
__uint128_t tmp;
void *valp = &tmp;
u64 off;
int err = 0;
/* Our ID is an index into the kvm_regs struct. */
off = core_reg_offset_from_id(reg->id);
if (off >= nr_regs ||
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
return -EINVAL;
if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
err = -EFAULT;
goto out;
}
if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK;
switch (mode) {
case COMPAT_PSR_MODE_USR:
case COMPAT_PSR_MODE_FIQ:
case COMPAT_PSR_MODE_IRQ:
case COMPAT_PSR_MODE_SVC:
case COMPAT_PSR_MODE_ABT:
case COMPAT_PSR_MODE_UND:
case PSR_MODE_EL0t:
case PSR_MODE_EL1t:
case PSR_MODE_EL1h:
break;
default:
err = -EINVAL;
goto out;
}
}
memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
out:
return err;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
return -EINVAL;
}
static unsigned long num_core_regs(void)
{
return sizeof(struct kvm_regs) / sizeof(__u32);
}
/**
* kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
*
* This is for all registers.
*/
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu);
}
/**
* kvm_arm_copy_reg_indices - get indices of all registers.
*
* We do core registers right here, then we append system regs.
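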
*/
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
unsigned int i;
const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
if (put_user(core_reg | i, uindices))
return -EFAULT;
uindices++;
}
return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}
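/*
 * Sketch of the usual userspace consumer of the indices produced above,
 * via the generic KVM_GET_REG_LIST vcpu ioctl (nothing arm64 specific is
 * assumed beyond what this file provides):
 *
 *	struct kvm_reg_list hdr = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &hdr);	(fails with E2BIG, sets hdr.n)
 *	list = malloc(sizeof(*list) + hdr.n * sizeof(__u64));
 *	list->n = hdr.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);	(list->reg[] now filled)
 */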
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
/* We currently use nothing arch-specific in upper 32 bits */
if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
return -EINVAL;
/* Register group 16 means we want a core register. */
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return get_core_reg(vcpu, reg);
return kvm_arm_sys_reg_get_reg(vcpu, reg);
}
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
/* We currently use nothing arch-specific in upper 32 bits */
if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
return -EINVAL;
/* Register group 16 means we set a core register. */
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return set_core_reg(vcpu, reg);
return kvm_arm_sys_reg_set_reg(vcpu, reg);
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
return -EINVAL;
}
int __attribute_const__ kvm_target_cpu(void)
{
unsigned long implementor = read_cpuid_implementor();
unsigned long part_number = read_cpuid_part_number();
if (implementor != ARM_CPU_IMP_ARM)
return -EINVAL;
switch (part_number) {
case ARM_CPU_PART_AEM_V8:
return KVM_ARM_TARGET_AEM_V8;
case ARM_CPU_PART_FOUNDATION:
return KVM_ARM_TARGET_FOUNDATION_V8;
case ARM_CPU_PART_CORTEX_A57:
/* Currently handled by the generic backend */
return KVM_ARM_TARGET_CORTEX_A57;
default:
return -EINVAL;
}
}
int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_init *init)
{
unsigned int i;
int phys_target = kvm_target_cpu();
if (init->target != phys_target)
return -EINVAL;
vcpu->arch.target = phys_target;
bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
for (i = 0; i < sizeof(init->features) * 8; i++) {
if (init->features[i / 32] & (1 << (i % 32))) {
if (i >= KVM_VCPU_MAX_FEATURES)
return -ENOENT;
set_bit(i, vcpu->arch.features);
}
}
/* Now we know what it is, we can reset it. */
return kvm_reset_vcpu(vcpu);
}
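/*
 * kvm_vcpu_set_target() is reached from the KVM_ARM_VCPU_INIT vcpu ioctl.
 * A minimal userspace sketch, requesting a 32bit EL1 guest on the
 * Foundation model target purely as an example:
 *
 *	struct kvm_vcpu_init init = {
 *		.target = KVM_ARM_TARGET_FOUNDATION_V8,
 *		.features[0] = 1 << KVM_ARM_VCPU_EL1_32BIT,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
 */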
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr)
{
return -EINVAL;
}
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* Derived from arch/arm/kvm/handle_exit.c:
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_psci.h>
typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
if (kvm_psci_call(vcpu))
return 1;
kvm_inject_undefined(vcpu);
return 1;
}
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
if (kvm_psci_call(vcpu))
return 1;
kvm_inject_undefined(vcpu);
return 1;
}
/**
* kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
* @vcpu: the vcpu pointer
*
* Simply call kvm_vcpu_block(), which will halt execution of
* world-switches and schedule other host processes until there is an
* incoming IRQ or FIQ to the VM.
*/
static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
kvm_vcpu_block(vcpu);
return 1;
}
static exit_handle_fn arm_exit_handlers[] = {
[ESR_EL2_EC_WFI] = kvm_handle_wfi,
[ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32,
[ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64,
[ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_access,
[ESR_EL2_EC_CP14_LS] = kvm_handle_cp14_load_store,
[ESR_EL2_EC_CP14_64] = kvm_handle_cp14_access,
[ESR_EL2_EC_HVC32] = handle_hvc,
[ESR_EL2_EC_SMC32] = handle_smc,
[ESR_EL2_EC_HVC64] = handle_hvc,
[ESR_EL2_EC_SMC64] = handle_smc,
[ESR_EL2_EC_SYS64] = kvm_handle_sys_reg,
[ESR_EL2_EC_IABT] = kvm_handle_guest_abort,
[ESR_EL2_EC_DABT] = kvm_handle_guest_abort,
};
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
!arm_exit_handlers[hsr_ec]) {
kvm_err("Unkown exception class: hsr: %#08x\n",
(unsigned int)kvm_vcpu_get_hsr(vcpu));
BUG();
}
return arm_exit_handlers[hsr_ec];
}
/*
* Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
* proper exit to userspace.
*/
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index)
{
exit_handle_fn exit_handler;
switch (exception_index) {
case ARM_EXCEPTION_IRQ:
return 1;
case ARM_EXCEPTION_TRAP:
/*
* See ARM ARM B1.14.1: "Hyp traps on instructions
* that fail their condition code check"
*/
if (!kvm_condition_valid(vcpu)) {
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 1;
}
exit_handler = kvm_get_exit_handler(vcpu);
return exit_handler(vcpu, run);
default:
kvm_pr_unimpl("Unsupported exception type: %d",
exception_index);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return 0;
}
}
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
.text
.pushsection .hyp.idmap.text, "ax"
.align 11
ENTRY(__kvm_hyp_init)
ventry __invalid // Synchronous EL2t
ventry __invalid // IRQ EL2t
ventry __invalid // FIQ EL2t
ventry __invalid // Error EL2t
ventry __invalid // Synchronous EL2h
ventry __invalid // IRQ EL2h
ventry __invalid // FIQ EL2h
ventry __invalid // Error EL2h
ventry __do_hyp_init // Synchronous 64-bit EL1
ventry __invalid // IRQ 64-bit EL1
ventry __invalid // FIQ 64-bit EL1
ventry __invalid // Error 64-bit EL1
ventry __invalid // Synchronous 32-bit EL1
ventry __invalid // IRQ 32-bit EL1
ventry __invalid // FIQ 32-bit EL1
ventry __invalid // Error 32-bit EL1
__invalid:
b .
/*
* x0: HYP boot pgd
* x1: HYP pgd
* x2: HYP stack
* x3: HYP vectors
*/
__do_hyp_init:
msr ttbr0_el2, x0
mrs x4, tcr_el1
ldr x5, =TCR_EL2_MASK
and x4, x4, x5
ldr x5, =TCR_EL2_FLAGS
orr x4, x4, x5
msr tcr_el2, x4
ldr x4, =VTCR_EL2_FLAGS
msr vtcr_el2, x4
mrs x4, mair_el1
msr mair_el2, x4
isb
mov x4, #SCTLR_EL2_FLAGS
msr sctlr_el2, x4
isb
/* MMU is now enabled. Get ready for the trampoline dance */
ldr x4, =TRAMPOLINE_VA
adr x5, target
bfi x4, x5, #0, #PAGE_SHIFT
br x4
target: /* We're now in the trampoline code, switch page tables */
msr ttbr0_el2, x1
isb
/* Invalidate the old TLBs */
tlbi alle2
dsb sy
/* Set the stack and new vectors */
kern_hyp_va x2
mov sp, x2
kern_hyp_va x3
msr vbar_el2, x3
/* Hello, World! */
eret
ENDPROC(__kvm_hyp_init)
.ltorg
.popsection
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/linkage.h>
#include <linux/irqchip/arm-gic.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x) CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x) (CPU_SYSREGS + 8*x)
.text
.pushsection .hyp.text, "ax"
.align PAGE_SHIFT
__kvm_hyp_code_start:
.globl __kvm_hyp_code_start
.macro save_common_regs
// x2: base address for cpu context
// x3: tmp register
add x3, x2, #CPU_XREG_OFFSET(19)
stp x19, x20, [x3]
stp x21, x22, [x3, #16]
stp x23, x24, [x3, #32]
stp x25, x26, [x3, #48]
stp x27, x28, [x3, #64]
stp x29, lr, [x3, #80]
mrs x19, sp_el0
mrs x20, elr_el2 // EL1 PC
mrs x21, spsr_el2 // EL1 pstate
stp x19, x20, [x3, #96]
str x21, [x3, #112]
mrs x22, sp_el1
mrs x23, elr_el1
mrs x24, spsr_el1
str x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
str x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
str x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
.endm
.macro restore_common_regs
// x2: base address for cpu context
// x3: tmp register
ldr x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
ldr x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
ldr x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
msr sp_el1, x22
msr elr_el1, x23
msr spsr_el1, x24
add x3, x2, #CPU_XREG_OFFSET(31) // SP_EL0
ldp x19, x20, [x3]
ldr x21, [x3, #16]
msr sp_el0, x19
msr elr_el2, x20 // EL1 PC
msr spsr_el2, x21 // EL1 pstate
add x3, x2, #CPU_XREG_OFFSET(19)
ldp x19, x20, [x3]
ldp x21, x22, [x3, #16]
ldp x23, x24, [x3, #32]
ldp x25, x26, [x3, #48]
ldp x27, x28, [x3, #64]
ldp x29, lr, [x3, #80]
.endm
.macro save_host_regs
save_common_regs
.endm
.macro restore_host_regs
restore_common_regs
.endm
.macro save_fpsimd
// x2: cpu context address
// x3, x4: tmp regs
add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
fpsimd_save x3, 4
.endm
.macro restore_fpsimd
// x2: cpu context address
// x3, x4: tmp regs
add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
fpsimd_restore x3, 4
.endm
.macro save_guest_regs
// x0 is the vcpu address
// x1 is the return code, do not corrupt!
// x2 is the cpu context
// x3 is a tmp register
// Guest's x0-x3 are on the stack
// Compute base to save registers
add x3, x2, #CPU_XREG_OFFSET(4)
stp x4, x5, [x3]
stp x6, x7, [x3, #16]
stp x8, x9, [x3, #32]
stp x10, x11, [x3, #48]
stp x12, x13, [x3, #64]
stp x14, x15, [x3, #80]
stp x16, x17, [x3, #96]
str x18, [x3, #112]
pop x6, x7 // x2, x3
pop x4, x5 // x0, x1
add x3, x2, #CPU_XREG_OFFSET(0)
stp x4, x5, [x3]
stp x6, x7, [x3, #16]
save_common_regs
.endm
.macro restore_guest_regs
// x0 is the vcpu address.
// x2 is the cpu context
// x3 is a tmp register
// Prepare x0-x3 for later restore
add x3, x2, #CPU_XREG_OFFSET(0)
ldp x4, x5, [x3]
ldp x6, x7, [x3, #16]
push x4, x5 // Push x0-x3 on the stack
push x6, x7
// x4-x18
ldp x4, x5, [x3, #32]
ldp x6, x7, [x3, #48]
ldp x8, x9, [x3, #64]
ldp x10, x11, [x3, #80]
ldp x12, x13, [x3, #96]
ldp x14, x15, [x3, #112]
ldp x16, x17, [x3, #128]
ldr x18, [x3, #144]
// x19-x29, lr, sp*, elr*, spsr*
restore_common_regs
// Last bits of the 64bit state
pop x2, x3
pop x0, x1
// Do not touch any register after this!
.endm
/*
* Macros to perform system register save/restore.
*
* Ordering here is absolutely critical, and must be kept consistent
* in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
* and in kvm_asm.h.
*
* In other words, don't touch any of these unless you know what
* you are doing.
*/
.macro save_sysregs
// x2: base address for cpu context
// x3: tmp register
add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
mrs x4, vmpidr_el2
mrs x5, csselr_el1
mrs x6, sctlr_el1
mrs x7, actlr_el1
mrs x8, cpacr_el1
mrs x9, ttbr0_el1
mrs x10, ttbr1_el1
mrs x11, tcr_el1
mrs x12, esr_el1
mrs x13, afsr0_el1
mrs x14, afsr1_el1
mrs x15, far_el1
mrs x16, mair_el1
mrs x17, vbar_el1
mrs x18, contextidr_el1
mrs x19, tpidr_el0
mrs x20, tpidrro_el0
mrs x21, tpidr_el1
mrs x22, amair_el1
mrs x23, cntkctl_el1
stp x4, x5, [x3]
stp x6, x7, [x3, #16]
stp x8, x9, [x3, #32]
stp x10, x11, [x3, #48]
stp x12, x13, [x3, #64]
stp x14, x15, [x3, #80]
stp x16, x17, [x3, #96]
stp x18, x19, [x3, #112]
stp x20, x21, [x3, #128]
stp x22, x23, [x3, #144]
.endm
.macro restore_sysregs
// x2: base address for cpu context
// x3: tmp register
add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
ldp x4, x5, [x3]
ldp x6, x7, [x3, #16]
ldp x8, x9, [x3, #32]
ldp x10, x11, [x3, #48]
ldp x12, x13, [x3, #64]
ldp x14, x15, [x3, #80]
ldp x16, x17, [x3, #96]
ldp x18, x19, [x3, #112]
ldp x20, x21, [x3, #128]
ldp x22, x23, [x3, #144]
msr vmpidr_el2, x4
msr csselr_el1, x5
msr sctlr_el1, x6
msr actlr_el1, x7
msr cpacr_el1, x8
msr ttbr0_el1, x9
msr ttbr1_el1, x10
msr tcr_el1, x11
msr esr_el1, x12
msr afsr0_el1, x13
msr afsr1_el1, x14
msr far_el1, x15
msr mair_el1, x16
msr vbar_el1, x17
msr contextidr_el1, x18
msr tpidr_el0, x19
msr tpidrro_el0, x20
msr tpidr_el1, x21
msr amair_el1, x22
msr cntkctl_el1, x23
.endm
.macro skip_32bit_state tmp, target
// Skip 32bit state if not needed
mrs \tmp, hcr_el2
tbnz \tmp, #HCR_RW_SHIFT, \target
.endm
.macro skip_tee_state tmp, target
// Skip ThumbEE state if not needed
mrs \tmp, id_pfr0_el1
tbz \tmp, #12, \target
.endm
.macro save_guest_32bit_state
skip_32bit_state x3, 1f
add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
mrs x4, spsr_abt
mrs x5, spsr_und
mrs x6, spsr_irq
mrs x7, spsr_fiq
stp x4, x5, [x3]
stp x6, x7, [x3, #16]
add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
mrs x4, dacr32_el2
mrs x5, ifsr32_el2
mrs x6, fpexc32_el2
mrs x7, dbgvcr32_el2
stp x4, x5, [x3]
stp x6, x7, [x3, #16]
skip_tee_state x8, 1f
add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
mrs x4, teecr32_el1
mrs x5, teehbr32_el1
stp x4, x5, [x3]
1:
.endm
.macro restore_guest_32bit_state
skip_32bit_state x3, 1f
add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
ldp x4, x5, [x3]
ldp x6, x7, [x3, #16]
msr spsr_abt, x4
msr spsr_und, x5
msr spsr_irq, x6
msr spsr_fiq, x7
add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
ldp x4, x5, [x3]
ldp x6, x7, [x3, #16]
msr dacr32_el2, x4
msr ifsr32_el2, x5
msr fpexc32_el2, x6
msr dbgvcr32_el2, x7
skip_tee_state x8, 1f
add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
ldp x4, x5, [x3]
msr teecr32_el1, x4
msr teehbr32_el1, x5
1:
.endm
.macro activate_traps
ldr x2, [x0, #VCPU_IRQ_LINES]
ldr x1, [x0, #VCPU_HCR_EL2]
orr x2, x2, x1
msr hcr_el2, x2
ldr x2, =(CPTR_EL2_TTA)
msr cptr_el2, x2
ldr x2, =(1 << 15) // Trap CP15 Cr=15
msr hstr_el2, x2
mrs x2, mdcr_el2
and x2, x2, #MDCR_EL2_HPMN_MASK
orr x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
msr mdcr_el2, x2
.endm
.macro deactivate_traps
mov x2, #HCR_RW
msr hcr_el2, x2
msr cptr_el2, xzr
msr hstr_el2, xzr
mrs x2, mdcr_el2
and x2, x2, #MDCR_EL2_HPMN_MASK
msr mdcr_el2, x2
.endm
.macro activate_vm
ldr x1, [x0, #VCPU_KVM]
kern_hyp_va x1
ldr x2, [x1, #KVM_VTTBR]
msr vttbr_el2, x2
.endm
.macro deactivate_vm
msr vttbr_el2, xzr
.endm
/*
* Save the VGIC CPU state into memory
* x0: Register pointing to VCPU struct
* Do not corrupt x1!!!
*/
.macro save_vgic_state
/* Get VGIC VCTRL base into x2 */
ldr x2, [x0, #VCPU_KVM]
kern_hyp_va x2
ldr x2, [x2, #KVM_VGIC_VCTRL]
kern_hyp_va x2
cbz x2, 2f // disabled
/* Compute the address of struct vgic_cpu */
add x3, x0, #VCPU_VGIC_CPU
/* Save all interesting registers */
ldr w4, [x2, #GICH_HCR]
ldr w5, [x2, #GICH_VMCR]
ldr w6, [x2, #GICH_MISR]
ldr w7, [x2, #GICH_EISR0]
ldr w8, [x2, #GICH_EISR1]
ldr w9, [x2, #GICH_ELRSR0]
ldr w10, [x2, #GICH_ELRSR1]
ldr w11, [x2, #GICH_APR]
str w4, [x3, #VGIC_CPU_HCR]
str w5, [x3, #VGIC_CPU_VMCR]
str w6, [x3, #VGIC_CPU_MISR]
str w7, [x3, #VGIC_CPU_EISR]
str w8, [x3, #(VGIC_CPU_EISR + 4)]
str w9, [x3, #VGIC_CPU_ELRSR]
str w10, [x3, #(VGIC_CPU_ELRSR + 4)]
str w11, [x3, #VGIC_CPU_APR]
/* Clear GICH_HCR */
str wzr, [x2, #GICH_HCR]
/* Save list registers */
add x2, x2, #GICH_LR0
ldr w4, [x3, #VGIC_CPU_NR_LR]
add x3, x3, #VGIC_CPU_LR
1: ldr w5, [x2], #4
str w5, [x3], #4
sub w4, w4, #1
cbnz w4, 1b
2:
.endm
/*
* Restore the VGIC CPU state from memory
* x0: Register pointing to VCPU struct
*/
.macro restore_vgic_state
/* Get VGIC VCTRL base into x2 */
ldr x2, [x0, #VCPU_KVM]
kern_hyp_va x2
ldr x2, [x2, #KVM_VGIC_VCTRL]
kern_hyp_va x2
cbz x2, 2f // disabled
/* Compute the address of struct vgic_cpu */
add x3, x0, #VCPU_VGIC_CPU
/* We only restore a minimal set of registers */
ldr w4, [x3, #VGIC_CPU_HCR]
ldr w5, [x3, #VGIC_CPU_VMCR]
ldr w6, [x3, #VGIC_CPU_APR]
str w4, [x2, #GICH_HCR]
str w5, [x2, #GICH_VMCR]
str w6, [x2, #GICH_APR]
/* Restore list registers */
add x2, x2, #GICH_LR0
ldr w4, [x3, #VGIC_CPU_NR_LR]
add x3, x3, #VGIC_CPU_LR
1: ldr w5, [x3], #4
str w5, [x2], #4
sub w4, w4, #1
cbnz w4, 1b
2:
.endm
.macro save_timer_state
// x0: vcpu pointer
ldr x2, [x0, #VCPU_KVM]
kern_hyp_va x2
ldr w3, [x2, #KVM_TIMER_ENABLED]
cbz w3, 1f
mrs x3, cntv_ctl_el0
and x3, x3, #3
str w3, [x0, #VCPU_TIMER_CNTV_CTL]
bic x3, x3, #1 // Clear Enable
msr cntv_ctl_el0, x3
isb
mrs x3, cntv_cval_el0
str x3, [x0, #VCPU_TIMER_CNTV_CVAL]
1:
// Allow physical timer/counter access for the host
mrs x2, cnthctl_el2
orr x2, x2, #3
msr cnthctl_el2, x2
// Clear cntvoff for the host
msr cntvoff_el2, xzr
.endm
.macro restore_timer_state
// x0: vcpu pointer
// Disallow physical timer access for the guest
// Physical counter access is allowed
mrs x2, cnthctl_el2
orr x2, x2, #1
bic x2, x2, #2
msr cnthctl_el2, x2
ldr x2, [x0, #VCPU_KVM]
kern_hyp_va x2
ldr w3, [x2, #KVM_TIMER_ENABLED]
cbz w3, 1f
ldr x3, [x2, #KVM_TIMER_CNTVOFF]
msr cntvoff_el2, x3
ldr x2, [x0, #VCPU_TIMER_CNTV_CVAL]
msr cntv_cval_el0, x2
isb
ldr w2, [x0, #VCPU_TIMER_CNTV_CTL]
and x2, x2, #3
msr cntv_ctl_el0, x2
1:
.endm
__save_sysregs:
save_sysregs
ret
__restore_sysregs:
restore_sysregs
ret
__save_fpsimd:
save_fpsimd
ret
__restore_fpsimd:
restore_fpsimd
ret
/*
* u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
*
* This is the world switch. The first half of the function
* deals with entering the guest, and anything from __kvm_vcpu_return
* to the end of the function deals with reentering the host.
* On the enter path, only x0 (vcpu pointer) must be preserved until
* the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
* code) must both be preserved until the epilogue.
* In both cases, x2 points to the CPU context we're saving/restoring from/to.
*/
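/*
 * The host side (arch/arm/kvm/arm.c, shared by this port) enters this
 * function with kvm_call_hyp(__kvm_vcpu_run, vcpu): the HVC issued by
 * kvm_call_hyp lands in el1_sync below, which converts the first
 * argument to a HYP VA and branches to it with the remaining arguments
 * shuffled down.
 */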
ENTRY(__kvm_vcpu_run)
kern_hyp_va x0
msr tpidr_el2, x0 // Save the vcpu register
// Host context
ldr x2, [x0, #VCPU_HOST_CONTEXT]
kern_hyp_va x2
save_host_regs
bl __save_fpsimd
bl __save_sysregs
activate_traps
activate_vm
restore_vgic_state
restore_timer_state
// Guest context
add x2, x0, #VCPU_CONTEXT
bl __restore_sysregs
bl __restore_fpsimd
restore_guest_32bit_state
restore_guest_regs
// That's it, no more messing around.
eret
__kvm_vcpu_return:
// Assume x0 is the vcpu pointer, x1 the return code
// Guest's x0-x3 are on the stack
// Guest context
add x2, x0, #VCPU_CONTEXT
save_guest_regs
bl __save_fpsimd
bl __save_sysregs
save_guest_32bit_state
save_timer_state
save_vgic_state
deactivate_traps
deactivate_vm
// Host context
ldr x2, [x0, #VCPU_HOST_CONTEXT]
kern_hyp_va x2
bl __restore_sysregs
bl __restore_fpsimd
restore_host_regs
mov x0, x1
ret
END(__kvm_vcpu_run)
// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
kern_hyp_va x0
ldr x2, [x0, #KVM_VTTBR]
msr vttbr_el2, x2
isb
/*
* We could do so much better if we had the VA as well.
* Instead, we invalidate Stage-2 for this IPA, and the
* whole of Stage-1. Weep...
*/
tlbi ipas2e1is, x1
dsb sy
tlbi vmalle1is
dsb sy
isb
msr vttbr_el2, xzr
ret
ENDPROC(__kvm_tlb_flush_vmid_ipa)
ENTRY(__kvm_flush_vm_context)
tlbi alle1is
ic ialluis
dsb sy
ret
ENDPROC(__kvm_flush_vm_context)
__kvm_hyp_panic:
// Guess the context by looking at VTTBR:
// If zero, then we're already a host.
// Otherwise restore a minimal host context before panicking.
mrs x0, vttbr_el2
cbz x0, 1f
mrs x0, tpidr_el2
deactivate_traps
deactivate_vm
ldr x2, [x0, #VCPU_HOST_CONTEXT]
kern_hyp_va x2
bl __restore_sysregs
1: adr x0, __hyp_panic_str
adr x1, 2f
ldp x2, x3, [x1]
sub x0, x0, x2
add x0, x0, x3
mrs x1, spsr_el2
mrs x2, elr_el2
mrs x3, esr_el2
mrs x4, far_el2
mrs x5, hpfar_el2
mrs x6, par_el1
mrs x7, tpidr_el2
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
PSR_MODE_EL1h)
msr spsr_el2, lr
ldr lr, =panic
msr elr_el2, lr
eret
.align 3
2: .quad HYP_PAGE_OFFSET
.quad PAGE_OFFSET
ENDPROC(__kvm_hyp_panic)
__hyp_panic_str:
.ascii "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
.align 2
ENTRY(kvm_call_hyp)
hvc #0
ret
ENDPROC(kvm_call_hyp)
.macro invalid_vector label, target
.align 2
\label:
b \target
ENDPROC(\label)
.endm
/* None of these should ever happen */
invalid_vector el2t_sync_invalid, __kvm_hyp_panic
invalid_vector el2t_irq_invalid, __kvm_hyp_panic
invalid_vector el2t_fiq_invalid, __kvm_hyp_panic
invalid_vector el2t_error_invalid, __kvm_hyp_panic
invalid_vector el2h_sync_invalid, __kvm_hyp_panic
invalid_vector el2h_irq_invalid, __kvm_hyp_panic
invalid_vector el2h_fiq_invalid, __kvm_hyp_panic
invalid_vector el2h_error_invalid, __kvm_hyp_panic
invalid_vector el1_sync_invalid, __kvm_hyp_panic
invalid_vector el1_irq_invalid, __kvm_hyp_panic
invalid_vector el1_fiq_invalid, __kvm_hyp_panic
invalid_vector el1_error_invalid, __kvm_hyp_panic
el1_sync: // Guest trapped into EL2
push x0, x1
push x2, x3
mrs x1, esr_el2
lsr x2, x1, #ESR_EL2_EC_SHIFT
cmp x2, #ESR_EL2_EC_HVC64
b.ne el1_trap
mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest
cbnz x3, el1_trap // called HVC
/* Here, we're pretty sure the host called HVC. */
pop x2, x3
pop x0, x1
push lr, xzr
/*
* Compute the function address in EL2, and shuffle the parameters.
*/
kern_hyp_va x0
mov lr, x0
mov x0, x1
mov x1, x2
mov x2, x3
blr lr
pop lr, xzr
eret
el1_trap:
/*
* x1: ESR
* x2: ESR_EC
*/
cmp x2, #ESR_EL2_EC_DABT
mov x0, #ESR_EL2_EC_IABT
ccmp x2, x0, #4, ne
b.ne 1f // Not an abort we care about
/* This is an abort. Check for permission fault */
and x2, x1, #ESR_EL2_FSC_TYPE
cmp x2, #FSC_PERM
b.ne 1f // Not a permission fault
/*
* Check for Stage-1 page table walk, which is guaranteed
* to give a valid HPFAR_EL2.
*/
tbnz x1, #7, 1f // S1PTW is set
/*
* Permission fault, HPFAR_EL2 is invalid.
* Resolve the IPA the hard way using the guest VA.
* Stage-1 translation already validated the memory access rights.
* As such, we can use the EL1 translation regime, and don't have
* to distinguish between EL0 and EL1 access.
*/
mrs x2, far_el2
at s1e1r, x2
isb
/* Read result */
mrs x3, par_el1
tbnz x3, #0, 3f // Bail out if we failed the translation
ubfx x3, x3, #12, #36 // Extract IPA
lsl x3, x3, #4 // and present it like HPFAR
b 2f
1: mrs x3, hpfar_el2
mrs x2, far_el2
2: mrs x0, tpidr_el2
str x1, [x0, #VCPU_ESR_EL2]
str x2, [x0, #VCPU_FAR_EL2]
str x3, [x0, #VCPU_HPFAR_EL2]
mov x1, #ARM_EXCEPTION_TRAP
b __kvm_vcpu_return
/*
* Translation failed. Just return to the guest and
* let it fault again. Another CPU is probably playing
* behind our back.
*/
3: pop x2, x3
pop x0, x1
eret
el1_irq:
push x0, x1
push x2, x3
mrs x0, tpidr_el2
mov x1, #ARM_EXCEPTION_IRQ
b __kvm_vcpu_return
.ltorg
.align 11
ENTRY(__kvm_hyp_vector)
ventry el2t_sync_invalid // Synchronous EL2t
ventry el2t_irq_invalid // IRQ EL2t
ventry el2t_fiq_invalid // FIQ EL2t
ventry el2t_error_invalid // Error EL2t
ventry el2h_sync_invalid // Synchronous EL2h
ventry el2h_irq_invalid // IRQ EL2h
ventry el2h_fiq_invalid // FIQ EL2h
ventry el2h_error_invalid // Error EL2h
ventry el1_sync // Synchronous 64-bit EL1
ventry el1_irq // IRQ 64-bit EL1
ventry el1_fiq_invalid // FIQ 64-bit EL1
ventry el1_error_invalid // Error 64-bit EL1
ventry el1_sync // Synchronous 32-bit EL1
ventry el1_irq // IRQ 32-bit EL1
ventry el1_fiq_invalid // FIQ 32-bit EL1
ventry el1_error_invalid // Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)
__kvm_hyp_code_end:
.globl __kvm_hyp_code_end
.popsection
/*
* Fault injection for both 32 and 64bit guests.
*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* Based on arch/arm/kvm/emulate.c
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/esr.h>
#define PSTATE_FAULT_BITS_64 (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
PSR_I_BIT | PSR_D_BIT)
#define EL1_EXCEPT_SYNC_OFFSET 0x200
static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
unsigned long cpsr;
unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
u32 return_offset = (is_thumb) ? 4 : 0;
u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
cpsr = mode | COMPAT_PSR_I_BIT;
if (sctlr & (1 << 30))
cpsr |= COMPAT_PSR_T_BIT;
if (sctlr & (1 << 25))
cpsr |= COMPAT_PSR_E_BIT;
*vcpu_cpsr(vcpu) = cpsr;
/* Note: These now point to the banked copies */
*vcpu_spsr(vcpu) = new_spsr_value;
*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
/* Branch to exception vector */
if (sctlr & (1 << 13))
vect_offset += 0xffff0000;
else /* always have security exceptions */
vect_offset += vcpu_cp15(vcpu, c12_VBAR);
*vcpu_pc(vcpu) = vect_offset;
}
static void inject_undef32(struct kvm_vcpu *vcpu)
{
prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
}
/*
* Modelled after TakeDataAbortException() and TakePrefetchAbortException()
* pseudocode.
*/
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
unsigned long addr)
{
u32 vect_offset;
u32 *far, *fsr;
bool is_lpae;
if (is_pabt) {
vect_offset = 12;
far = &vcpu_cp15(vcpu, c6_IFAR);
fsr = &vcpu_cp15(vcpu, c5_IFSR);
} else { /* !iabt */
vect_offset = 16;
far = &vcpu_cp15(vcpu, c6_DFAR);
fsr = &vcpu_cp15(vcpu, c5_DFSR);
}
prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
*far = addr;
/* Give the guest an IMPLEMENTATION DEFINED exception */
is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
if (is_lpae)
*fsr = 1 << 9 | 0x34;
else
*fsr = 0x14;
}
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
unsigned long cpsr = *vcpu_cpsr(vcpu);
bool is_aarch32;
u32 esr = 0;
is_aarch32 = vcpu_mode_is_32bit(vcpu);
*vcpu_spsr(vcpu) = cpsr;
*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
vcpu_sys_reg(vcpu, FAR_EL1) = addr;
/*
* Build an {i,d}abort, depending on the level and the
* instruction set. Report an external synchronous abort.
*/
if (kvm_vcpu_trap_il_is32bit(vcpu))
esr |= ESR_EL1_IL;
/*
* Here, the guest runs in AArch64 mode when in EL1. If we get
* an AArch32 fault, it means we managed to trap an EL0 fault.
*/
if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
esr |= (ESR_EL1_EC_IABT_EL0 << ESR_EL1_EC_SHIFT);
else
esr |= (ESR_EL1_EC_IABT_EL1 << ESR_EL1_EC_SHIFT);
if (!is_iabt)
esr |= ESR_EL1_EC_DABT_EL0;
vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_EL2_EC_xABT_xFSR_EXTABT;
}
static void inject_undef64(struct kvm_vcpu *vcpu)
{
unsigned long cpsr = *vcpu_cpsr(vcpu);
u32 esr = (ESR_EL1_EC_UNKNOWN << ESR_EL1_EC_SHIFT);
*vcpu_spsr(vcpu) = cpsr;
*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
/*
* Build an unknown exception, depending on the instruction
* set.
*/
if (kvm_vcpu_trap_il_is32bit(vcpu))
esr |= ESR_EL1_IL;
vcpu_sys_reg(vcpu, ESR_EL1) = esr;
}
/**
* kvm_inject_dabt - inject a data abort into the guest
* @vcpu: The VCPU to receive the undefined exception
* @addr: The address to report in the DFAR
*
* It is assumed that this code is called from the VCPU thread and that the
* VCPU therefore is not currently executing guest code.
*/
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
if (!(vcpu->arch.hcr_el2 & HCR_RW))
inject_abt32(vcpu, false, addr);
inject_abt64(vcpu, false, addr);
}
/**
* kvm_inject_pabt - inject a prefetch abort into the guest
* @vcpu: The VCPU to receive the undefined exception
* @addr: The address to report in the DFAR
*
* It is assumed that this code is called from the VCPU thread and that the
* VCPU therefore is not currently executing guest code.
*/
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
if (!(vcpu->arch.hcr_el2 & HCR_RW))
inject_abt32(vcpu, true, addr);
inject_abt64(vcpu, true, addr);
}
/**
* kvm_inject_undefined - inject an undefined instruction into the guest
*
* It is assumed that this code is called from the VCPU thread and that the
* VCPU therefore is not currently executing guest code.
*/
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
if (!(vcpu->arch.hcr_el2 & HCR_RW))
inject_undef32(vcpu);
inject_undef64(vcpu);
}
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* Derived from arch/arm/kvm/emulate.c:
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/ptrace.h>
#define VCPU_NR_MODES 6
#define REG_OFFSET(_reg) \
(offsetof(struct user_pt_regs, _reg) / sizeof(unsigned long))
#define USR_REG_OFFSET(R) REG_OFFSET(compat_usr(R))
static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
/* USR Registers */
{
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
REG_OFFSET(pc)
},
/* FIQ Registers */
{
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7),
REG_OFFSET(compat_r8_fiq), /* r8 */
REG_OFFSET(compat_r9_fiq), /* r9 */
REG_OFFSET(compat_r10_fiq), /* r10 */
REG_OFFSET(compat_r11_fiq), /* r11 */
REG_OFFSET(compat_r12_fiq), /* r12 */
REG_OFFSET(compat_sp_fiq), /* r13 */
REG_OFFSET(compat_lr_fiq), /* r14 */
REG_OFFSET(pc)
},
/* IRQ Registers */
{
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
USR_REG_OFFSET(12),
REG_OFFSET(compat_sp_irq), /* r13 */
REG_OFFSET(compat_lr_irq), /* r14 */
REG_OFFSET(pc)
},
/* SVC Registers */
{
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
USR_REG_OFFSET(12),
REG_OFFSET(compat_sp_svc), /* r13 */
REG_OFFSET(compat_lr_svc), /* r14 */
REG_OFFSET(pc)
},
/* ABT Registers */
{
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
USR_REG_OFFSET(12),
REG_OFFSET(compat_sp_abt), /* r13 */
REG_OFFSET(compat_lr_abt), /* r14 */
REG_OFFSET(pc)
},
/* UND Registers */
{
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
USR_REG_OFFSET(12),
REG_OFFSET(compat_sp_und), /* r13 */
REG_OFFSET(compat_lr_und), /* r14 */
REG_OFFSET(pc)
},
};
/*
* Return a pointer to the register number valid in the current mode of
* the virtual CPU.
*/
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
{
unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
switch (mode) {
case COMPAT_PSR_MODE_USR ... COMPAT_PSR_MODE_SVC:
mode &= ~PSR_MODE32_BIT; /* 0 ... 3 */
break;
case COMPAT_PSR_MODE_ABT:
mode = 4;
break;
case COMPAT_PSR_MODE_UND:
mode = 5;
break;
case COMPAT_PSR_MODE_SYS:
mode = 0; /* SYS maps to USR */
break;
default:
BUG();
}
return reg_array + vcpu_reg_offsets[mode][reg_num];
}
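/*
 * For instance, with the guest in IRQ mode (COMPAT_PSR_MODE_IRQ, 0x12),
 * the switch above selects row 2 of vcpu_reg_offsets, so
 * vcpu_reg32(vcpu, 13) points at compat_sp_irq while vcpu_reg32(vcpu, 0)
 * still resolves to the shared user r0.
 */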
/*
* Return the SPSR for the current mode of the virtual CPU.
*/
unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu)
{
unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
switch (mode) {
case COMPAT_PSR_MODE_SVC:
mode = KVM_SPSR_SVC;
break;
case COMPAT_PSR_MODE_ABT:
mode = KVM_SPSR_ABT;
break;
case COMPAT_PSR_MODE_UND:
mode = KVM_SPSR_UND;
break;
case COMPAT_PSR_MODE_IRQ:
mode = KVM_SPSR_IRQ;
break;
case COMPAT_PSR_MODE_FIQ:
mode = KVM_SPSR_FIQ;
break;
default:
BUG();
}
return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[mode];
}
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* Derived from arch/arm/kvm/reset.c
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <kvm/arm_arch_timer.h>
#include <asm/cputype.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_coproc.h>
/*
* ARMv8 Reset Values
*/
static const struct kvm_regs default_regs_reset = {
.regs.pstate = (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT |
PSR_F_BIT | PSR_D_BIT),
};
static const struct kvm_regs default_regs_reset32 = {
.regs.pstate = (COMPAT_PSR_MODE_SVC | COMPAT_PSR_A_BIT |
COMPAT_PSR_I_BIT | COMPAT_PSR_F_BIT),
};
static const struct kvm_irq_level default_vtimer_irq = {
.irq = 27,
.level = 1,
};
static bool cpu_has_32bit_el1(void)
{
u64 pfr0;
pfr0 = read_cpuid(ID_AA64PFR0_EL1);
return !!(pfr0 & 0x20);
}
int kvm_arch_dev_ioctl_check_extension(long ext)
{
int r;
switch (ext) {
case KVM_CAP_ARM_EL1_32BIT:
r = cpu_has_32bit_el1();
break;
default:
r = 0;
}
return r;
}
/**
* kvm_reset_vcpu - sets core registers and sys_regs to reset value
* @vcpu: The VCPU pointer
*
* This function finds the right table above and sets the registers on
* the virtual CPU struct to their architecturally defined reset
* values.
*/
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
const struct kvm_irq_level *cpu_vtimer_irq;
const struct kvm_regs *cpu_reset;
switch (vcpu->arch.target) {
default:
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
if (!cpu_has_32bit_el1())
return -EINVAL;
cpu_reset = &default_regs_reset32;
vcpu->arch.hcr_el2 &= ~HCR_RW;
} else {
cpu_reset = &default_regs_reset;
}
cpu_vtimer_irq = &default_vtimer_irq;
break;
}
/* Reset core registers */
memcpy(vcpu_gp_regs(vcpu), cpu_reset, sizeof(*cpu_reset));
/* Reset system registers */
kvm_reset_sys_regs(vcpu);
/* Reset timer */
kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
return 0;
}
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* Derived from arch/arm/kvm/coproc.c:
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Authors: Rusty Russell <rusty@rustcorp.com.au>
* Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <trace/events/kvm.h>
#include "sys_regs.h"
/*
* All of this file is extremely similar to the ARM coproc.c, but the
* types are different. My gut feeling is that it should be pretty
* easy to merge, but that would be an ABI breakage -- again. VFP
* would also need to be abstracted.
*
* For AArch32, we only take care of what is being trapped. Anything
* that has to do with init and userspace access has to go via the
* 64bit interface.
*/
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;
/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12
/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
u32 ccsidr;
/* Make sure no one else changes CSSELR during this! */
local_irq_disable();
/* Put value into CSSELR */
asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
isb();
/* Read result out of CCSIDR */
asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
local_irq_enable();
return ccsidr;
}
static void do_dc_cisw(u32 val)
{
asm volatile("dc cisw, %x0" : : "r" (val));
dsb();
}
static void do_dc_csw(u32 val)
{
asm volatile("dc csw, %x0" : : "r" (val));
dsb();
}
/* See note at ARM ARM B1.14.4 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
unsigned long val;
int cpu;
if (!p->is_write)
return read_from_write_only(vcpu, p);
cpu = get_cpu();
cpumask_setall(&vcpu->arch.require_dcache_flush);
cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
/* If we were already preempted, take the long way around */
if (cpu != vcpu->arch.last_pcpu) {
flush_cache_all();
goto done;
}
val = *vcpu_reg(vcpu, p->Rt);
switch (p->CRm) {
case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
case 14: /* DCCISW */
do_dc_cisw(val);
break;
case 10: /* DCCSW */
do_dc_csw(val);
break;
}
done:
put_cpu();
return true;
}
/*
* We could trap ID_DFR0 and tell the guest we don't support performance
* monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
* NAKed, so it will read the PMCR anyway.
*
* Therefore we tell the guest we have 0 counters. Unfortunately, we
* must always support PMCCNTR (the cycle counter): we just RAZ/WI for
* all PM registers, which doesn't crash the guest kernel at least.
*/
static bool pm_fake(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write)
return ignore_write(vcpu, p);
else
return read_zero(vcpu, p);
}
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 amair;
asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
}
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
/*
* Simply map the vcpu_id into the Aff0 field of the MPIDR.
*/
vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
}
/*
* Architected system registers.
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
*/
static const struct sys_reg_desc sys_reg_descs[] = {
/* DC ISW */
{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
access_dcsw },
/* DC CSW */
{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
access_dcsw },
/* DC CISW */
{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
access_dcsw },
/* TEECR32_EL1 */
{ Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
NULL, reset_val, TEECR32_EL1, 0 },
/* TEEHBR32_EL1 */
{ Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
NULL, reset_val, TEEHBR32_EL1, 0 },
/* DBGVCR32_EL2 */
{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
NULL, reset_val, DBGVCR32_EL2, 0 },
/* MPIDR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
NULL, reset_mpidr, MPIDR_EL1 },
/* SCTLR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
NULL, reset_val, SCTLR_EL1, 0x00C50078 },
/* CPACR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
NULL, reset_val, CPACR_EL1, 0 },
/* TTBR0_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
NULL, reset_unknown, TTBR0_EL1 },
/* TTBR1_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
NULL, reset_unknown, TTBR1_EL1 },
/* TCR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
NULL, reset_val, TCR_EL1, 0 },
/* AFSR0_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
NULL, reset_unknown, AFSR0_EL1 },
/* AFSR1_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
NULL, reset_unknown, AFSR1_EL1 },
/* ESR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
NULL, reset_unknown, ESR_EL1 },
/* FAR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
NULL, reset_unknown, FAR_EL1 },
/* PMINTENSET_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
pm_fake },
/* PMINTENCLR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
pm_fake },
/* MAIR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
NULL, reset_unknown, MAIR_EL1 },
/* AMAIR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
NULL, reset_amair_el1, AMAIR_EL1 },
/* VBAR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
NULL, reset_val, VBAR_EL1, 0 },
/* CONTEXTIDR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
NULL, reset_val, CONTEXTIDR_EL1, 0 },
/* TPIDR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
NULL, reset_unknown, TPIDR_EL1 },
/* CNTKCTL_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
NULL, reset_val, CNTKCTL_EL1, 0},
/* CSSELR_EL1 */
{ Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
NULL, reset_unknown, CSSELR_EL1 },
/* PMCR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
pm_fake },
/* PMCNTENSET_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
pm_fake },
/* PMCNTENCLR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
pm_fake },
/* PMOVSCLR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
pm_fake },
/* PMSWINC_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
pm_fake },
/* PMSELR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
pm_fake },
/* PMCEID0_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
pm_fake },
/* PMCEID1_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
pm_fake },
/* PMCCNTR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
pm_fake },
/* PMXEVTYPER_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
pm_fake },
/* PMXEVCNTR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
pm_fake },
/* PMUSERENR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
pm_fake },
/* PMOVSSET_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
pm_fake },
/* TPIDR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
NULL, reset_unknown, TPIDR_EL0 },
/* TPIDRRO_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
NULL, reset_unknown, TPIDRRO_EL0 },
/* DACR32_EL2 */
{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
NULL, reset_unknown, DACR32_EL2 },
/* IFSR32_EL2 */
{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
NULL, reset_unknown, IFSR32_EL2 },
/* FPEXC32_EL2 */
{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
NULL, reset_val, FPEXC32_EL2, 0x70 },
};
/* Trapped cp15 registers */
static const struct sys_reg_desc cp15_regs[] = {
/*
* DC{C,I,CI}SW operations:
*/
{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
{ Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake },
{ Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake },
{ Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake },
{ Op1( 0), CRn( 9), CRm(12), Op2( 3), pm_fake },
{ Op1( 0), CRn( 9), CRm(12), Op2( 5), pm_fake },
{ Op1( 0), CRn( 9), CRm(12), Op2( 6), pm_fake },
{ Op1( 0), CRn( 9), CRm(12), Op2( 7), pm_fake },
{ Op1( 0), CRn( 9), CRm(13), Op2( 0), pm_fake },
{ Op1( 0), CRn( 9), CRm(13), Op2( 1), pm_fake },
{ Op1( 0), CRn( 9), CRm(13), Op2( 2), pm_fake },
{ Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake },
{ Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake },
{ Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake },
};
/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];
void kvm_register_target_sys_reg_table(unsigned int target,
struct kvm_sys_reg_target_table *table)
{
target_tables[target] = table;
}
/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
bool mode_is_64,
size_t *num)
{
struct kvm_sys_reg_target_table *table;
table = target_tables[target];
if (mode_is_64) {
*num = table->table64.num;
return table->table64.table;
} else {
*num = table->table32.num;
return table->table32.table;
}
}
static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
const struct sys_reg_desc table[],
unsigned int num)
{
unsigned int i;
for (i = 0; i < num; i++) {
const struct sys_reg_desc *r = &table[i];
if (params->Op0 != r->Op0)
continue;
if (params->Op1 != r->Op1)
continue;
if (params->CRn != r->CRn)
continue;
if (params->CRm != r->CRm)
continue;
if (params->Op2 != r->Op2)
continue;
return r;
}
return NULL;
}
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
kvm_inject_undefined(vcpu);
return 1;
}
int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
kvm_inject_undefined(vcpu);
return 1;
}
static void emulate_cp15(struct kvm_vcpu *vcpu,
const struct sys_reg_params *params)
{
size_t num;
const struct sys_reg_desc *table, *r;
table = get_target_table(vcpu->arch.target, false, &num);
/* Search target-specific then generic table. */
r = find_reg(params, table, num);
if (!r)
r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));
if (likely(r)) {
/*
* Not having an accessor means that we have
* configured a trap that we don't know how to
* handle. This certainly qualifies as a gross bug
* that should be fixed right away.
*/
BUG_ON(!r->access);
if (likely(r->access(vcpu, params, r))) {
/* Skip instruction, since it was emulated */
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return;
}
/* If access function fails, it should complain. */
}
kvm_err("Unsupported guest CP15 access at: %08lx\n", *vcpu_pc(vcpu));
print_sys_reg_instr(params);
kvm_inject_undefined(vcpu);
}
/**
* kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
* @vcpu: The VCPU pointer
* @run: The kvm_run struct
*/
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
struct sys_reg_params params;
u32 hsr = kvm_vcpu_get_hsr(vcpu);
int Rt2 = (hsr >> 10) & 0xf;
params.CRm = (hsr >> 1) & 0xf;
params.Rt = (hsr >> 5) & 0xf;
params.is_write = ((hsr & 1) == 0);
params.Op0 = 0;
params.Op1 = (hsr >> 16) & 0xf;
params.Op2 = 0;
params.CRn = 0;
/*
* Massive hack here. Store Rt2 in the top 32bits so we only
* have one register to deal with. As we use the same trap
* backends between AArch32 and AArch64, we get away with it.
*/
if (params.is_write) {
u64 val = *vcpu_reg(vcpu, params.Rt);
val &= 0xffffffff;
val |= *vcpu_reg(vcpu, Rt2) << 32;
*vcpu_reg(vcpu, params.Rt) = val;
}
emulate_cp15(vcpu, &params);
/* Do the opposite hack for the read side */
if (!params.is_write) {
u64 val = *vcpu_reg(vcpu, params.Rt);
val >>= 32;
*vcpu_reg(vcpu, Rt2) = val;
}
return 1;
}
/**
* kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
* @vcpu: The VCPU pointer
* @run: The kvm_run struct
*/
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
struct sys_reg_params params;
u32 hsr = kvm_vcpu_get_hsr(vcpu);
params.CRm = (hsr >> 1) & 0xf;
params.Rt = (hsr >> 5) & 0xf;
params.is_write = ((hsr & 1) == 0);
params.CRn = (hsr >> 10) & 0xf;
params.Op0 = 0;
params.Op1 = (hsr >> 14) & 0x7;
params.Op2 = (hsr >> 17) & 0x7;
emulate_cp15(vcpu, &params);
return 1;
}
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
const struct sys_reg_params *params)
{
size_t num;
const struct sys_reg_desc *table, *r;
table = get_target_table(vcpu->arch.target, true, &num);
/* Search target-specific then generic table. */
r = find_reg(params, table, num);
if (!r)
r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
if (likely(r)) {
/*
* Not having an accessor means that we have
* configured a trap that we don't know how to
* handle. This certainly qualifies as a gross bug
* that should be fixed right away.
*/
BUG_ON(!r->access);
if (likely(r->access(vcpu, params, r))) {
/* Skip instruction, since it was emulated */
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 1;
}
/* If access function fails, it should complain. */
} else {
kvm_err("Unsupported guest sys_reg access at: %lx\n",
*vcpu_pc(vcpu));
print_sys_reg_instr(params);
}
kvm_inject_undefined(vcpu);
return 1;
}
static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *table, size_t num)
{
unsigned long i;
for (i = 0; i < num; i++)
if (table[i].reset)
table[i].reset(vcpu, &table[i]);
}
/**
* kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
* @vcpu: The VCPU pointer
* @run: The kvm_run struct
*/
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
struct sys_reg_params params;
unsigned long esr = kvm_vcpu_get_hsr(vcpu);
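/*
 * ESR_EL2 ISS layout for a trapped MSR/MRS access (assumed): Op0 in
 * [21:20], Op2 in [19:17], Op1 in [16:14], CRn in [13:10], Rt in [9:5],
 * CRm in [4:1], with bit 0 clear when the guest writes (MSR) the register.
 */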
params.Op0 = (esr >> 20) & 3;
params.Op1 = (esr >> 14) & 0x7;
params.CRn = (esr >> 10) & 0xf;
params.CRm = (esr >> 1) & 0xf;
params.Op2 = (esr >> 17) & 0x7;
params.Rt = (esr >> 5) & 0x1f;
params.is_write = !(esr & 1);
return emulate_sys_reg(vcpu, &params);
}
/******************************************************************************
* Userspace API
*****************************************************************************/
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
switch (id & KVM_REG_SIZE_MASK) {
case KVM_REG_SIZE_U64:
/* Any unused index bits means it's not valid. */
if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
| KVM_REG_ARM_COPROC_MASK
| KVM_REG_ARM64_SYSREG_OP0_MASK
| KVM_REG_ARM64_SYSREG_OP1_MASK
| KVM_REG_ARM64_SYSREG_CRN_MASK
| KVM_REG_ARM64_SYSREG_CRM_MASK
| KVM_REG_ARM64_SYSREG_OP2_MASK))
return false;
params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
>> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
>> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
>> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
>> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
>> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
return true;
default:
return false;
}
}
/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
u64 id)
{
size_t num;
const struct sys_reg_desc *table, *r;
struct sys_reg_params params;
/* We only do sys_reg for now. */
if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
return NULL;
if (!index_to_params(id, &params))
return NULL;
table = get_target_table(vcpu->arch.target, true, &num);
r = find_reg(&params, table, num);
if (!r)
r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
/* Not saved in the sys_reg array? */
if (r && !r->reg)
r = NULL;
return r;
}
/*
* These are the invariant sys_reg registers: we let the guest see the
* host versions of these, so they're part of the guest state.
*
* A future CPU may provide a mechanism to present different values to
* the guest, or a future kvm may trap them.
*/
#define FUNCTION_INVARIANT(reg) \
static void get_##reg(struct kvm_vcpu *v, \
const struct sys_reg_desc *r) \
{ \
u64 val; \
\
asm volatile("mrs %0, " __stringify(reg) "\n" \
: "=r" (val)); \
((struct sys_reg_desc *)r)->val = val; \
}
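/*
 * For instance, FUNCTION_INVARIANT(midr_el1) expands to get_midr_el1(),
 * which snapshots the host's MIDR_EL1 into the matching table entry's ->val.
 */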
FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
NULL, get_midr_el1 },
{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
NULL, get_revidr_el1 },
{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
NULL, get_id_pfr0_el1 },
{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
NULL, get_id_pfr1_el1 },
{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
NULL, get_id_dfr0_el1 },
{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
NULL, get_id_afr0_el1 },
{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
NULL, get_id_mmfr0_el1 },
{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
NULL, get_id_mmfr1_el1 },
{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
NULL, get_id_mmfr2_el1 },
{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
NULL, get_id_mmfr3_el1 },
{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
NULL, get_id_isar0_el1 },
{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
NULL, get_id_isar1_el1 },
{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
NULL, get_id_isar2_el1 },
{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
NULL, get_id_isar3_el1 },
{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
NULL, get_id_isar4_el1 },
{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
NULL, get_id_isar5_el1 },
{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
NULL, get_clidr_el1 },
{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
NULL, get_aidr_el1 },
{ Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
NULL, get_ctr_el0 },
};
static int reg_from_user(void *val, const void __user *uaddr, u64 id)
{
/* This Just Works because we are little endian. */
if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
return -EFAULT;
return 0;
}
static int reg_to_user(void __user *uaddr, const void *val, u64 id)
{
/* This Just Works because we are little endian. */
if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
return -EFAULT;
return 0;
}
static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
struct sys_reg_params params;
const struct sys_reg_desc *r;
if (!index_to_params(id, &params))
return -ENOENT;
r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
if (!r)
return -ENOENT;
return reg_to_user(uaddr, &r->val, id);
}
static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
struct sys_reg_params params;
const struct sys_reg_desc *r;
int err;
u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
if (!index_to_params(id, &params))
return -ENOENT;
r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
if (!r)
return -ENOENT;
err = reg_from_user(&val, uaddr, id);
if (err)
return err;
/* This is what we mean by invariant: you can't change it. */
if (r->val != val)
return -EINVAL;
return 0;
}
static bool is_valid_cache(u32 val)
{
u32 level, ctype;
if (val >= CSSELR_MAX)
return false;
/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
level = (val >> 1);
ctype = (cache_levels >> (level * 3)) & 7;
switch (ctype) {
case 0: /* No cache */
return false;
case 1: /* Instruction cache only */
return (val & 1);
case 2: /* Data cache only */
case 4: /* Unified cache */
return !(val & 1);
case 3: /* Separate instruction and data caches */
return true;
default: /* Reserved: we can't know instruction or data. */
return false;
}
}
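/*
 * Example: val == 3 (0b011) selects level 1 (the L2 cache) with the
 * instruction bit set, so it is only valid if that level reports an
 * instruction-only or separate I/D cache.
 */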
static int demux_c15_get(u64 id, void __user *uaddr)
{
u32 val;
u32 __user *uval = uaddr;
/* Fail if we have unknown bits set. */
if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
| ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
return -ENOENT;
switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
case KVM_REG_ARM_DEMUX_ID_CCSIDR:
if (KVM_REG_SIZE(id) != 4)
return -ENOENT;
val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
if (!is_valid_cache(val))
return -ENOENT;
return put_user(get_ccsidr(val), uval);
default:
return -ENOENT;
}
}
static int demux_c15_set(u64 id, void __user *uaddr)
{
u32 val, newval;
u32 __user *uval = uaddr;
/* Fail if we have unknown bits set. */
if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
| ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
return -ENOENT;
switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
case KVM_REG_ARM_DEMUX_ID_CCSIDR:
if (KVM_REG_SIZE(id) != 4)
return -ENOENT;
val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
if (!is_valid_cache(val))
return -ENOENT;
if (get_user(newval, uval))
return -EFAULT;
/* This is also invariant: you can't change it. */
if (newval != get_ccsidr(val))
return -EINVAL;
return 0;
default:
return -ENOENT;
}
}
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
const struct sys_reg_desc *r;
void __user *uaddr = (void __user *)(unsigned long)reg->addr;
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
return demux_c15_get(reg->id, uaddr);
if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
return -ENOENT;
r = index_to_sys_reg_desc(vcpu, reg->id);
if (!r)
return get_invariant_sys_reg(reg->id, uaddr);
return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
const struct sys_reg_desc *r;
void __user *uaddr = (void __user *)(unsigned long)reg->addr;
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
return demux_c15_set(reg->id, uaddr);
if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
return -ENOENT;
r = index_to_sys_reg_desc(vcpu, reg->id);
if (!r)
return set_invariant_sys_reg(reg->id, uaddr);
return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}
static unsigned int num_demux_regs(void)
{
unsigned int i, count = 0;
for (i = 0; i < CSSELR_MAX; i++)
if (is_valid_cache(i))
count++;
return count;
}
static int write_demux_regids(u64 __user *uindices)
{
u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
unsigned int i;
val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
for (i = 0; i < CSSELR_MAX; i++) {
if (!is_valid_cache(i))
continue;
if (put_user(val | i, uindices))
return -EFAULT;
uindices++;
}
return 0;
}
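/*
 * Each index produced above therefore has the form
 * KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX |
 * KVM_REG_ARM_DEMUX_ID_CCSIDR | <CSSELR value>,
 * one per cache level/type accepted by is_valid_cache().
 */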
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
KVM_REG_ARM64_SYSREG |
(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}
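/*
 * A minimal userspace sketch (not part of this file) fetching one system
 * register through the ONE_REG interface; the index is built exactly as
 * sys_reg_to_index() does above, and everything except the KVM_* macros
 * and the ioctl itself is illustrative only:
 *
 *	__u64 val;
 *	struct kvm_one_reg one = {
 *		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG |
 *			(op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
 *			(op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
 *			(crn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
 *			(crm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
 *			(op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT),
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &one) < 0)
 *		perror("KVM_GET_ONE_REG");
 */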
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
if (!*uind)
return true;
if (put_user(sys_reg_to_index(reg), *uind))
return false;
(*uind)++;
return true;
}
/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
const struct sys_reg_desc *i1, *i2, *end1, *end2;
unsigned int total = 0;
size_t num;
/* We check for duplicates here, to allow arch-specific overrides. */
i1 = get_target_table(vcpu->arch.target, true, &num);
end1 = i1 + num;
i2 = sys_reg_descs;
end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
BUG_ON(i1 == end1 || i2 == end2);
/* Walk carefully, as both tables may refer to the same register. */
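/*
 * When both tables describe the same register (cmp == 0), only the
 * target-specific entry is reported and both iterators advance, so each
 * register appears at most once in the output.
 */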
while (i1 || i2) {
int cmp = cmp_sys_reg(i1, i2);
/* target-specific overrides generic entry. */
if (cmp <= 0) {
/* Ignore registers we trap but don't save. */
if (i1->reg) {
if (!copy_reg_to_user(i1, &uind))
return -EFAULT;
total++;
}
} else {
/* Ignore registers we trap but don't save. */
if (i2->reg) {
if (!copy_reg_to_user(i2, &uind))
return -EFAULT;
total++;
}
}
if (cmp <= 0 && ++i1 == end1)
i1 = NULL;
if (cmp >= 0 && ++i2 == end2)
i2 = NULL;
}
return total;
}
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
return ARRAY_SIZE(invariant_sys_regs)
+ num_demux_regs()
+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
unsigned int i;
int err;
/* Then give them all the invariant registers' indices. */
for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
return -EFAULT;
uindices++;
}
err = walk_sys_regs(vcpu, uindices);
if (err < 0)
return err;
uindices += err;
return write_demux_regids(uindices);
}
void kvm_sys_reg_table_init(void)
{
unsigned int i;
struct sys_reg_desc clidr;
/* Make sure tables are unique and in order. */
for (i = 1; i < ARRAY_SIZE(sys_reg_descs); i++)
BUG_ON(cmp_sys_reg(&sys_reg_descs[i-1], &sys_reg_descs[i]) >= 0);
/* We abuse the reset function to overwrite the table itself. */
for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
/*
* CLIDR format is awkward, so clean it up. See ARM B4.1.20:
*
* If software reads the Cache Type fields from Ctype1
* upwards, once it has seen a value of 0b000, no caches
* exist at further-out levels of the hierarchy. So, for
* example, if Ctype3 is the first Cache Type field with a
* value of 0b000, the values of Ctype4 to Ctype7 must be
* ignored.
*/
get_clidr_el1(NULL, &clidr); /* Ugly... */
cache_levels = clidr.val;
for (i = 0; i < 7; i++)
if (((cache_levels >> (i*3)) & 7) == 0)
break;
/* Clear all higher bits. */
cache_levels &= (1 << (i*3))-1;
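/*
 * Example: with Ctype1 = 0b011 (separate I/D) and Ctype2 = 0b100 (unified),
 * Ctype3 reads as zero, so the loop stops at i == 2 and the mask keeps only
 * the bottom 6 bits describing the two real cache levels.
 */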
}
/**
* kvm_reset_sys_regs - sets system registers to reset value
* @vcpu: The VCPU pointer
*
* This function finds the right table above and sets the registers on the
* virtual CPU struct to their architecturally defined reset values.
*/
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
size_t num;
const struct sys_reg_desc *table;
/* Catch someone adding a register without putting in reset entry. */
memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
/* Generic chip reset first (so target could override). */
reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
table = get_target_table(vcpu->arch.target, true, &num);
reset_sys_reg_descs(vcpu, table, num);
for (num = 1; num < NR_SYS_REGS; num++)
if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
panic("Didn't reset vcpu_sys_reg(%zi)", num);
}
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* Derived from arch/arm/kvm/coproc.h
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Authors: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ARM64_KVM_SYS_REGS_LOCAL_H__
#define __ARM64_KVM_SYS_REGS_LOCAL_H__
struct sys_reg_params {
u8 Op0;
u8 Op1;
u8 CRn;
u8 CRm;
u8 Op2;
u8 Rt;
bool is_write;
};
struct sys_reg_desc {
/* MRS/MSR instruction which accesses it. */
u8 Op0;
u8 Op1;
u8 CRn;
u8 CRm;
u8 Op2;
/* Trapped access from guest, if non-NULL. */
bool (*access)(struct kvm_vcpu *,
const struct sys_reg_params *,
const struct sys_reg_desc *);
/* Initialization for vcpu. */
void (*reset)(struct kvm_vcpu *, const struct sys_reg_desc *);
/* Index into sys_reg[], or 0 if we don't need to save it. */
int reg;
/* Value (usually reset value) */
u64 val;
};
static inline void print_sys_reg_instr(const struct sys_reg_params *p)
{
/* Look, we even formatted it for you to paste into the table! */
kvm_pr_unimpl(" { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n",
p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ? "write" : "read");
}
static inline bool ignore_write(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p)
{
return true;
}
static inline bool read_zero(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p)
{
*vcpu_reg(vcpu, p->Rt) = 0;
return true;
}
static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
const struct sys_reg_params *params)
{
kvm_debug("sys_reg write to read-only register at: %lx\n",
*vcpu_pc(vcpu));
print_sys_reg_instr(params);
return false;
}
static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
const struct sys_reg_params *params)
{
kvm_debug("sys_reg read to write-only register at: %lx\n",
*vcpu_pc(vcpu));
print_sys_reg_instr(params);
return false;
}
/* Reset functions */
static inline void reset_unknown(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
{
BUG_ON(!r->reg);
BUG_ON(r->reg >= NR_SYS_REGS);
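/* Fill with a recognisable poison value so stale, never-reset reads stand out. */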
vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL;
}
static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
BUG_ON(!r->reg);
BUG_ON(r->reg >= NR_SYS_REGS);
vcpu_sys_reg(vcpu, r->reg) = r->val;
}
static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
const struct sys_reg_desc *i2)
{
BUG_ON(i1 == i2);
if (!i1)
return 1;
else if (!i2)
return -1;
if (i1->Op0 != i2->Op0)
return i1->Op0 - i2->Op0;
if (i1->Op1 != i2->Op1)
return i1->Op1 - i2->Op1;
if (i1->CRn != i2->CRn)
return i1->CRn - i2->CRn;
if (i1->CRm != i2->CRm)
return i1->CRm - i2->CRm;
return i1->Op2 - i2->Op2;
}
#define Op0(_x) .Op0 = _x
#define Op1(_x) .Op1 = _x
#define CRn(_x) .CRn = _x
#define CRm(_x) .CRm = _x
#define Op2(_x) .Op2 = _x
#endif /* __ARM64_KVM_SYS_REGS_LOCAL_H__ */
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* Based on arch/arm/kvm/coproc_a15.c:
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Authors: Rusty Russell <rusty@rustcorp.com.au>
* Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kvm_host.h>
#include <asm/cputype.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <linux/init.h>
#include "sys_regs.h"
static bool access_actlr(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
if (p->is_write)
return ignore_write(vcpu, p);
*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1);
return true;
}
static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 actlr;
asm volatile("mrs %0, actlr_el1\n" : "=r" (actlr));
vcpu_sys_reg(vcpu, ACTLR_EL1) = actlr;
}
/*
* Implementation specific sys-reg registers.
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
*/
static const struct sys_reg_desc genericv8_sys_regs[] = {
/* ACTLR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001),
access_actlr, reset_actlr, ACTLR_EL1 },
};
static const struct sys_reg_desc genericv8_cp15_regs[] = {
/* ACTLR */
{ Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001),
access_actlr },
};
static struct kvm_sys_reg_target_table genericv8_target_table = {
.table64 = {
.table = genericv8_sys_regs,
.num = ARRAY_SIZE(genericv8_sys_regs),
},
.table32 = {
.table = genericv8_cp15_regs,
.num = ARRAY_SIZE(genericv8_cp15_regs),
},
};
static int __init sys_reg_genericv8_init(void)
{
unsigned int i;
for (i = 1; i < ARRAY_SIZE(genericv8_sys_regs); i++)
BUG_ON(cmp_sys_reg(&genericv8_sys_regs[i-1],
&genericv8_sys_regs[i]) >= 0);
kvm_register_target_sys_reg_table(KVM_ARM_TARGET_AEM_V8,
&genericv8_target_table);
kvm_register_target_sys_reg_table(KVM_ARM_TARGET_FOUNDATION_V8,
&genericv8_target_table);
kvm_register_target_sys_reg_table(KVM_ARM_TARGET_CORTEX_A57,
&genericv8_target_table);
return 0;
}
late_initcall(sys_reg_genericv8_init);
...@@ -666,6 +666,7 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_IRQ_MPIC 90
#define KVM_CAP_PPC_RTAS 91
#define KVM_CAP_IRQ_XICS 92
#define KVM_CAP_ARM_EL1_32BIT 93
#ifdef KVM_CAP_IRQ_ROUTING
...@@ -783,6 +784,7 @@ struct kvm_dirty_tlb {
#define KVM_REG_IA64 0x3000000000000000ULL
#define KVM_REG_ARM 0x4000000000000000ULL
#define KVM_REG_S390 0x5000000000000000ULL
#define KVM_REG_ARM64 0x6000000000000000ULL
#define KVM_REG_SIZE_SHIFT 52
#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL
...