Commit 60dd133a authored by Marc Zyngier


Merge tag 'vgic-migrate-for-marc' of git://git.linaro.org/people/christoffer.dall/linux-kvm-arm into kvm-arm64/next

VGIC and timer migration pull
parents 989c6b34 fa20f5ae
@@ -2391,7 +2391,8 @@ struct kvm_reg_list {
This ioctl returns the guest registers that are supported for the
KVM_GET_ONE_REG/KVM_SET_ONE_REG calls.
- 4.85 KVM_ARM_SET_DEVICE_ADDR
4.85 KVM_ARM_SET_DEVICE_ADDR (deprecated)
Capability: KVM_CAP_ARM_SET_DEVICE_ADDR
Architectures: arm, arm64
@@ -2429,6 +2430,10 @@ must be called after calling KVM_CREATE_IRQCHIP, but before calling
KVM_RUN on any of the VCPUs. Calling this ioctl twice for any of the
base addresses will return -EEXIST.
Note, this IOCTL is deprecated and the more flexible SET/GET_DEVICE_ATTR API
should be used instead.
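To make the replacement concrete, here is a minimal userspace sketch of setting the VGIC distributor base through the device control API, assuming the KVM_DEV_TYPE_ARM_VGIC_V2 and KVM_DEV_ARM_VGIC_GRP_ADDR definitions added elsewhere in this merge; the function name, the 0x8000000 base address, and the elided error handling are purely illustrative.

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Sketch: create a VGIC v2 device and set its distributor base address
 * via KVM_SET_DEVICE_ATTR instead of the deprecated KVM_ARM_SET_DEVICE_ADDR. */
static int set_vgic_dist_addr(int vm_fd)
{
	struct kvm_create_device create = {
		.type = KVM_DEV_TYPE_ARM_VGIC_V2,
	};
	uint64_t dist_addr = 0x8000000;	/* illustrative guest-physical base */
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
		.attr  = KVM_VGIC_V2_ADDR_TYPE_DIST,
		.addr  = (uint64_t)(unsigned long)&dist_addr,
	};

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &create) < 0)
		return -1;
	/* attr.addr points at the value to set; KVM_GET_DEVICE_ATTR reads it back. */
	return ioctl(create.fd, KVM_SET_DEVICE_ATTR, &attr);
}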
4.86 KVM_PPC_RTAS_DEFINE_TOKEN
Capability: KVM_CAP_PPC_RTAS
...
ARM Virtual Generic Interrupt Controller (VGIC)
===============================================
Device types supported:
  KVM_DEV_TYPE_ARM_VGIC_V2     ARM Generic Interrupt Controller v2.0

Only one VGIC instance may be instantiated through either this API or the
legacy KVM_CREATE_IRQCHIP API. The created VGIC will act as the VM interrupt
controller, requiring emulated user-space devices to inject interrupts to the
VGIC instead of directly to CPUs.
Groups:
  KVM_DEV_ARM_VGIC_GRP_ADDR
  Attributes:
    KVM_VGIC_V2_ADDR_TYPE_DIST (rw, 64-bit)
      Base address in the guest physical address space of the GIC distributor
      register mappings.
    KVM_VGIC_V2_ADDR_TYPE_CPU (rw, 64-bit)
      Base address in the guest physical address space of the GIC virtual cpu
      interface register mappings.
  KVM_DEV_ARM_VGIC_GRP_DIST_REGS
  Attributes:
    The attr field of kvm_device_attr encodes two values:
      bits:   | 63 .... 40 | 39 .. 32 | 31 .... 0 |
      values: |  reserved  |  cpu id  |  offset   |

    All distributor regs are (rw, 32-bit)

    The offset is relative to the "Distributor base address" as defined in the
    GICv2 specs. Getting or setting such a register has the same effect as
    reading or writing the register on the actual hardware from the cpu
    specified with the cpu id field (a userspace sketch of this encoding
    follows this section). Note that most distributor fields are not banked,
    but return the same value regardless of the cpu id used to access the
    register.
  Limitations:
    - Priorities are not implemented, and registers are RAZ/WI
  Errors:
    -ENODEV: Getting or setting this register is not yet supported
    -EBUSY: One or more VCPUs are running
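As a concrete illustration of the attr encoding above, a hedged userspace sketch that reads one 32-bit distributor register (GICD_CTLR at offset 0, as seen by vcpu 0) through this group; the vgic device fd is assumed to come from KVM_CREATE_DEVICE, and the helper name is illustrative.

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Sketch: read a 32-bit distributor register via KVM_DEV_ARM_VGIC_GRP_DIST_REGS.
 * The attr value packs the cpu id into bits 39..32 and the byte offset from
 * the distributor base into bits 31..0, exactly as documented above. */
static int vgic_read_dist_reg(int vgic_fd, uint8_t cpuid,
			      uint32_t offset, uint32_t *val)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
		.attr  = ((uint64_t)cpuid << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
			 (offset & KVM_DEV_ARM_VGIC_OFFSET_MASK),
		.addr  = (uint64_t)(unsigned long)val,
	};

	return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
}

Usage: vgic_read_dist_reg(vgic_fd, 0, 0, &ctlr) fetches GICD_CTLR; the same attr passed to KVM_SET_DEVICE_ATTR writes it, subject to the -EBUSY rule above.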
  KVM_DEV_ARM_VGIC_GRP_CPU_REGS
  Attributes:
    The attr field of kvm_device_attr encodes two values:
      bits:   | 63 .... 40 | 39 .. 32 | 31 .... 0 |
      values: |  reserved  |  cpu id  |  offset   |

    All CPU interface regs are (rw, 32-bit)

    The offset specifies the offset from the "CPU interface base address" as
    defined in the GICv2 specs. Getting or setting such a register has the
    same effect as reading or writing the register on the actual hardware.

    The Active Priorities Registers APRn are implementation defined, so we set
    a fixed format for our implementation that fits with the model of a "GICv2
    implementation without the security extensions" which we present to the
    guest. This interface always exposes four registers APR[0-3] describing
    the maximum possible 128 preemption levels. The semantics of the register
    indicate if any interrupts in a given preemption level are in the active
    state by setting the corresponding bit.

    Thus, preemption level X has one or more active interrupts if and only if:

      APRn[X mod 32] == 0b1, where n = X / 32

    Bits for undefined preemption levels are RAZ/WI (a code sketch of this
    check follows this section).
  Limitations:
    - Priorities are not implemented, and registers are RAZ/WI
  Errors:
    -ENODEV: Getting or setting this register is not yet supported
    -EBUSY: One or more VCPUs are running
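A minimal sketch of the APRn check above, assuming the four APR words have already been read through this group at offsets GIC_CPU_ACTIVEPRIO through GIC_CPU_ACTIVEPRIO+12 (the helper name is hypothetical):

#include <stdint.h>

/* Sketch: given APR0..APR3 as read through KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
 * test whether preemption level x (0..127) has any active interrupts:
 * APRn[x mod 32] == 1, where n = x / 32, per the semantics above. */
static int preemption_level_active(const uint32_t apr[4], unsigned int x)
{
	return (apr[x / 32] >> (x % 32)) & 1;
}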
@@ -225,4 +225,7 @@ static inline int kvm_arch_dev_ioctl_check_extension(long ext)
int kvm_perf_init(void);
int kvm_perf_teardown(void);
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
#endif /* __ARM_KVM_HOST_H__ */
@@ -119,6 +119,26 @@ struct kvm_arch_memory_slot {
#define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800
#define KVM_REG_ARM_32_CRN_SHIFT 11
#define ARM_CP15_REG_SHIFT_MASK(x,n) \
(((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK)
#define __ARM_CP15_REG(op1,crn,crm,op2) \
(KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT) | \
ARM_CP15_REG_SHIFT_MASK(op1, OPC1) | \
ARM_CP15_REG_SHIFT_MASK(crn, 32_CRN) | \
ARM_CP15_REG_SHIFT_MASK(crm, CRM) | \
ARM_CP15_REG_SHIFT_MASK(op2, 32_OPC2))
#define ARM_CP15_REG32(...) (__ARM_CP15_REG(__VA_ARGS__) | KVM_REG_SIZE_U32)
#define __ARM_CP15_REG64(op1,crm) \
(__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)
#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
#define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1)
#define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14)
#define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14)
/* Normal registers are mapped as coprocessor 16. */
#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT)
#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4)
@@ -143,6 +163,14 @@ struct kvm_arch_memory_slot {
#define KVM_REG_ARM_VFP_FPINST 0x1009
#define KVM_REG_ARM_VFP_FPINST2 0x100A
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2
#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32
#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
...
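The KVM_REG_ARM_TIMER_* ids added to this header plug into the standard KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls, which is how the timer state in this merge is migrated from userspace. A hedged sketch follows; the function name is illustrative and error handling is abbreviated. Writing KVM_REG_ARM_TIMER_CNT on the destination makes kvm_arm_timer_set_reg() (later in this diff) recompute cntvoff, so the guest's virtual counter appears to have kept running across the migration.

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Sketch: copy the three timer registers from a source vcpu fd to a
 * destination vcpu fd using the register ids defined above. */
static int migrate_timer_regs(int src_vcpu_fd, int dst_vcpu_fd)
{
	static const uint64_t ids[] = {
		KVM_REG_ARM_TIMER_CTL,
		KVM_REG_ARM_TIMER_CVAL,
		KVM_REG_ARM_TIMER_CNT,	/* setting this adjusts cntvoff */
	};
	unsigned int i;

	for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++) {
		uint64_t val;
		struct kvm_one_reg reg = {
			.id   = ids[i],
			.addr = (uint64_t)(unsigned long)&val,
		};

		if (ioctl(src_vcpu_fd, KVM_GET_ONE_REG, &reg) < 0 ||
		    ioctl(dst_vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
			return -1;
	}
	return 0;
}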
@@ -137,6 +137,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (ret)
goto out_free_stage2_pgd;
kvm_timer_init(kvm);
/* Mark the initial VMID generation invalid */
kvm->arch.vmid_gen = 0;
@@ -188,6 +190,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_IRQCHIP:
r = vgic_present;
break;
case KVM_CAP_DEVICE_CTRL:
case KVM_CAP_USER_MEMORY:
case KVM_CAP_SYNC_MMU:
case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
@@ -339,6 +342,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
/*
* The arch-generic KVM code expects the cpu field of a vcpu to be -1
* if the vcpu is no longer assigned to a cpu. This is used for the
* optimized make_all_cpus_request path.
*/
vcpu->cpu = -1;
kvm_arm_set_running_vcpu(NULL);
}
@@ -462,6 +472,8 @@ static void update_vttbr(struct kvm *kvm)
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
int ret;
if (likely(vcpu->arch.has_run_once))
return 0;
@@ -471,9 +483,8 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
* Initialize the VGIC before running a vcpu the first time on
* this VM.
*/
- if (irqchip_in_kernel(vcpu->kvm) &&
- unlikely(!vgic_initialized(vcpu->kvm))) {
- int ret = kvm_vgic_init(vcpu->kvm);
if (unlikely(!vgic_initialized(vcpu->kvm))) {
ret = kvm_vgic_init(vcpu->kvm);
if (ret)
return ret;
}
@@ -772,7 +783,7 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
case KVM_ARM_DEVICE_VGIC_V2:
if (!vgic_present)
return -ENXIO;
- return kvm_vgic_set_addr(kvm, type, dev_addr->addr);
return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
default:
return -ENODEV;
}
...
@@ -109,6 +109,83 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return -EINVAL;
}
#ifndef CONFIG_KVM_ARM_TIMER
#define NUM_TIMER_REGS 0
static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
return 0;
}
static bool is_timer_reg(u64 index)
{
return false;
}
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
return 0;
}
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
return 0;
}
#else
#define NUM_TIMER_REGS 3
static bool is_timer_reg(u64 index)
{
switch (index) {
case KVM_REG_ARM_TIMER_CTL:
case KVM_REG_ARM_TIMER_CNT:
case KVM_REG_ARM_TIMER_CVAL:
return true;
}
return false;
}
static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
return -EFAULT;
uindices++;
if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
return -EFAULT;
uindices++;
if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
return -EFAULT;
return 0;
}
#endif
static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
void __user *uaddr = (void __user *)(long)reg->addr;
u64 val;
int ret;
ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
if (ret != 0)
return ret;
return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}
static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
void __user *uaddr = (void __user *)(long)reg->addr;
u64 val;
val = kvm_arm_timer_get_reg(vcpu, reg->id);
return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
}
static unsigned long num_core_regs(void)
{
return sizeof(struct kvm_regs) / sizeof(u32);
@@ -121,7 +198,8 @@ static unsigned long num_core_regs(void)
*/
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
- return num_core_regs() + kvm_arm_num_coproc_regs(vcpu);
return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
+ NUM_TIMER_REGS;
}
/**
@@ -133,6 +211,7 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
unsigned int i;
const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE;
int ret;
for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) {
if (put_user(core_reg | i, uindices))
@@ -140,6 +219,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
uindices++;
}
ret = copy_timer_indices(vcpu, uindices);
if (ret)
return ret;
uindices += NUM_TIMER_REGS;
return kvm_arm_copy_coproc_indices(vcpu, uindices);
}
@@ -153,6 +237,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return get_core_reg(vcpu, reg);
if (is_timer_reg(reg->id))
return get_timer_reg(vcpu, reg);
return kvm_arm_coproc_get_reg(vcpu, reg);
}
@@ -166,6 +253,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return set_core_reg(vcpu, reg);
if (is_timer_reg(reg->id))
return set_timer_reg(vcpu, reg);
return kvm_arm_coproc_set_reg(vcpu, reg);
}
...
@@ -129,6 +129,24 @@ struct kvm_arch_memory_slot {
#define KVM_REG_ARM64_SYSREG_OP2_MASK 0x0000000000000007
#define KVM_REG_ARM64_SYSREG_OP2_SHIFT 0
#define ARM64_SYS_REG_SHIFT_MASK(x,n) \
(((x) << KVM_REG_ARM64_SYSREG_ ## n ## _SHIFT) & \
KVM_REG_ARM64_SYSREG_ ## n ## _MASK)
#define __ARM64_SYS_REG(op0,op1,crn,crm,op2) \
(KVM_REG_ARM64 | KVM_REG_ARM64_SYSREG | \
ARM64_SYS_REG_SHIFT_MASK(op0, OP0) | \
ARM64_SYS_REG_SHIFT_MASK(op1, OP1) | \
ARM64_SYS_REG_SHIFT_MASK(crn, CRN) | \
ARM64_SYS_REG_SHIFT_MASK(crm, CRM) | \
ARM64_SYS_REG_SHIFT_MASK(op2, OP2))
#define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64)
#define KVM_REG_ARM_TIMER_CTL ARM64_SYS_REG(3, 3, 14, 3, 1)
#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
#define KVM_ARM_IRQ_TYPE_MASK 0xff
...
@@ -144,7 +144,7 @@ struct kvm_run;
struct kvm_exit_mmio;
#ifdef CONFIG_KVM_ARM_VGIC
- int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr);
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
int kvm_vgic_hyp_init(void);
int kvm_vgic_init(struct kvm *kvm);
int kvm_vgic_create(struct kvm *kvm);
...
@@ -17,6 +17,9 @@
#define GIC_CPU_EOI 0x10
#define GIC_CPU_RUNNINGPRI 0x14
#define GIC_CPU_HIGHPRI 0x18
#define GIC_CPU_ALIAS_BINPOINT 0x1c
#define GIC_CPU_ACTIVEPRIO 0xd0
#define GIC_CPU_IDENT 0xfc
#define GIC_DIST_CTRL 0x000
#define GIC_DIST_CTR 0x004
@@ -56,6 +59,15 @@
#define GICH_LR_ACTIVE_BIT (1 << 29)
#define GICH_LR_EOI (1 << 19)
#define GICH_VMCR_CTRL_SHIFT 0
#define GICH_VMCR_CTRL_MASK (0x21f << GICH_VMCR_CTRL_SHIFT)
#define GICH_VMCR_PRIMASK_SHIFT 27
#define GICH_VMCR_PRIMASK_MASK (0x1f << GICH_VMCR_PRIMASK_SHIFT)
#define GICH_VMCR_BINPOINT_SHIFT 21
#define GICH_VMCR_BINPOINT_MASK (0x7 << GICH_VMCR_BINPOINT_SHIFT)
#define GICH_VMCR_ALIAS_BINPOINT_SHIFT 18
#define GICH_VMCR_ALIAS_BINPOINT_MASK (0x7 << GICH_VMCR_ALIAS_BINPOINT_SHIFT)
#define GICH_MISR_EOI (1 << 0)
#define GICH_MISR_U (1 << 1)
...
@@ -1075,6 +1075,7 @@ struct kvm_device *kvm_device_from_filp(struct file *filp);
extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;
extern struct kvm_device_ops kvm_vfio_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
...
@@ -853,6 +853,7 @@ struct kvm_device_attr {
#define KVM_DEV_VFIO_GROUP 1
#define KVM_DEV_VFIO_GROUP_ADD 1
#define KVM_DEV_VFIO_GROUP_DEL 2
#define KVM_DEV_TYPE_ARM_VGIC_V2 5
/*
* ioctls for VM fds
...
@@ -182,6 +182,40 @@ static void kvm_timer_init_interrupt(void *info)
enable_percpu_irq(host_vtimer_irq, 0);
}
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
switch (regid) {
case KVM_REG_ARM_TIMER_CTL:
timer->cntv_ctl = value;
break;
case KVM_REG_ARM_TIMER_CNT:
vcpu->kvm->arch.timer.cntvoff = kvm_phys_timer_read() - value;
break;
case KVM_REG_ARM_TIMER_CVAL:
timer->cntv_cval = value;
break;
default:
return -1;
}
return 0;
}
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
switch (regid) {
case KVM_REG_ARM_TIMER_CTL:
return timer->cntv_ctl;
case KVM_REG_ARM_TIMER_CNT:
return kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
case KVM_REG_ARM_TIMER_CVAL:
return timer->cntv_cval;
}
return (u64)-1;
}
static int kvm_timer_cpu_notify(struct notifier_block *self,
unsigned long action, void *cpu)
...
@@ -71,6 +71,10 @@
#define VGIC_ADDR_UNDEF (-1)
#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
#define PRODUCT_ID_KVM 0x4b /* ASCII code K */
#define IMPLEMENTER_ARM 0x43b
#define GICC_ARCH_VERSION_V2 0x2
/* Physical address of vgic virtual cpu interface */
static phys_addr_t vgic_vcpu_base;
@@ -312,7 +316,7 @@ static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
u32 word_offset = offset & 3;
switch (offset & ~3) {
- case 0: /* CTLR */
case 0: /* GICD_CTLR */
reg = vcpu->kvm->arch.vgic.enabled;
vgic_reg_access(mmio, &reg, word_offset,
ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
@@ -323,15 +327,15 @@ static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
}
break;
- case 4: /* TYPER */
case 4: /* GICD_TYPER */
reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
reg |= (VGIC_NR_IRQS >> 5) - 1;
vgic_reg_access(mmio, &reg, word_offset,
ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
break;
- case 8: /* IIDR */
case 8: /* GICD_IIDR */
- reg = 0x4B00043B;
reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
vgic_reg_access(mmio, &reg, word_offset,
ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
break;
@@ -589,6 +593,156 @@ static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
return false;
}
#define LR_CPUID(lr) \
(((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT)
#define LR_IRQID(lr) \
((lr) & GICH_LR_VIRTUALID)
static void vgic_retire_lr(int lr_nr, int irq, struct vgic_cpu *vgic_cpu)
{
clear_bit(lr_nr, vgic_cpu->lr_used);
vgic_cpu->vgic_lr[lr_nr] &= ~GICH_LR_STATE;
vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
}
/**
* vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
* @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
*
* Move any pending IRQs that have already been assigned to LRs back to the
* emulated distributor state so that the complete emulated state can be read
* from the main emulation structures without investigating the LRs.
*
* Note that IRQs in the active state in the LRs get their pending state moved
* to the distributor but the active state stays in the LRs, because we don't
* track the active state on the distributor side.
*/
static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
int vcpu_id = vcpu->vcpu_id;
int i, irq, source_cpu;
u32 *lr;
for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
lr = &vgic_cpu->vgic_lr[i];
irq = LR_IRQID(*lr);
source_cpu = LR_CPUID(*lr);
/*
* There are three options for the state bits:
*
* 01: pending
* 10: active
* 11: pending and active
*
* If the LR holds only an active interrupt (not pending) then
* just leave it alone.
*/
if ((*lr & GICH_LR_STATE) == GICH_LR_ACTIVE_BIT)
continue;
/*
* Reestablish the pending state on the distributor and the
* CPU interface. It may have already been pending, but that
* is fine, then we are only setting a few bits that were
* already set.
*/
vgic_dist_irq_set(vcpu, irq);
if (irq < VGIC_NR_SGIS)
dist->irq_sgi_sources[vcpu_id][irq] |= 1 << source_cpu;
*lr &= ~GICH_LR_PENDING_BIT;
/*
* If there's no state left on the LR (it could still be
* active), then the LR does not hold any useful info and can
* be marked as free for other use.
*/
if (!(*lr & GICH_LR_STATE))
vgic_retire_lr(i, irq, vgic_cpu);
/* Finally update the VGIC state. */
vgic_update_state(vcpu->kvm);
}
}
/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
phys_addr_t offset)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
int sgi;
int min_sgi = (offset & ~0x3) * 4;
int max_sgi = min_sgi + 3;
int vcpu_id = vcpu->vcpu_id;
u32 reg = 0;
/* Copy source SGIs from distributor side */
for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
int shift = 8 * (sgi - min_sgi);
reg |= (u32)dist->irq_sgi_sources[vcpu_id][sgi] << shift;
}
mmio_data_write(mmio, ~0, reg);
return false;
}
static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
phys_addr_t offset, bool set)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
int sgi;
int min_sgi = (offset & ~0x3) * 4;
int max_sgi = min_sgi + 3;
int vcpu_id = vcpu->vcpu_id;
u32 reg;
bool updated = false;
reg = mmio_data_read(mmio, ~0);
/* Clear pending SGIs on the distributor */
for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
u8 mask = reg >> (8 * (sgi - min_sgi));
if (set) {
if ((dist->irq_sgi_sources[vcpu_id][sgi] & mask) != mask)
updated = true;
dist->irq_sgi_sources[vcpu_id][sgi] |= mask;
} else {
if (dist->irq_sgi_sources[vcpu_id][sgi] & mask)
updated = true;
dist->irq_sgi_sources[vcpu_id][sgi] &= ~mask;
}
}
if (updated)
vgic_update_state(vcpu->kvm);
return updated;
}
static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
phys_addr_t offset)
{
if (!mmio->is_write)
return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
else
return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}
static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
phys_addr_t offset)
{
if (!mmio->is_write)
return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
else
return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}
/*
* I would have liked to use the kvm_bus_io_*() API instead, but it
* cannot cope with banked registers (only the VM pointer is passed
@@ -602,7 +756,7 @@ struct mmio_range {
phys_addr_t offset);
};
- static const struct mmio_range vgic_ranges[] = {
static const struct mmio_range vgic_dist_ranges[] = {
{
.base = GIC_DIST_CTRL,
.len = 12,
@@ -663,20 +817,29 @@ static const struct mmio_range vgic_ranges[] = {
.len = 4,
.handle_mmio = handle_mmio_sgi_reg,
},
{
.base = GIC_DIST_SGI_PENDING_CLEAR,
.len = VGIC_NR_SGIS,
.handle_mmio = handle_mmio_sgi_clear,
},
{
.base = GIC_DIST_SGI_PENDING_SET,
.len = VGIC_NR_SGIS,
.handle_mmio = handle_mmio_sgi_set,
},
{}
};
static const
struct mmio_range *find_matching_range(const struct mmio_range *ranges,
struct kvm_exit_mmio *mmio,
- phys_addr_t base)
phys_addr_t offset)
{
const struct mmio_range *r = ranges;
- phys_addr_t addr = mmio->phys_addr - base;
while (r->len) {
- if (addr >= r->base &&
- (addr + mmio->len) <= (r->base + r->len))
if (offset >= r->base &&
(offset + mmio->len) <= (r->base + r->len))
return r;
r++;
}
@@ -713,7 +876,8 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
return true;
}
- range = find_matching_range(vgic_ranges, mmio, base);
offset = mmio->phys_addr - base;
range = find_matching_range(vgic_dist_ranges, mmio, offset);
if (unlikely(!range || !range->handle_mmio)) {
pr_warn("Unhandled access %d %08llx %d\n",
mmio->is_write, mmio->phys_addr, mmio->len);
@@ -824,8 +988,6 @@ static void vgic_update_state(struct kvm *kvm)
}
}
- #define LR_CPUID(lr) \
- (((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT)
#define MK_LR_PEND(src, irq) \
(GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
@@ -847,9 +1009,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
if (!vgic_irq_is_enabled(vcpu, irq)) {
- vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
- clear_bit(lr, vgic_cpu->lr_used);
- vgic_cpu->vgic_lr[lr] &= ~GICH_LR_STATE;
vgic_retire_lr(lr, irq, vgic_cpu);
if (vgic_irq_is_active(vcpu, irq))
vgic_irq_clear_active(vcpu, irq);
}
@@ -1243,15 +1403,19 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
return IRQ_HANDLED;
}
/**
* kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
* @vcpu: pointer to the vcpu struct
*
* Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
* this vcpu and enable the VGIC for this VCPU
*/
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
int i;
- if (!irqchip_in_kernel(vcpu->kvm))
- return 0;
if (vcpu->vcpu_id >= VGIC_MAX_CPUS)
return -EBUSY;
@@ -1383,10 +1547,22 @@ int kvm_vgic_hyp_init(void)
return ret;
}
/**
* kvm_vgic_init - Initialize global VGIC state before running any VCPUs
* @kvm: pointer to the kvm struct
*
* Map the virtual CPU interface into the VM before running any VCPUs. We
* can't do this at creation time, because user space must first set the
* virtual CPU interface address in the guest physical address space. Also
* initialize the ITARGETSRn regs to 0 on the emulated distributor.
*/
int kvm_vgic_init(struct kvm *kvm)
{
int ret = 0, i;
if (!irqchip_in_kernel(kvm))
return 0;
mutex_lock(&kvm->lock);
if (vgic_initialized(kvm))
@@ -1409,7 +1585,6 @@ int kvm_vgic_init(struct kvm *kvm)
for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
vgic_set_target_reg(kvm, 0, i);
- kvm_timer_init(kvm);
kvm->arch.vgic.ready = true;
out:
mutex_unlock(&kvm->lock);
@@ -1418,20 +1593,45 @@ int kvm_vgic_init(struct kvm *kvm)
int kvm_vgic_create(struct kvm *kvm)
{
- int ret = 0;
int i, vcpu_lock_idx = -1, ret = 0;
struct kvm_vcpu *vcpu;
mutex_lock(&kvm->lock);
- if (atomic_read(&kvm->online_vcpus) || kvm->arch.vgic.vctrl_base) {
if (kvm->arch.vgic.vctrl_base) {
ret = -EEXIST;
goto out;
}
/*
* Any time a vcpu is run, vcpu_load is called which tries to grab the
* vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
* that no other VCPUs are run while we create the vgic.
*/
kvm_for_each_vcpu(i, vcpu, kvm) {
if (!mutex_trylock(&vcpu->mutex))
goto out_unlock;
vcpu_lock_idx = i;
}
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->arch.has_run_once) {
ret = -EBUSY;
goto out_unlock;
}
}
spin_lock_init(&kvm->arch.vgic.lock);
kvm->arch.vgic.vctrl_base = vgic_vctrl_base;
kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
out_unlock:
for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
mutex_unlock(&vcpu->mutex);
}
out:
mutex_unlock(&kvm->lock);
return ret;
@@ -1455,6 +1655,12 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
{
int ret;
if (addr & ~KVM_PHYS_MASK)
return -E2BIG;
if (addr & (SZ_4K - 1))
return -EINVAL;
if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
return -EEXIST;
if (addr + size < addr)
@@ -1467,26 +1673,41 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
return ret;
}
- int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
/**
* kvm_vgic_addr - set or get vgic VM base addresses
* @kvm: pointer to the vm struct
* @type: the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX
* @addr: pointer to address value
* @write: if true set the address in the VM address space, if false read the
* address
*
* Set or get the vgic base addresses for the distributor and the virtual CPU
* interface in the VM physical address space. These addresses are properties
* of the emulated core/SoC and therefore user space initially knows this
* information.
*/
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
int r = 0;
struct vgic_dist *vgic = &kvm->arch.vgic;
- if (addr & ~KVM_PHYS_MASK)
- return -E2BIG;
- if (addr & (SZ_4K - 1))
- return -EINVAL;
mutex_lock(&kvm->lock);
switch (type) {
case KVM_VGIC_V2_ADDR_TYPE_DIST:
if (write) {
r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
- addr, KVM_VGIC_V2_DIST_SIZE);
*addr, KVM_VGIC_V2_DIST_SIZE);
} else {
*addr = vgic->vgic_dist_base;
}
break;
case KVM_VGIC_V2_ADDR_TYPE_CPU:
if (write) {
r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
- addr, KVM_VGIC_V2_CPU_SIZE);
*addr, KVM_VGIC_V2_CPU_SIZE);
} else {
*addr = vgic->vgic_cpu_base;
}
break;
default:
r = -ENODEV;
@@ -1495,3 +1716,302 @@ int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
mutex_unlock(&kvm->lock);
return r;
}
static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
u32 reg, mask = 0, shift = 0;
bool updated = false;
switch (offset & ~0x3) {
case GIC_CPU_CTRL:
mask = GICH_VMCR_CTRL_MASK;
shift = GICH_VMCR_CTRL_SHIFT;
break;
case GIC_CPU_PRIMASK:
mask = GICH_VMCR_PRIMASK_MASK;
shift = GICH_VMCR_PRIMASK_SHIFT;
break;
case GIC_CPU_BINPOINT:
mask = GICH_VMCR_BINPOINT_MASK;
shift = GICH_VMCR_BINPOINT_SHIFT;
break;
case GIC_CPU_ALIAS_BINPOINT:
mask = GICH_VMCR_ALIAS_BINPOINT_MASK;
shift = GICH_VMCR_ALIAS_BINPOINT_SHIFT;
break;
}
if (!mmio->is_write) {
reg = (vgic_cpu->vgic_vmcr & mask) >> shift;
mmio_data_write(mmio, ~0, reg);
} else {
reg = mmio_data_read(mmio, ~0);
reg = (reg << shift) & mask;
if (reg != (vgic_cpu->vgic_vmcr & mask))
updated = true;
vgic_cpu->vgic_vmcr &= ~mask;
vgic_cpu->vgic_vmcr |= reg;
}
return updated;
}
static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
}
static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio,
phys_addr_t offset)
{
u32 reg;
if (mmio->is_write)
return false;
/* GICC_IIDR */
reg = (PRODUCT_ID_KVM << 20) |
(GICC_ARCH_VERSION_V2 << 16) |
(IMPLEMENTER_ARM << 0);
mmio_data_write(mmio, ~0, reg);
return false;
}
/*
* CPU Interface Register accesses - these are not accessed by the VM, but by
* user space for saving and restoring VGIC state.
*/
static const struct mmio_range vgic_cpu_ranges[] = {
{
.base = GIC_CPU_CTRL,
.len = 12,
.handle_mmio = handle_cpu_mmio_misc,
},
{
.base = GIC_CPU_ALIAS_BINPOINT,
.len = 4,
.handle_mmio = handle_mmio_abpr,
},
{
.base = GIC_CPU_ACTIVEPRIO,
.len = 16,
.handle_mmio = handle_mmio_raz_wi,
},
{
.base = GIC_CPU_IDENT,
.len = 4,
.handle_mmio = handle_cpu_mmio_ident,
},
};
static int vgic_attr_regs_access(struct kvm_device *dev,
struct kvm_device_attr *attr,
u32 *reg, bool is_write)
{
const struct mmio_range *r = NULL, *ranges;
phys_addr_t offset;
int ret, cpuid, c;
struct kvm_vcpu *vcpu, *tmp_vcpu;
struct vgic_dist *vgic;
struct kvm_exit_mmio mmio;
offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
KVM_DEV_ARM_VGIC_CPUID_SHIFT;
mutex_lock(&dev->kvm->lock);
if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
ret = -EINVAL;
goto out;
}
vcpu = kvm_get_vcpu(dev->kvm, cpuid);
vgic = &dev->kvm->arch.vgic;
mmio.len = 4;
mmio.is_write = is_write;
if (is_write)
mmio_data_write(&mmio, ~0, *reg);
switch (attr->group) {
case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
mmio.phys_addr = vgic->vgic_dist_base + offset;
ranges = vgic_dist_ranges;
break;
case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
mmio.phys_addr = vgic->vgic_cpu_base + offset;
ranges = vgic_cpu_ranges;
break;
default:
BUG();
}
r = find_matching_range(ranges, &mmio, offset);
if (unlikely(!r || !r->handle_mmio)) {
ret = -ENXIO;
goto out;
}
spin_lock(&vgic->lock);
/*
* Ensure that no other VCPU is running by checking the vcpu->cpu
* field. If no other VPCUs are running we can safely access the VGIC
* state, because even if another VPU is run after this point, that
* VCPU will not touch the vgic state, because it will block on
* getting the vgic->lock in kvm_vgic_sync_hwstate().
*/
kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
if (unlikely(tmp_vcpu->cpu != -1)) {
ret = -EBUSY;
goto out_vgic_unlock;
}
}
/*
* Move all pending IRQs from the LRs on all VCPUs so the pending
* state can be properly represented in the register state accessible
* through this API.
*/
kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
vgic_unqueue_irqs(tmp_vcpu);
offset -= r->base;
r->handle_mmio(vcpu, &mmio, offset);
if (!is_write)
*reg = mmio_data_read(&mmio, ~0);
ret = 0;
out_vgic_unlock:
spin_unlock(&vgic->lock);
out:
mutex_unlock(&dev->kvm->lock);
return ret;
}
static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
int r;
switch (attr->group) {
case KVM_DEV_ARM_VGIC_GRP_ADDR: {
u64 __user *uaddr = (u64 __user *)(long)attr->addr;
u64 addr;
unsigned long type = (unsigned long)attr->attr;
if (copy_from_user(&addr, uaddr, sizeof(addr)))
return -EFAULT;
r = kvm_vgic_addr(dev->kvm, type, &addr, true);
return (r == -ENODEV) ? -ENXIO : r;
}
case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
u32 reg;
if (get_user(reg, uaddr))
return -EFAULT;
return vgic_attr_regs_access(dev, attr, &reg, true);
}
}
return -ENXIO;
}
static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
int r = -ENXIO;
switch (attr->group) {
case KVM_DEV_ARM_VGIC_GRP_ADDR: {
u64 __user *uaddr = (u64 __user *)(long)attr->addr;
u64 addr;
unsigned long type = (unsigned long)attr->attr;
r = kvm_vgic_addr(dev->kvm, type, &addr, false);
if (r)
return (r == -ENODEV) ? -ENXIO : r;
if (copy_to_user(uaddr, &addr, sizeof(addr)))
return -EFAULT;
break;
}
case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
u32 __user *uaddr = (u32 __user *)(long)attr->addr;
u32 reg = 0;
r = vgic_attr_regs_access(dev, attr, &reg, false);
if (r)
return r;
r = put_user(reg, uaddr);
break;
}
}
return r;
}
static int vgic_has_attr_regs(const struct mmio_range *ranges,
phys_addr_t offset)
{
struct kvm_exit_mmio dev_attr_mmio;
dev_attr_mmio.len = 4;
if (find_matching_range(ranges, &dev_attr_mmio, offset))
return 0;
else
return -ENXIO;
}
static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
phys_addr_t offset;
switch (attr->group) {
case KVM_DEV_ARM_VGIC_GRP_ADDR:
switch (attr->attr) {
case KVM_VGIC_V2_ADDR_TYPE_DIST:
case KVM_VGIC_V2_ADDR_TYPE_CPU:
return 0;
}
break;
case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
return vgic_has_attr_regs(vgic_dist_ranges, offset);
case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
return vgic_has_attr_regs(vgic_cpu_ranges, offset);
}
return -ENXIO;
}
static void vgic_destroy(struct kvm_device *dev)
{
kfree(dev);
}
static int vgic_create(struct kvm_device *dev, u32 type)
{
return kvm_vgic_create(dev->kvm);
}
struct kvm_device_ops kvm_arm_vgic_v2_ops = {
.name = "kvm-arm-vgic",
.create = vgic_create,
.destroy = vgic_destroy,
.set_attr = vgic_set_attr,
.get_attr = vgic_get_attr,
.has_attr = vgic_has_attr,
};
@@ -2272,6 +2272,11 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
case KVM_DEV_TYPE_VFIO:
ops = &kvm_vfio_ops;
break;
#endif
#ifdef CONFIG_KVM_ARM_VGIC
case KVM_DEV_TYPE_ARM_VGIC_V2:
ops = &kvm_arm_vgic_v2_ops;
break;
#endif
default:
return -ENODEV;
...