Commit b0960b95 authored by James Morse, committed by Marc Zyngier

KVM: arm: Add 32bit get/set events support

arm64's new use of KVM's get_events/set_events API calls isn't just
for RAS; it allows an SError that has been made pending by KVM as
part of its device emulation to be migrated.

Wire this up for 32bit too.

We only need to read/write the HCR_VA bit, and check that no ESR has
been provided, as we don't yet support VDFSR.
Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Dongjiu Geng <gengdongjiu@huawei.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 539aee0e
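
For illustration only (not part of this commit): a minimal userspace sketch of the source side of such a migration, using the KVM_GET_VCPU_EVENTS ioctl from <linux/kvm.h>. The vcpu_fd parameter is an assumed, already-created vCPU file descriptor, and error handling is elided.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Snapshot any SError KVM has made pending, so it can travel with
	 * the rest of the vCPU state to the migration destination. */
	static int save_vcpu_events(int vcpu_fd, struct kvm_vcpu_events *ev)
	{
		memset(ev, 0, sizeof(*ev));
		return ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, ev);
	}

On the destination, the same struct is fed back through KVM_SET_VCPU_EVENTS to re-pend the SError before the guest runs.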
@@ -835,7 +835,7 @@ struct kvm_clock_data {
 Capability: KVM_CAP_VCPU_EVENTS
 Extended by: KVM_CAP_INTR_SHADOW
-Architectures: x86, arm64
+Architectures: x86, arm, arm64
 Type: vcpu ioctl
 Parameters: struct kvm_vcpu_event (out)
 Returns: 0 on success, -1 on error
@@ -883,7 +883,7 @@ Only two fields are defined in the flags field:
 - KVM_VCPUEVENT_VALID_SMM may be set in the flags field to signal that
   smi contains a valid state.
-ARM64:
+ARM/ARM64:
 If the guest accesses a device that is being emulated by the host kernel in
 such a way that a real device would generate a physical SError, KVM may make
@@ -934,7 +934,7 @@ struct kvm_vcpu_events {
 Capability: KVM_CAP_VCPU_EVENTS
 Extended by: KVM_CAP_INTR_SHADOW
-Architectures: x86, arm64
+Architectures: x86, arm, arm64
 Type: vcpu ioctl
 Parameters: struct kvm_vcpu_event (in)
 Returns: 0 on success, -1 on error
@@ -961,7 +961,7 @@ shall be written into the VCPU.
 KVM_VCPUEVENT_VALID_SMM can only be set if KVM_CAP_X86_SMM is available.
-ARM64:
+ARM/ARM64:
 Set the pending SError exception state for this VCPU. It is not possible to
 'cancel' an Serror that has been made pending.
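
A hedged sketch of the 32bit restriction described above (vcpu_fd is an assumed open vCPU descriptor): pending an SError is allowed, but supplying an ESR is not, since it cannot be expressed without VDFSR support.

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int pend_serror(int vcpu_fd)
	{
		struct kvm_vcpu_events ev = { 0 };

		ev.exception.serror_pending = 1;
		/* Also setting ev.exception.serror_has_esr = 1 here would make
		 * the ioctl fail with EINVAL on 32bit arm. */
		return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &ev);
	}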
......
@@ -216,6 +216,11 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 unsigned long kvm_call_hyp(void *hypfn, ...);
 void force_vm_exit(const cpumask_t *mask);
+int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+			      struct kvm_vcpu_events *events);
+
+int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+			      struct kvm_vcpu_events *events);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
......
@@ -27,6 +27,7 @@
 #define __KVM_HAVE_GUEST_DEBUG
 #define __KVM_HAVE_IRQ_LINE
 #define __KVM_HAVE_READONLY_MEM
+#define __KVM_HAVE_VCPU_EVENTS
 
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
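
Defining __KVM_HAVE_VCPU_EVENTS is what compiles the generic KVM_GET/SET_VCPU_EVENTS plumbing in for 32bit arm; as the documentation hunks above note, the ioctls are gated on KVM_CAP_VCPU_EVENTS, so userspace should probe before use. An illustrative check (vm_fd assumed):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Nonzero when this host supports KVM_GET/SET_VCPU_EVENTS. */
	static int have_vcpu_events(int vm_fd)
	{
		return ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_VCPU_EVENTS);
	}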
@@ -125,6 +126,18 @@ struct kvm_sync_regs {
 struct kvm_arch_memory_slot {
 };
 
+/* for KVM_GET/SET_VCPU_EVENTS */
+struct kvm_vcpu_events {
+	struct {
+		__u8 serror_pending;
+		__u8 serror_has_esr;
+		/* Align it to 8 bytes */
+		__u8 pad[6];
+		__u64 serror_esr;
+	} exception;
+	__u32 reserved[12];
+};
+
 /* If you need to interpret the index values, here is the key: */
 #define KVM_REG_ARM_COPROC_MASK		0x000000000FFF0000
 #define KVM_REG_ARM_COPROC_SHIFT	16
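
The pad[6] member keeps serror_esr naturally aligned and matches the arm64 layout, so VMM save/restore code can be shared across the two architectures. A compile-time sanity check one could write against this uapi (the assertions are illustrative, not part of the commit):

	#include <stddef.h>
	#include <linux/kvm.h>

	_Static_assert(offsetof(struct kvm_vcpu_events, exception.serror_esr) == 8,
		       "pad[6] places serror_esr on an 8-byte boundary");
	_Static_assert(sizeof(struct kvm_vcpu_events) == 64,
		       "16 bytes of exception state plus 48 reserved bytes");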
......
@@ -261,6 +261,29 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	return -EINVAL;
 }
 
+int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+			      struct kvm_vcpu_events *events)
+{
+	events->exception.serror_pending = !!(*vcpu_hcr(vcpu) & HCR_VA);
+
+	return 0;
+}
+
+int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+			      struct kvm_vcpu_events *events)
+{
+	bool serror_pending = events->exception.serror_pending;
+	bool has_esr = events->exception.serror_has_esr;
+
+	if (serror_pending && has_esr)
+		return -EINVAL;
+	else if (serror_pending)
+		kvm_inject_vabt(vcpu);
+
+	return 0;
+}
+
 int __attribute_const__ kvm_target_cpu(void)
 {
 	switch (read_cpuid_part()) {
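
Get and set are symmetric around a single bit: __kvm_arm_vcpu_get_events() reports whether HCR_VA is set in the vCPU's shadowed HCR, and kvm_inject_vabt() pends the SError by setting that same bit. Roughly what the existing 32bit helper amounts to (a sketch, not part of this diff):

	/* kvm_inject_vabt(), approximately: raise a virtual abort by setting
	 * HCR.VA, which hardware delivers to the guest as an asynchronous
	 * external abort the next time it runs. */
	static void inject_vabt_sketch(struct kvm_vcpu *vcpu)
	{
		*vcpu_hcr(vcpu) |= HCR_VA;
	}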
......
@@ -1050,7 +1050,6 @@ static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
 	return ret;
 }
 
-#ifdef __KVM_HAVE_VCPU_EVENTS /* temporary: until 32bit is wired up */
 static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
 				   struct kvm_vcpu_events *events)
 {
@@ -1076,7 +1075,6 @@ static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 	return __kvm_arm_vcpu_set_events(vcpu, events);
 }
-#endif /* __KVM_HAVE_VCPU_EVENTS */
 
 long kvm_arch_vcpu_ioctl(struct file *filp,
 			 unsigned int ioctl, unsigned long arg)
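
For context, the wrappers these hunks uncover were added by the parent commit (539aee0e); based on that shared code, the set side rejects non-zero reserved/pad space before calling the per-arch helper. A sketch, assuming that implementation:

	/* Sketch of the shared wrapper (from the parent commit, not this
	 * diff): user-supplied reserved space must be zero so the fields
	 * can be given meaning later without breaking old userspace. */
	static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
					   struct kvm_vcpu_events *events)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
			if (events->reserved[i])
				return -EINVAL;

		for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
			if (events->exception.pad[i])
				return -EINVAL;

		return __kvm_arm_vcpu_set_events(vcpu, events);
	}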
@@ -1158,7 +1156,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = kvm_arm_vcpu_has_attr(vcpu, &attr);
 		break;
 	}
-#ifdef __KVM_HAVE_VCPU_EVENTS
 	case KVM_GET_VCPU_EVENTS: {
 		struct kvm_vcpu_events events;
@@ -1178,7 +1175,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		return kvm_arm_vcpu_set_events(vcpu, &events);
 	}
-#endif
 	default:
 		r = -EINVAL;
 	}
......