Commit b7b27fac authored by Dongjiu Geng, committed by Marc Zyngier

arm/arm64: KVM: Add KVM_GET/SET_VCPU_EVENTS

For migrating VMs, user space may need to know the exception
state. For example, if KVM makes an SError pending on machine A,
then when the VM is migrated to machine B, KVM needs to make the
same SError pending there as well.

This new IOCTL exports the user-invisible state related to SError.
Together with the appropriate user space changes, user space can
get/set the SError exception state in order to migrate, snapshot,
or suspend a VM.
Signed-off-by: Dongjiu Geng <gengdongjiu@huawei.com>
Reviewed-by: James Morse <james.morse@arm.com>
[expanded documentation wording]
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 327432c2
@@ -835,11 +835,13 @@ struct kvm_clock_data {
 Capability: KVM_CAP_VCPU_EVENTS
 Extended by: KVM_CAP_INTR_SHADOW
-Architectures: x86
-Type: vm ioctl
+Architectures: x86, arm64
+Type: vcpu ioctl
 Parameters: struct kvm_vcpu_event (out)
 Returns: 0 on success, -1 on error

+X86:
+
 Gets currently pending exceptions, interrupts, and NMIs as well as related
 states of the vcpu.
@@ -881,15 +883,52 @@ Only two fields are defined in the flags field:
 - KVM_VCPUEVENT_VALID_SMM may be set in the flags field to signal that
   smi contains a valid state.
+ARM64:
+
+If the guest accesses a device that is being emulated by the host kernel in
+such a way that a real device would generate a physical SError, KVM may make
+a virtual SError pending for that VCPU. This system error interrupt remains
+pending until the guest takes the exception by unmasking PSTATE.A.
+
+Running the VCPU may cause it to take a pending SError, or make an access that
+causes an SError to become pending. The event's description is only valid while
+the VCPU is not running.
+
+This API provides a way to read and write the pending 'event' state that is not
+visible to the guest. To save, restore or migrate a VCPU the struct representing
+the state can be read then written using this GET/SET API, along with the other
+guest-visible registers. It is not possible to 'cancel' an SError that has been
+made pending.
+
+A device being emulated in user-space may also wish to generate an SError. To do
+this the events structure can be populated by user-space. The current state
+should be read first, to ensure no existing SError is pending. If an existing
+SError is pending, the architecture's 'Multiple SError interrupts' rules should
+be followed. (2.5.3 of DDI0587.a "ARM Reliability, Availability, and
+Serviceability (RAS) Specification").
+
+struct kvm_vcpu_events {
+	struct {
+		__u8 serror_pending;
+		__u8 serror_has_esr;
+		/* Align it to 8 bytes */
+		__u8 pad[6];
+		__u64 serror_esr;
+	} exception;
+	__u32 reserved[12];
+};
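[Editor's note: as a minimal sketch of how a VMM might use the GET side when
saving or migrating a VCPU, the fragment below reads the pending SError state
of a stopped VCPU. It assumes a vcpu_fd obtained from KVM_CREATE_VCPU on a
kernel that advertises KVM_CAP_VCPU_EVENTS; the helper name save_serror_state
is hypothetical, not part of this commit.

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Illustrative only: read the user-invisible SError state of a VCPU that
 * is not currently running, so it can be saved alongside the guest-visible
 * registers and later restored with KVM_SET_VCPU_EVENTS on the destination.
 */
static int save_serror_state(int vcpu_fd, struct kvm_vcpu_events *ev)
{
	memset(ev, 0, sizeof(*ev));
	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, ev) < 0)
		return -errno;
	return 0;	/* ev->exception now describes any pending SError */
}
]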
 4.32 KVM_SET_VCPU_EVENTS

 Capability: KVM_CAP_VCPU_EVENTS
 Extended by: KVM_CAP_INTR_SHADOW
-Architectures: x86
-Type: vm ioctl
+Architectures: x86, arm64
+Type: vcpu ioctl
 Parameters: struct kvm_vcpu_event (in)
 Returns: 0 on success, -1 on error

+X86:
+
 Set pending exceptions, interrupts, and NMIs as well as related states of the
 vcpu.
@@ -910,6 +949,13 @@ shall be written into the VCPU.
 KVM_VCPUEVENT_VALID_SMM can only be set if KVM_CAP_X86_SMM is available.
+ARM64:
+
+Set the pending SError exception state for this VCPU. It is not possible to
+'cancel' an SError that has been made pending.
+
+See KVM_GET_VCPU_EVENTS for the data structure.
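[Editor's note: for the SET side, a user-space device model might inject an
SError along the lines of the sketch below, reading the current state first
as the documentation above requires. This is only an illustration under the
same assumptions as the GET sketch; inject_serror is a hypothetical helper.

/*
 * Illustrative only: pend an SError for a stopped VCPU. The current state
 * is read first; if an SError is already pending, the caller must apply
 * the architecture's 'Multiple SError interrupts' rules rather than
 * overwrite it. pad[] and reserved[] must be zero or the ioctl returns
 * -EINVAL, hence the memset.
 */
static int inject_serror(int vcpu_fd, int has_esr, __u64 esr_iss)
{
	struct kvm_vcpu_events ev;

	memset(&ev, 0, sizeof(ev));
	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &ev) < 0)
		return -errno;
	if (ev.exception.serror_pending)
		return -EBUSY;	/* an SError is already pending */

	memset(&ev, 0, sizeof(ev));
	ev.exception.serror_pending = 1;
	ev.exception.serror_has_esr = has_esr;	/* only on CPUs with the RAS extension */
	ev.exception.serror_esr = has_esr ? esr_iss : 0;	/* ISS bits only */

	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &ev) < 0 ? -errno : 0;
}
]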
 4.33 KVM_GET_DEBUGREGS
@@ -93,6 +93,11 @@ static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
 	vcpu->arch.hcr_el2 |= HCR_TWE;
 }

+static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.vsesr_el2;
+}
+
 static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
 {
 	vcpu->arch.vsesr_el2 = vsesr;
@@ -350,6 +350,11 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
 int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+			    struct kvm_vcpu_events *events);
+int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+			    struct kvm_vcpu_events *events);

 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
@@ -378,6 +383,8 @@ void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);

+void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
+
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

 DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
@@ -39,6 +39,7 @@
 #define __KVM_HAVE_GUEST_DEBUG
 #define __KVM_HAVE_IRQ_LINE
 #define __KVM_HAVE_READONLY_MEM
+#define __KVM_HAVE_VCPU_EVENTS

 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -154,6 +155,18 @@ struct kvm_sync_regs {
 struct kvm_arch_memory_slot {
 };

+/* for KVM_GET/SET_VCPU_EVENTS */
+struct kvm_vcpu_events {
+	struct {
+		__u8 serror_pending;
+		__u8 serror_has_esr;
+		/* Align it to 8 bytes */
+		__u8 pad[6];
+		__u64 serror_esr;
+	} exception;
+	__u32 reserved[12];
+};
+
 /* If you need to interpret the index values, here is the key: */
 #define KVM_REG_ARM_COPROC_MASK		0x000000000FFF0000
 #define KVM_REG_ARM_COPROC_SHIFT	16
@@ -289,6 +289,52 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	return -EINVAL;
 }

+int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+			    struct kvm_vcpu_events *events)
+{
+	memset(events, 0, sizeof(*events));
+
+	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
+	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
+
+	if (events->exception.serror_pending && events->exception.serror_has_esr)
+		events->exception.serror_esr = vcpu_get_vsesr(vcpu);
+
+	return 0;
+}
+
+int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+			    struct kvm_vcpu_events *events)
+{
+	int i;
+	bool serror_pending = events->exception.serror_pending;
+	bool has_esr = events->exception.serror_has_esr;
+
+	/* check whether the reserved field is zero */
+	for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
+		if (events->reserved[i])
+			return -EINVAL;
+
+	/* check whether the pad field is zero */
+	for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
+		if (events->exception.pad[i])
+			return -EINVAL;
+
+	if (serror_pending && has_esr) {
+		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
+			return -EINVAL;
+
+		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
+			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
+		else
+			return -EINVAL;
+	} else if (serror_pending) {
+		kvm_inject_vabt(vcpu);
+	}
+
+	return 0;
+}
+
 int __attribute_const__ kvm_target_cpu(void)
 {
 	unsigned long implementor = read_cpuid_implementor();
@@ -164,9 +164,9 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 		inject_undef64(vcpu);
 }

-static void pend_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
+void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
 {
-	vcpu_set_vsesr(vcpu, esr);
+	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
 	*vcpu_hcr(vcpu) |= HCR_VSE;
 }

@@ -184,5 +184,5 @@ static void pend_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
  */
 void kvm_inject_vabt(struct kvm_vcpu *vcpu)
 {
-	pend_guest_serror(vcpu, ESR_ELx_ISV);
+	kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
 }
@@ -79,6 +79,7 @@ int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
 		break;
 	case KVM_CAP_SET_GUEST_DEBUG:
 	case KVM_CAP_VCPU_ATTRIBUTES:
+	case KVM_CAP_VCPU_EVENTS:
 		r = 1;
 		break;
 	default:
@@ -1130,6 +1130,27 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = kvm_arm_vcpu_has_attr(vcpu, &attr);
 		break;
 	}
+#ifdef __KVM_HAVE_VCPU_EVENTS
+	case KVM_GET_VCPU_EVENTS: {
+		struct kvm_vcpu_events events;
+
+		if (kvm_arm_vcpu_get_events(vcpu, &events))
+			return -EINVAL;
+
+		if (copy_to_user(argp, &events, sizeof(events)))
+			return -EFAULT;
+
+		return 0;
+	}
+	case KVM_SET_VCPU_EVENTS: {
+		struct kvm_vcpu_events events;
+
+		if (copy_from_user(&events, argp, sizeof(events)))
+			return -EFAULT;
+
+		return kvm_arm_vcpu_set_events(vcpu, &events);
+	}
+#endif
 	default:
 		r = -EINVAL;
 	}