Commit 8e3c54c8 authored by Dave Martin, committed by Marc Zyngier

KVM: arm64: Enumerate SVE register indices for KVM_GET_REG_LIST

This patch includes the SVE register IDs in the list returned by
KVM_GET_REG_LIST, as appropriate.

On a non-SVE-enabled vcpu, no new IDs are added.

On an SVE-enabled vcpu, IDs for the FPSIMD V-registers are removed
from the list, since userspace is required to access the Z-registers
instead in order to access the V-register content.  For the
variably-sized SVE registers, the appropriate set of slice IDs is
enumerated, depending on the maximum vector length for the vcpu.
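
For illustration only (not part of this patch), a minimal userspace sketch of
how a VMM might walk the KVM_GET_REG_LIST output and pick out the SVE register
IDs for such a vcpu.  The helper name print_sve_reg_ids and the vcpu_fd
parameter are assumptions, error handling is omitted, and the filter relies on
the KVM_REG_ARM_COPROC_MASK / KVM_REG_ARM64_SVE encoding from the arm64 uapi
headers introduced by this series:

/*
 * Illustrative sketch: list the SVE register IDs a vcpu exposes via
 * KVM_GET_REG_LIST.  vcpu_fd is assumed to be an already-created (and,
 * for SVE, already finalized) vcpu file descriptor.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void print_sve_reg_ids(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;
	__u64 i;

	/* First call fails with E2BIG but reports how many IDs exist. */
	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

	list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
	list->n = probe.n;
	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);

	for (i = 0; i < list->n; i++) {
		/* SVE IDs all sit in the KVM_REG_ARM64_SVE coproc space. */
		if ((list->reg[i] & KVM_REG_ARM_COPROC_MASK) ==
		    KVM_REG_ARM64_SVE)
			printf("SVE reg: 0x%llx\n",
			       (unsigned long long)list->reg[i]);
	}

	free(list);
}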

As it currently stands, the SVE architecture never requires more
than one slice to exist per register, so this patch adds no
explicit support for enumerating multiple slices.  The code can be
extended straightforwardly to support this in the future, if
needed.
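
As a complementary sketch (again not part of the patch), one of the enumerated
IDs can be read back with KVM_GET_ONE_REG.  The slice index is the second
argument to KVM_REG_ARM64_SVE_ZREG(), and with a single slice per register only
index 0 is ever listed.  The helper name get_zreg_slice0 and vcpu_fd are
assumptions; the 256-byte buffer size follows from KVM_REG_SIZE() of the
Z-register ID, i.e. the same value the patch captures as KVM_SVE_ZREG_SIZE:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Illustrative only: fetch slice 0 of SVE vector register Zn.
 * 256 bytes == KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0)), the
 * 2048-bit maximum architectural vector length.
 */
static int get_zreg_slice0(int vcpu_fd, unsigned int n, uint8_t buf[256])
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM64_SVE_ZREG(n, 0),	/* slice index 0 */
		.addr = (uint64_t)(uintptr_t)buf,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}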
Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Tested-by: zhang.lei <zhang.lei@jp.fujitsu.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent e1c9c983
...@@ -222,6 +222,13 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))
/*
 * number of register slices required to cover each whole SVE register on vcpu
 * NOTE: If you are tempted to modify this, you must also rework
 * sve_reg_to_region() to match:
 */
#define vcpu_sve_slices(vcpu) 1
/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
	unsigned int koffset;	/* offset into sve_state in kernel memory */
...@@ -411,6 +418,56 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}
static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
	/* Only the first slice ever exists, for now */
	const unsigned int slices = vcpu_sve_slices(vcpu);

	if (!vcpu_has_sve(vcpu))
		return 0;

	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */);
}
static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	/* Only the first slice ever exists, for now */
	const unsigned int slices = vcpu_sve_slices(vcpu);
	u64 reg;
	unsigned int i, n;
	int num_regs = 0;

	if (!vcpu_has_sve(vcpu))
		return 0;

	for (i = 0; i < slices; i++) {
		for (n = 0; n < SVE_NUM_ZREGS; n++) {
			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		for (n = 0; n < SVE_NUM_PREGS; n++) {
			reg = KVM_REG_ARM64_SVE_PREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		reg = KVM_REG_ARM64_SVE_FFR(i);
		if (put_user(reg, uindices++))
			return -EFAULT;
		num_regs++;
	}

	return num_regs;
}
/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
...@@ -421,6 +478,7 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
	unsigned long res = 0;

	res += num_core_regs(vcpu);
	res += num_sve_regs(vcpu);
	res += kvm_arm_num_sys_reg_descs(vcpu);
	res += kvm_arm_get_fw_num_regs(vcpu);
	res += NUM_TIMER_REGS;
...@@ -442,6 +500,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
		return ret;
	uindices += ret;

	ret = copy_sve_reg_indices(vcpu, uindices);
	if (ret)
		return ret;
	uindices += ret;

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret)
		return ret;
...