Commit f8d4635a authored by Dave Martin, committed by Marc Zyngier

KVM: arm64/sve: Miscellaneous tidyups in guest.c

 * Remove a few redundant blank lines that are stylistically
   inconsistent with code already in guest.c and are just taking up
   space.

 * Delete a couple of pointless empty default cases from switch
   statements whose behaviour is otherwise obvious anyway.

 * Fix some typos and consolidate some redundantly duplicated
   comments.

 * Respell the slice index check in sve_reg_to_region() as "> 0"
   to be more consistent with what is logically being checked here
   (i.e., "is the slice index too large"), even though we don't try
   to cope with multiple slices yet (see the sketch below).

No functional change.
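
For illustration only, a minimal standalone sketch of the respelled
check. The mask value and field layout here are assumptions made for
the example, not taken from this patch:

#include <stdint.h>
#include <errno.h>

/* Assumption for this sketch: slice index in the low 5 bits of reg->id. */
#define SVE_REG_SLICE_MASK 0x1f

static int check_slice_index(uint64_t id)
{
        /*
         * "> 0" spells out the real question, "is the slice index too
         * large"; "!= 0" computes the same result but hides the intent.
         */
        if ((id & SVE_REG_SLICE_MASK) > 0)
                return -ENOENT; /* only slice 0 exists, for now */

        return 0;
}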
Suggested-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 8ae6efdd
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -290,9 +290,10 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 #define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))
 
 /*
- * number of register slices required to cover each whole SVE register on vcpu
- * NOTE: If you are tempted to modify this, you must also to rework
- * sve_reg_to_region() to match:
+ * Number of register slices required to cover each whole SVE register.
+ * NOTE: Only the first slice ever exists, for now.
+ * If you are tempted to modify this, you must also rework sve_reg_to_region()
+ * to match:
  */
 #define vcpu_sve_slices(vcpu) 1
 
@@ -334,8 +335,7 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
        /* Verify that we match the UAPI header: */
        BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);
 
-       /* Only the first slice ever exists, for now: */
-       if ((reg->id & SVE_REG_SLICE_MASK) != 0)
+       if ((reg->id & SVE_REG_SLICE_MASK) > 0)
                return -ENOENT;
 
        vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
@@ -520,7 +520,6 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 
 static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
 {
-       /* Only the first slice ever exists, for now */
        const unsigned int slices = vcpu_sve_slices(vcpu);
 
        if (!vcpu_has_sve(vcpu))
@@ -536,7 +535,6 @@ static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
 static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
                                u64 __user *uindices)
 {
-       /* Only the first slice ever exists, for now */
        const unsigned int slices = vcpu_sve_slices(vcpu);
        u64 reg;
        unsigned int i, n;
@@ -555,7 +553,6 @@ static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
        reg = KVM_REG_ARM64_SVE_VLS;
        if (put_user(reg, uindices++))
                return -EFAULT;
-
        ++num_regs;
 
        for (i = 0; i < slices; i++) {
@@ -563,7 +560,6 @@ static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
                        reg = KVM_REG_ARM64_SVE_ZREG(n, i);
                        if (put_user(reg, uindices++))
                                return -EFAULT;
-
                        num_regs++;
                }
 
@@ -571,14 +567,12 @@ static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
                        reg = KVM_REG_ARM64_SVE_PREG(n, i);
                        if (put_user(reg, uindices++))
                                return -EFAULT;
-
                        num_regs++;
                }
 
                reg = KVM_REG_ARM64_SVE_FFR(i);
                if (put_user(reg, uindices++))
                        return -EFAULT;
-
                num_regs++;
        }
 
@@ -645,7 +639,6 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        case KVM_REG_ARM_CORE:  return get_core_reg(vcpu, reg);
        case KVM_REG_ARM_FW:    return kvm_arm_get_fw_reg(vcpu, reg);
        case KVM_REG_ARM64_SVE: return get_sve_reg(vcpu, reg);
-       default: break; /* fall through */
        }
 
        if (is_timer_reg(reg->id))
@@ -664,7 +657,6 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        case KVM_REG_ARM_CORE:  return set_core_reg(vcpu, reg);
        case KVM_REG_ARM_FW:    return kvm_arm_set_fw_reg(vcpu, reg);
        case KVM_REG_ARM64_SVE: return set_sve_reg(vcpu, reg);
-       default: break; /* fall through */
        }
 
        if (is_timer_reg(reg->id))
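
On the deleted default cases: a small self-contained sketch
(hypothetical ids, not kernel code) of why the empty "default: break;"
lines were pointless. A C switch with no matching case and no default
does nothing, and control simply continues below it:

#include <stdio.h>

static const char *reg_kind(int id)
{
        switch (id) {
        case 1: return "core";
        case 2: return "fw";
        case 3: return "sve";
        }       /* no default needed: unmatched ids just fall out */

        return "other"; /* reached only when no case above matched */
}

int main(void)
{
        printf("%s %s\n", reg_kind(3), reg_kind(42));   /* prints: sve other */
        return 0;
}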