Commit a7a2c72a authored by Oliver Upton

KVM: arm64: Separate out feature sanitisation and initialisation

kvm_vcpu_set_target() iteratively sanitises and copies feature flags in
one go. This is rather odd, especially considering that bitmap accessors
can do the heavy lifting. A subsequent change will make vCPU
features VM-wide, and fitting that into the present implementation is
just a chore.

Rework the whole thing to use bitmap accessors to sanitise and copy
flags.

Link: https://lore.kernel.org/r/20230609190054.1542113-2-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parent 44c026a7
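Before reading the diff, here is a minimal userspace sketch (not the kernel code) of the mask-based scheme the rework switches to: unknown bits in the first feature word are rejected against a validity mask, any set bit in the remaining words is rejected, and the surviving word is recorded once and compared verbatim on later init calls. The helper names and the use of unsigned long words are illustrative only; the real struct kvm_vcpu_init carries __u32 features[7].

#include <stdbool.h>
#include <stdio.h>

#define VCPU_MAX_FEATURES	7
#define VCPU_VALID_FEATURES	((1UL << VCPU_MAX_FEATURES) - 1)
#define NR_FEATURE_WORDS	7	/* stand-in for ARRAY_SIZE(init->features) */

/* Reject unknown bits in word 0 and anything set in the upper words. */
static int feature_check(const unsigned long *features)
{
	int i;

	if (features[0] & ~VCPU_VALID_FEATURES)
		return -1;	/* the kernel returns -ENOENT here */

	for (i = 1; i < NR_FEATURE_WORDS; i++) {
		if (features[i])
			return -1;
	}

	return 0;
}

/* Later init calls must present exactly the recorded feature word. */
static bool features_changed(unsigned long recorded, unsigned long requested)
{
	return recorded != requested;
}

int main(void)
{
	unsigned long req[NR_FEATURE_WORDS] = { 0x3 };	/* two valid feature bits */
	unsigned long recorded;

	if (feature_check(req))
		return 1;

	recorded = req[0];	/* bitmap_copy() in the kernel */
	printf("same set changed? %d\n", features_changed(recorded, 0x3));	/* 0 */
	printf("new bit changed?  %d\n", features_changed(recorded, 0x7));	/* 1 */

	return 0;
}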
@@ -39,6 +39,7 @@
 #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
 
 #define KVM_VCPU_MAX_FEATURES 7
+#define KVM_VCPU_VALID_FEATURES	(BIT(KVM_VCPU_MAX_FEATURES) - 1)
 
 #define KVM_REQ_SLEEP \
 	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
...
@@ -1167,42 +1167,40 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
 	return -EINVAL;
 }
 
-static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
-			       const struct kvm_vcpu_init *init)
+static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
+					const struct kvm_vcpu_init *init)
 {
-	unsigned int i, ret;
-	u32 phys_target = kvm_target_cpu();
+	unsigned long features = init->features[0];
+	int i;
 
-	if (init->target != phys_target)
-		return -EINVAL;
+	if (features & ~KVM_VCPU_VALID_FEATURES)
+		return -ENOENT;
 
-	/*
-	 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
-	 * use the same target.
-	 */
-	if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
-		return -EINVAL;
-
-	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
-	for (i = 0; i < sizeof(init->features) * 8; i++) {
-		bool set = (init->features[i / 32] & (1 << (i % 32)));
-
-		if (set && i >= KVM_VCPU_MAX_FEATURES)
+	for (i = 1; i < ARRAY_SIZE(init->features); i++) {
+		if (init->features[i])
 			return -ENOENT;
+	}
 
-		/*
-		 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
-		 * use the same feature set.
-		 */
-		if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
-		    test_bit(i, vcpu->arch.features) != set)
-			return -EINVAL;
+	return 0;
+}
 
-		if (set)
-			set_bit(i, vcpu->arch.features);
-	}
+static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
+				  const struct kvm_vcpu_init *init)
+{
+	unsigned long features = init->features[0];
 
-	vcpu->arch.target = phys_target;
+	return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES) ||
+			vcpu->arch.target != init->target;
+}
+
+static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+				 const struct kvm_vcpu_init *init)
+{
+	unsigned long features = init->features[0];
+	int ret;
+
+	vcpu->arch.target = init->target;
+	bitmap_copy(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES);
 
 	/* Now we know what it is, we can reset it. */
 	ret = kvm_reset_vcpu(vcpu);
@@ -1214,6 +1212,27 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 	return ret;
 }
 
+static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+			       const struct kvm_vcpu_init *init)
+{
+	int ret;
+
+	if (init->target != kvm_target_cpu())
+		return -EINVAL;
+
+	ret = kvm_vcpu_init_check_features(vcpu, init);
+	if (ret)
+		return ret;
+
+	if (vcpu->arch.target == -1)
+		return __kvm_vcpu_set_target(vcpu, init);
+
+	if (kvm_vcpu_init_changed(vcpu, init))
+		return -EINVAL;
+
+	return kvm_reset_vcpu(vcpu);
+}
+
 static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 					 struct kvm_vcpu_init *init)
 {
...
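For context, the userspace-visible contract preserved by the rework: the first KVM_ARM_VCPU_INIT records the target and feature set and resets the vCPU; any later call must present an identical payload (unknown features still yield -ENOENT, a changed target or feature set -EINVAL), and a matching call simply resets the vCPU again. A minimal sketch of that flow, assuming an arm64 host with /dev/kvm and skipping most error handling:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	struct kvm_vcpu_init init;
	int kvm, vm, vcpu;

	kvm = open("/dev/kvm", O_RDWR);
	vm = ioctl(kvm, KVM_CREATE_VM, 0);
	vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

	/* Ask KVM for the preferred target; the feature array comes back zeroed. */
	memset(&init, 0, sizeof(init));
	if (ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init) < 0)
		return 1;

	/* First call: records target/features and resets the vCPU. */
	if (ioctl(vcpu, KVM_ARM_VCPU_INIT, &init) < 0)
		return 1;

	/* Second call with an identical payload: only re-resets the vCPU.
	 * Changing init.target or init.features here would fail with EINVAL. */
	if (ioctl(vcpu, KVM_ARM_VCPU_INIT, &init) < 0)
		return 1;

	puts("vCPU initialised and re-initialised with the same payload");
	return 0;
}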