Commit b21e31b2 authored by Paolo Bonzini


Merge tag 'kvmarm-fixes-for-5.2-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm fixes for 5.2, take #2

- SVE cleanup killing a warning with ancient GCC versions
- Don't report non-existent system registers to userspace
- Fix memory leak when freeing the vgic ITS
- Properly lower the interrupt on the emulated physical timer
parents 9fd58877 e4e5a865
@@ -70,10 +70,8 @@ static u64 core_reg_offset_from_id(u64 id)
 	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }
 
-static int validate_core_offset(const struct kvm_vcpu *vcpu,
-				const struct kvm_one_reg *reg)
+static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
 {
-	u64 off = core_reg_offset_from_id(reg->id);
 	int size;
 
 	switch (off) {
@@ -103,8 +101,7 @@ static int validate_core_offset(const struct kvm_vcpu *vcpu,
 		return -EINVAL;
 	}
 
-	if (KVM_REG_SIZE(reg->id) != size ||
-	    !IS_ALIGNED(off, size / sizeof(__u32)))
+	if (!IS_ALIGNED(off, size / sizeof(__u32)))
 		return -EINVAL;
 
 	/*
@@ -115,6 +112,21 @@ static int validate_core_offset(const struct kvm_vcpu *vcpu,
 	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
 		return -EINVAL;
 
+	return size;
+}
+
+static int validate_core_offset(const struct kvm_vcpu *vcpu,
+				const struct kvm_one_reg *reg)
+{
+	u64 off = core_reg_offset_from_id(reg->id);
+	int size = core_reg_size_from_offset(vcpu, off);
+
+	if (size < 0)
+		return -EINVAL;
+
+	if (KVM_REG_SIZE(reg->id) != size)
+		return -EINVAL;
+
 	return 0;
 }
@@ -207,13 +219,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 
 #define vq_word(vq)	(((vq) - SVE_VQ_MIN) / 64)
 #define vq_mask(vq)	((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
-
-static bool vq_present(
-	const u64 (*const vqs)[KVM_ARM64_SVE_VLS_WORDS],
-	unsigned int vq)
-{
-	return (*vqs)[vq_word(vq)] & vq_mask(vq);
-}
+#define vq_present(vqs, vq) ((vqs)[vq_word(vq)] & vq_mask(vq))
 
 static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
@@ -258,7 +264,7 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 
 	max_vq = 0;
 	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
-		if (vq_present(&vqs, vq))
+		if (vq_present(vqs, vq))
 			max_vq = vq;
 
 	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
@@ -272,7 +278,7 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	 * maximum:
 	 */
 	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
-		if (vq_present(&vqs, vq) != sve_vq_available(vq))
+		if (vq_present(vqs, vq) != sve_vq_available(vq))
 			return -EINVAL;
 
 	/* Can't run with no vector lengths at all: */
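The call sites above drop the address-of operator because vq_present() is now a macro that indexes the vqs[] bitmap directly, rather than a function taking a pointer to the whole array. As a standalone sketch of the underlying bitmap scheme, one bit per vector quantum (constants mirror the arm64 uapi values; illustrative only, not the kernel code):

#include <stdint.h>
#include <stdio.h>

/* One bit per VQ (vector length in units of 128 bits), packed into 64-bit words. */
#define SVE_VQ_MIN		1
#define SVE_VQ_MAX		512
#define KVM_ARM64_SVE_VLS_WORDS	((SVE_VQ_MAX - SVE_VQ_MIN) / 64 + 1)

#define vq_word(vq)	(((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq)	((uint64_t)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) ((vqs)[vq_word(vq)] & vq_mask(vq))

int main(void)
{
	uint64_t vqs[KVM_ARM64_SVE_VLS_WORDS] = { 0 };

	vqs[vq_word(2)] |= vq_mask(2);	/* mark VQ 2 (256-bit vectors) as supported */

	printf("VQ 1: %s\n", vq_present(vqs, 1) ? "present" : "absent");
	printf("VQ 2: %s\n", vq_present(vqs, 2) ? "present" : "absent");
	return 0;
}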
@@ -453,19 +459,34 @@ static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
 {
 	unsigned int i;
 	int n = 0;
-	const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
 
 	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
-		/*
-		 * The KVM_REG_ARM64_SVE regs must be used instead of
-		 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
-		 * SVE-enabled vcpus:
-		 */
-		if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(i))
+		u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
+		int size = core_reg_size_from_offset(vcpu, i);
+
+		if (size < 0)
+			continue;
+
+		switch (size) {
+		case sizeof(__u32):
+			reg |= KVM_REG_SIZE_U32;
+			break;
+
+		case sizeof(__u64):
+			reg |= KVM_REG_SIZE_U64;
+			break;
+
+		case sizeof(__uint128_t):
+			reg |= KVM_REG_SIZE_U128;
+			break;
+
+		default:
+			WARN_ON(1);
 			continue;
+		}
 
 		if (uindices) {
-			if (put_user(core_reg | i, uindices))
+			if (put_user(reg, uindices))
 				return -EFAULT;
 			uindices++;
 		}
...
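With this hunk, each core register index reported by KVM_GET_REG_LIST carries its real size (U32, U64 or U128) instead of always claiming U64, and indices the guest cannot use are skipped. A minimal userspace-side sketch of decoding that size field, assuming the arm64 kernel uapi headers (linux/kvm.h pulls in the KVM_REG_* constants and KVM_REG_SIZE() on arm64); reg_size_bytes() is a made-up helper name:

#include <linux/kvm.h>
#include <stdint.h>
#include <stdio.h>

/* KVM_REG_SIZE() yields 1 << ((id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT),
 * i.e. the buffer size userspace must pass to KVM_GET_ONE_REG / KVM_SET_ONE_REG. */
static size_t reg_size_bytes(uint64_t reg_id)
{
	return KVM_REG_SIZE(reg_id);
}

int main(void)
{
	/* e.g. a 128-bit FP/SIMD V-register index as now emitted by the loop above */
	uint64_t id = KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE;

	printf("register size: %zu bytes\n", reg_size_bytes(id));	/* prints 16 */
	return 0;
}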
@@ -321,14 +321,15 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
 	}
 }
 
+/* Only called for a fully emulated timer */
 static void timer_emulate(struct arch_timer_context *ctx)
 {
 	bool should_fire = kvm_timer_should_fire(ctx);
 
 	trace_kvm_timer_emulate(ctx, should_fire);
 
-	if (should_fire) {
-		kvm_timer_update_irq(ctx->vcpu, true, ctx);
+	if (should_fire != ctx->irq.level) {
+		kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
 		return;
 	}
...
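This is the fix for properly lowering the interrupt on the emulated physical timer: the line is now updated whenever the computed level differs from the one currently signalled, so it can be deasserted again once the firing condition clears, rather than only ever being raised. A hypothetical standalone model of that level-tracking idea (not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct line { bool level; };

/* Propagate only on a change of level; the old logic could only raise, never lower. */
static void update_line(struct line *l, bool should_fire)
{
	if (should_fire != l->level) {
		l->level = should_fire;
		printf("line %s\n", should_fire ? "raised" : "lowered");
	}
}

int main(void)
{
	struct line l = { .level = false };

	update_line(&l, true);	/* timer fires: line goes up */
	update_line(&l, true);	/* unchanged: nothing to do */
	update_line(&l, false);	/* guest reprograms the timer: line goes down again */
	return 0;
}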
@@ -1734,6 +1734,7 @@ static void vgic_its_destroy(struct kvm_device *kvm_dev)
 	mutex_unlock(&its->its_lock);
 
 	kfree(its);
+	kfree(kvm_dev);/* alloc by kvm_ioctl_create_device, free by .destroy */
 }
 
 static int vgic_its_has_attr_regs(struct kvm_device *dev,
...
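The one-line addition above plugs the memory leak: kvm_ioctl_create_device() allocates the struct kvm_device, and the device's .destroy callback is expected to free it together with any device-private state, but vgic_its_destroy() previously freed only the ITS structure. An illustrative sketch of that ownership contract (hypothetical device; the private field is per include/linux/kvm_host.h), not the actual vgic code:

/* Hypothetical .destroy callback for a KVM device following the same rule. */
static void example_kvm_device_destroy(struct kvm_device *dev)
{
	struct example_state *s = dev->private;	/* state allocated in .create */

	kfree(s);	/* free the device's own state ... */
	kfree(dev);	/* ... and the kvm_device allocated by kvm_ioctl_create_device() */
}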