Commit 84886c26 authored by Paolo Bonzini

Merge tag 'kvmarm-fixes-5.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-master

KVM/arm64 fixes for 5.16, take #1

- Fix the host S2 finalization by solely iterating over the memblocks
  instead of the whole IPA space

- Tighten the return value of kvm_vcpu_preferred_target() now that
  32bit support is long gone

- Make sure the extraction of ESR_ELx.EC is limited to the architected
  bits

- Comment fixups
parents 501cfe06 50a8d331
arch/arm64/include/asm/esr.h
@@ -68,6 +68,7 @@
 #define ESR_ELx_EC_MAX		(0x3F)
 #define ESR_ELx_EC_SHIFT	(26)
+#define ESR_ELx_EC_WIDTH	(6)
 #define ESR_ELx_EC_MASK		(UL(0x3F) << ESR_ELx_EC_SHIFT)
 #define ESR_ELx_EC(esr)		(((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
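The new ESR_ELx_EC_WIDTH is the existing mask restated as a field width, i.e. MASK == ((1 << WIDTH) - 1) << SHIFT. A standalone sanity check of that relationship (macro names shortened here, not the kernel's own):

#include <assert.h>

#define EC_SHIFT 26
#define EC_WIDTH 6
#define EC_MASK  (0x3FUL << EC_SHIFT)

int main(void)
{
	/* The mask and the (shift, width) pair describe the same 6-bit field. */
	assert(EC_MASK == (((1UL << EC_WIDTH) - 1) << EC_SHIFT));
	return 0;
}

The assembly hunks further down use exactly this (shift, width) pair as the immediate operands of ubfx.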
arch/arm64/include/asm/kvm_host.h
@@ -584,7 +584,7 @@ struct kvm_vcpu_stat {
 	u64 exits;
 };

-int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
+void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
arch/arm64/kvm/arm.c
@@ -1389,12 +1389,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
 	}
 	case KVM_ARM_PREFERRED_TARGET: {
-		int err;
 		struct kvm_vcpu_init init;

-		err = kvm_vcpu_preferred_target(&init);
-		if (err)
-			return err;
+		kvm_vcpu_preferred_target(&init);

 		if (copy_to_user(argp, &init, sizeof(init)))
 			return -EFAULT;
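For context on what this ioctl path serves: an arm64 VMM queries KVM_ARM_PREFERRED_TARGET on the VM fd and feeds the result to KVM_ARM_VCPU_INIT on each vCPU. A rough userspace sketch, with all error handling omitted for brevity:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	struct kvm_vcpu_init init;
	int kvm  = open("/dev/kvm", O_RDWR);
	int vm   = ioctl(kvm, KVM_CREATE_VM, 0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

	/* Ask the VM for its preferred target. After this series the
	 * in-kernel helper cannot fail; the ioctl itself can still
	 * return -EFAULT if the copy to userspace faults. */
	ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);

	/* Initialise the vCPU with the returned target (the struct
	 * comes back zeroed apart from the target field). */
	return ioctl(vcpu, KVM_ARM_VCPU_INIT, &init);
}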
arch/arm64/kvm/guest.c
@@ -869,13 +869,10 @@ u32 __attribute_const__ kvm_target_cpu(void)
 	return KVM_ARM_TARGET_GENERIC_V8;
 }

-int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
+void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
 {
 	u32 target = kvm_target_cpu();

-	if (target < 0)
-		return -ENODEV;
-
 	memset(init, 0, sizeof(*init));

 	/*
@@ -885,8 +882,6 @@ int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
 	 * target type.
 	 */
 	init->target = (__u32)target;
-
-	return 0;
 }

 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
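The deleted check was dead code: kvm_target_cpu() returns a u32, so target < 0 can never be true, and with 32-bit hosts gone it always returns KVM_ARM_TARGET_GENERIC_V8 anyway, which is why the function can now return void. A toy program showing why the unsigned comparison could never fire (values invented for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Model of the removed check: even a "negative" error value
	 * stored in a u32 compares as a huge positive number. */
	uint32_t target = (uint32_t)-19; /* -ENODEV stuffed into a u32 */

	if (target < 0)                  /* always false for unsigned */
		printf("error path\n");
	else
		printf("target = %u (the error branch is dead)\n", target);
	return 0;
}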
arch/arm64/kvm/hyp/hyp-entry.S
@@ -44,7 +44,7 @@
 el1_sync:				// Guest trapped into EL2

 	mrs	x0, esr_el2
-	lsr	x0, x0, #ESR_ELx_EC_SHIFT
+	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
 	cmp	x0, #ESR_ELx_EC_HVC64
 	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
 	b.ne	el1_trap
arch/arm64/kvm/hyp/nvhe/host.S
@@ -141,7 +141,7 @@ SYM_FUNC_END(__host_hvc)
 .L__vect_start\@:
 	stp	x0, x1, [sp, #-16]!
 	mrs	x0, esr_el2
-	lsr	x0, x0, #ESR_ELx_EC_SHIFT
+	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
 	cmp	x0, #ESR_ELx_EC_HVC64
 	b.eq	__host_hvc
 	b	__host_exit
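Both assembly hunks make the same fix: ESR_ELx is a 64-bit register, and a bare lsr leaves any architected bits above EC (such as ISS2) sitting on top of the extracted class, whereas ubfx pulls out exactly the 6-bit field. A small C model of the difference, using an invented ESR value:

#include <stdint.h>
#include <stdio.h>

#define EC_SHIFT 26
#define EC_HVC64 0x16ULL

int main(void)
{
	/* Invented ESR_EL2 value: EC = HVC64 plus one stray bit above
	 * bit 31, as a field like ISS2 could set on a 64-bit ESR. */
	uint64_t esr = (EC_HVC64 << EC_SHIFT) | (1ULL << 34);

	uint64_t lsr  = esr >> EC_SHIFT;          /* old: lsr  x0, x0, #26     */
	uint64_t ubfx = (esr >> EC_SHIFT) & 0x3F; /* new: ubfx x0, x0, #26, #6 */

	/* Only the bitfield extract still matches the HVC64 class. */
	printf("lsr matches: %d, ubfx matches: %d\n",
	       lsr == EC_HVC64, ubfx == EC_HVC64);
	return 0;
}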
arch/arm64/kvm/hyp/nvhe/setup.c
@@ -178,7 +178,7 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,

 	phys = kvm_pte_to_phys(pte);
 	if (!addr_is_memory(phys))
-		return 0;
+		return -EINVAL;

 	/*
 	 * Adjust the host stage-2 mappings to match the ownership attributes
@@ -207,8 +207,18 @@ static int finalize_host_mappings(void)
 		.cb	= finalize_host_mappings_walker,
 		.flags	= KVM_PGTABLE_WALK_LEAF,
 	};
+	int i, ret;
+
+	for (i = 0; i < hyp_memblock_nr; i++) {
+		struct memblock_region *reg = &hyp_memory[i];
+		u64 start = (u64)hyp_phys_to_virt(reg->base);
+
+		ret = kvm_pgtable_walk(&pkvm_pgtable, start, reg->size, &walker);
+		if (ret)
+			return ret;
+	}

-	return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits), &walker);
+	return 0;
 }

 void __noreturn __pkvm_init_finalise(void)
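The two hunks work together: the walker now refuses (-EINVAL) PTEs that do not point at memory, and the walk itself is narrowed from the entire IPA space to just the hyp memblock ranges, so such PTEs are never legitimately visited. A toy model of the narrowed walk, where the region table and visit() callback are invented stand-ins for hyp_memory[] and the page-table walker:

#include <stdint.h>
#include <stdio.h>

struct region { uint64_t base, size; };

static int visit(uint64_t base, uint64_t size)
{
	printf("walking [%#llx, %#llx)\n", (unsigned long long)base,
	       (unsigned long long)(base + size));
	return 0;
}

int main(void)
{
	struct region mem[] = {
		{ 0x40000000ULL, 0x100000ULL },
		{ 0x80000000ULL, 0x200000ULL },
	};
	unsigned int i;
	int ret;

	/* Visit only the registered regions, never the gaps between
	 * them, mirroring the memblock loop in the hunk above. */
	for (i = 0; i < sizeof(mem) / sizeof(mem[0]); i++) {
		ret = visit(mem[i].base, mem[i].size);
		if (ret)
			return ret;
	}
	return 0;
}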
arch/arm64/kvm/hyp/nvhe/sys_regs.c
@@ -474,7 +474,7 @@ bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
 	return true;
 }

-/**
+/*
  * Handler for protected VM restricted exceptions.
  *
  * Inject an undefined exception into the guest and return true to indicate that
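The one-character change matters to tooling: a comment opened with /** is claimed by scripts/kernel-doc and must follow its format, while /* is an ordinary comment it ignores. Roughly what a conforming kernel-doc header looks like, with an invented function for illustration:

#include <stdbool.h>

/* An ordinary comment: scripts/kernel-doc skips it entirely. */

/**
 * demo_handler() - Short one-line description (function invented).
 * @code: value to inspect
 *
 * The double-star opener marks this block as kernel-doc, so it must
 * follow this layout; the comment fixed above did not, hence its
 * demotion to a plain opener.
 *
 * Return: true if @code is zero.
 */
static bool demo_handler(int code)
{
	return code == 0;
}

int main(void)
{
	return demo_handler(0) ? 0 : 1;
}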