Commit a313c8e0 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "PPC:
   - Fix a bug where we try to do an ultracall on a system without an
     ultravisor

  ARM:
   - Fix uninitialised sysreg accessor
   - Fix handling of demand-paged device mappings
   - Stop spamming the console on IMPDEF sysregs
   - Relax mappings of writable memslots
   - Assorted cleanups

  MIPS:
   - Now orphan, James Hogan is stepping down

  x86:
   - MAINTAINERS change, so long Radim and thanks for all the fish
   - supported CPUID fixes for AMD machines without SPEC_CTRL"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  MAINTAINERS: remove Radim from KVM maintainers
  MAINTAINERS: Orphan KVM for MIPS
  kvm: x86: Host feature SSBD doesn't imply guest feature AMD_SSBD
  kvm: x86: Host feature SSBD doesn't imply guest feature SPEC_CTRL_SSBD
  KVM: PPC: Book3S HV: Don't do ultravisor calls on systems without ultravisor
  KVM: arm/arm64: Properly handle faulting of device mappings
  KVM: arm64: Ensure 'params' is initialised when looking up sys register
  KVM: arm/arm64: Remove excessive permission check in kvm_arch_prepare_memory_region
  KVM: arm64: Don't log IMP DEF sysreg traps
  KVM: arm64: Sanely ratelimit sysreg messages
  KVM: arm/arm64: vgic: Use wrapper function to lock/unlock all vcpus in kvm_vgic_create()
  KVM: arm/arm64: vgic: Fix potential double free dist->spis in __kvm_vgic_destroy()
  KVM: arm/arm64: Get rid of unused arg in cpu_init_hyp_mode()
parents 7214618c d68321de
...@@ -9041,7 +9041,6 @@ F: include/linux/umh.h ...@@ -9041,7 +9041,6 @@ F: include/linux/umh.h
KERNEL VIRTUAL MACHINE (KVM) KERNEL VIRTUAL MACHINE (KVM)
M: Paolo Bonzini <pbonzini@redhat.com> M: Paolo Bonzini <pbonzini@redhat.com>
M: Radim Krčmář <rkrcmar@redhat.com>
L: kvm@vger.kernel.org L: kvm@vger.kernel.org
W: http://www.linux-kvm.org W: http://www.linux-kvm.org
T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
...@@ -9076,9 +9075,9 @@ F: virt/kvm/arm/ ...@@ -9076,9 +9075,9 @@ F: virt/kvm/arm/
F: include/kvm/arm_* F: include/kvm/arm_*
KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips) KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
M: James Hogan <jhogan@kernel.org>
L: linux-mips@vger.kernel.org L: linux-mips@vger.kernel.org
S: Supported L: kvm@vger.kernel.org
S: Orphan
F: arch/mips/include/uapi/asm/kvm* F: arch/mips/include/uapi/asm/kvm*
F: arch/mips/include/asm/kvm* F: arch/mips/include/asm/kvm*
F: arch/mips/kvm/ F: arch/mips/kvm/
...@@ -9113,7 +9112,6 @@ F: tools/testing/selftests/kvm/*/s390x/ ...@@ -9113,7 +9112,6 @@ F: tools/testing/selftests/kvm/*/s390x/
KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86) KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
M: Paolo Bonzini <pbonzini@redhat.com> M: Paolo Bonzini <pbonzini@redhat.com>
M: Radim Krčmář <rkrcmar@redhat.com>
R: Sean Christopherson <sean.j.christopherson@intel.com> R: Sean Christopherson <sean.j.christopherson@intel.com>
R: Vitaly Kuznetsov <vkuznets@redhat.com> R: Vitaly Kuznetsov <vkuznets@redhat.com>
R: Wanpeng Li <wanpengli@tencent.com> R: Wanpeng Li <wanpengli@tencent.com>
......
...@@ -2098,9 +2098,9 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu, ...@@ -2098,9 +2098,9 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
WARN_ON(1); WARN_ON(1);
} }
kvm_err("Unsupported guest CP%d access at: %08lx [%08lx]\n", print_sys_reg_msg(params,
"Unsupported guest CP%d access at: %08lx [%08lx]\n",
cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
print_sys_reg_instr(params);
kvm_inject_undefined(vcpu); kvm_inject_undefined(vcpu);
} }
...@@ -2233,6 +2233,12 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -2233,6 +2233,12 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
NULL, 0); NULL, 0);
} }
/*
 * Does the encoding fall in the IMPLEMENTATION DEFINED system register
 * space? Per ARM DDI 0487E.a, section D12.3.2, that is Op0 == 3 with
 * CRn == 0b1011 or CRn == 0b1111 (i.e. bits 0, 1 and 3 of CRn all set).
 */
static bool is_imp_def_sys_reg(struct sys_reg_params *params)
{
	if (params->Op0 != 3)
		return false;

	return (params->CRn & 0b1011) == 0b1011;
}
static int emulate_sys_reg(struct kvm_vcpu *vcpu, static int emulate_sys_reg(struct kvm_vcpu *vcpu,
struct sys_reg_params *params) struct sys_reg_params *params)
{ {
...@@ -2248,10 +2254,12 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu, ...@@ -2248,10 +2254,12 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
if (likely(r)) { if (likely(r)) {
perform_access(vcpu, params, r); perform_access(vcpu, params, r);
} else if (is_imp_def_sys_reg(params)) {
kvm_inject_undefined(vcpu);
} else { } else {
kvm_err("Unsupported guest sys_reg access at: %lx [%08lx]\n", print_sys_reg_msg(params,
"Unsupported guest sys_reg access at: %lx [%08lx]\n",
*vcpu_pc(vcpu), *vcpu_cpsr(vcpu)); *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
print_sys_reg_instr(params);
kvm_inject_undefined(vcpu); kvm_inject_undefined(vcpu);
} }
return 1; return 1;
...@@ -2360,8 +2368,11 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu, ...@@ -2360,8 +2368,11 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG) if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
return NULL; return NULL;
if (!index_to_params(id, &params))
return NULL;
table = get_target_table(vcpu->arch.target, true, &num); table = get_target_table(vcpu->arch.target, true, &num);
r = find_reg_by_id(id, &params, table, num); r = find_reg(&params, table, num);
if (!r) if (!r)
r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
......
...@@ -62,11 +62,24 @@ struct sys_reg_desc { ...@@ -62,11 +62,24 @@ struct sys_reg_desc {
#define REG_HIDDEN_USER (1 << 0) /* hidden from userspace ioctls */ #define REG_HIDDEN_USER (1 << 0) /* hidden from userspace ioctls */
#define REG_HIDDEN_GUEST (1 << 1) /* hidden from guest */ #define REG_HIDDEN_GUEST (1 << 1) /* hidden from guest */
static inline void print_sys_reg_instr(const struct sys_reg_params *p) static __printf(2, 3)
inline void print_sys_reg_msg(const struct sys_reg_params *p,
char *fmt, ...)
{ {
va_list va;
va_start(va, fmt);
/* Look, we even formatted it for you to paste into the table! */ /* Look, we even formatted it for you to paste into the table! */
kvm_pr_unimpl(" { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n", kvm_pr_unimpl("%pV { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n",
&(struct va_format){ fmt, &va },
p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ? "write" : "read"); p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ? "write" : "read");
va_end(va);
}
static inline void print_sys_reg_instr(const struct sys_reg_params *p)
{
/* GCC warns on an empty format string */
print_sys_reg_msg(p, "%s", "");
} }
static inline bool ignore_write(struct kvm_vcpu *vcpu, static inline bool ignore_write(struct kvm_vcpu *vcpu,
......
...@@ -4983,6 +4983,7 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) ...@@ -4983,6 +4983,7 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
if (nesting_enabled(kvm)) if (nesting_enabled(kvm))
kvmhv_release_all_nested(kvm); kvmhv_release_all_nested(kvm);
kvm->arch.process_table = 0; kvm->arch.process_table = 0;
if (kvm->arch.secure_guest)
uv_svm_terminate(kvm->arch.lpid); uv_svm_terminate(kvm->arch.lpid);
kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
} }
......
...@@ -402,7 +402,8 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index) ...@@ -402,7 +402,8 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
entry->edx |= F(SPEC_CTRL); entry->edx |= F(SPEC_CTRL);
if (boot_cpu_has(X86_FEATURE_STIBP)) if (boot_cpu_has(X86_FEATURE_STIBP))
entry->edx |= F(INTEL_STIBP); entry->edx |= F(INTEL_STIBP);
if (boot_cpu_has(X86_FEATURE_SSBD)) if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
boot_cpu_has(X86_FEATURE_AMD_SSBD))
entry->edx |= F(SPEC_CTRL_SSBD); entry->edx |= F(SPEC_CTRL_SSBD);
/* /*
* We emulate ARCH_CAPABILITIES in software even * We emulate ARCH_CAPABILITIES in software even
...@@ -759,7 +760,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function, ...@@ -759,7 +760,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
entry->ebx |= F(AMD_IBRS); entry->ebx |= F(AMD_IBRS);
if (boot_cpu_has(X86_FEATURE_STIBP)) if (boot_cpu_has(X86_FEATURE_STIBP))
entry->ebx |= F(AMD_STIBP); entry->ebx |= F(AMD_STIBP);
if (boot_cpu_has(X86_FEATURE_SSBD)) if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
boot_cpu_has(X86_FEATURE_AMD_SSBD))
entry->ebx |= F(AMD_SSBD); entry->ebx |= F(AMD_SSBD);
if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
entry->ebx |= F(AMD_SSB_NO); entry->ebx |= F(AMD_SSB_NO);
......
...@@ -1352,7 +1352,7 @@ long kvm_arch_vm_ioctl(struct file *filp, ...@@ -1352,7 +1352,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
} }
} }
static void cpu_init_hyp_mode(void *dummy) static void cpu_init_hyp_mode(void)
{ {
phys_addr_t pgd_ptr; phys_addr_t pgd_ptr;
unsigned long hyp_stack_ptr; unsigned long hyp_stack_ptr;
...@@ -1386,7 +1386,7 @@ static void cpu_hyp_reinit(void) ...@@ -1386,7 +1386,7 @@ static void cpu_hyp_reinit(void)
if (is_kernel_in_hyp_mode()) if (is_kernel_in_hyp_mode())
kvm_timer_init_vhe(); kvm_timer_init_vhe();
else else
cpu_init_hyp_mode(NULL); cpu_init_hyp_mode();
kvm_arm_init_debug(); kvm_arm_init_debug();
......
...@@ -38,6 +38,11 @@ static unsigned long io_map_base; ...@@ -38,6 +38,11 @@ static unsigned long io_map_base;
#define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0) #define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
#define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1) #define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1)
/* True when the stage-2 fault flags describe a device (I/O) mapping. */
static bool is_iomap(unsigned long flags)
{
	return (flags & KVM_S2PTE_FLAG_IS_IOMAP) != 0;
}
static bool memslot_is_logging(struct kvm_memory_slot *memslot) static bool memslot_is_logging(struct kvm_memory_slot *memslot)
{ {
return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY); return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
...@@ -1698,6 +1703,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, ...@@ -1698,6 +1703,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
vma_pagesize = vma_kernel_pagesize(vma); vma_pagesize = vma_kernel_pagesize(vma);
if (logging_active || if (logging_active ||
(vma->vm_flags & VM_PFNMAP) ||
!fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) { !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
force_pte = true; force_pte = true;
vma_pagesize = PAGE_SIZE; vma_pagesize = PAGE_SIZE;
...@@ -1760,6 +1766,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, ...@@ -1760,6 +1766,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
writable = false; writable = false;
} }
if (exec_fault && is_iomap(flags))
return -ENOEXEC;
spin_lock(&kvm->mmu_lock); spin_lock(&kvm->mmu_lock);
if (mmu_notifier_retry(kvm, mmu_seq)) if (mmu_notifier_retry(kvm, mmu_seq))
goto out_unlock; goto out_unlock;
...@@ -1781,7 +1790,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, ...@@ -1781,7 +1790,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (writable) if (writable)
kvm_set_pfn_dirty(pfn); kvm_set_pfn_dirty(pfn);
if (fault_status != FSC_PERM) if (fault_status != FSC_PERM && !is_iomap(flags))
clean_dcache_guest_page(pfn, vma_pagesize); clean_dcache_guest_page(pfn, vma_pagesize);
if (exec_fault) if (exec_fault)
...@@ -1948,9 +1957,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -1948,9 +1957,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
if (kvm_is_error_hva(hva) || (write_fault && !writable)) { if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
if (is_iabt) { if (is_iabt) {
/* Prefetch Abort on I/O address */ /* Prefetch Abort on I/O address */
kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); ret = -ENOEXEC;
ret = 1; goto out;
goto out_unlock;
} }
/* /*
...@@ -1992,6 +2000,11 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) ...@@ -1992,6 +2000,11 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
if (ret == 0) if (ret == 0)
ret = 1; ret = 1;
out:
if (ret == -ENOEXEC) {
kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
ret = 1;
}
out_unlock: out_unlock:
srcu_read_unlock(&vcpu->kvm->srcu, idx); srcu_read_unlock(&vcpu->kvm->srcu, idx);
return ret; return ret;
...@@ -2301,15 +2314,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, ...@@ -2301,15 +2314,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
if (!vma || vma->vm_start >= reg_end) if (!vma || vma->vm_start >= reg_end)
break; break;
/*
* Mapping a read-only VMA is only allowed if the
* memory region is configured as read-only.
*/
if (writable && !(vma->vm_flags & VM_WRITE)) {
ret = -EPERM;
break;
}
/* /*
* Take the intersection of this VMA with the memory region * Take the intersection of this VMA with the memory region
*/ */
......
...@@ -70,7 +70,7 @@ void kvm_vgic_early_init(struct kvm *kvm) ...@@ -70,7 +70,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
*/ */
int kvm_vgic_create(struct kvm *kvm, u32 type) int kvm_vgic_create(struct kvm *kvm, u32 type)
{ {
int i, vcpu_lock_idx = -1, ret; int i, ret;
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
if (irqchip_in_kernel(kvm)) if (irqchip_in_kernel(kvm))
...@@ -86,17 +86,9 @@ int kvm_vgic_create(struct kvm *kvm, u32 type) ...@@ -86,17 +86,9 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
!kvm_vgic_global_state.can_emulate_gicv2) !kvm_vgic_global_state.can_emulate_gicv2)
return -ENODEV; return -ENODEV;
/*
* Any time a vcpu is run, vcpu_load is called which tries to grab the
* vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
* that no other VCPUs are run while we create the vgic.
*/
ret = -EBUSY; ret = -EBUSY;
kvm_for_each_vcpu(i, vcpu, kvm) { if (!lock_all_vcpus(kvm))
if (!mutex_trylock(&vcpu->mutex)) return ret;
goto out_unlock;
vcpu_lock_idx = i;
}
kvm_for_each_vcpu(i, vcpu, kvm) { kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->arch.has_run_once) if (vcpu->arch.has_run_once)
...@@ -125,10 +117,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type) ...@@ -125,10 +117,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions); INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
out_unlock: out_unlock:
for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) { unlock_all_vcpus(kvm);
vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
mutex_unlock(&vcpu->mutex);
}
return ret; return ret;
} }
...@@ -177,6 +166,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis) ...@@ -177,6 +166,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
break; break;
default: default:
kfree(dist->spis); kfree(dist->spis);
dist->spis = NULL;
return -EINVAL; return -EINVAL;
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment