Commit d8efcf38 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "Three x86 fixes and one for ARM/ARM64.

  In particular, nested virtualization on Intel is broken in 3.13 and
  fixed by this pull request"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvm, vmx: Really fix lazy FPU on nested guest
  kvm: x86: fix emulator buffer overflow (CVE-2014-0049)
  arm/arm64: KVM: detect CPU reset on CPU_PM_EXIT
  KVM: MMU: drop read-only large sptes when creating lower level sptes
parents 78d9e934 1b385cbd
arch/arm/kvm/arm.c
@@ -878,7 +878,8 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
 				    unsigned long cmd,
 				    void *v)
 {
-	if (cmd == CPU_PM_EXIT) {
+	if (cmd == CPU_PM_EXIT &&
+	    __hyp_get_vectors() == hyp_default_vectors) {
 		cpu_init_hyp_mode(NULL);
 		return NOTIFY_OK;
 	}
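For context, the resulting notifier reads roughly as below. This is a sketch reconstructed from the hunk above; the final return NOTIFY_DONE is an assumption about the unchanged remainder of the function.

static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
				    unsigned long cmd,
				    void *v)
{
	/*
	 * On CPU_PM_EXIT, if HVBAR still points at the boot-time default
	 * vectors, the core lost its Hyp state across the low-power
	 * transition and KVM's Hyp mode must be re-initialized.
	 */
	if (cmd == CPU_PM_EXIT &&
	    __hyp_get_vectors() == hyp_default_vectors) {
		cpu_init_hyp_mode(NULL);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;	/* assumed fallthrough, not shown above */
}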
arch/arm/kvm/interrupts.S
@@ -220,6 +220,10 @@ after_vfp_restore:
  * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
  * passed in r0 and r1.
  *
+ * A function pointer with a value of 0xffffffff has a special meaning,
+ * and is used to implement __hyp_get_vectors in the same way as in
+ * arch/arm/kernel/hyp_stub.S.
+ *
  * The calling convention follows the standard AAPCS:
  *   r0 - r3: caller save
  *   r12:     caller save
@@ -363,6 +367,11 @@ hyp_hvc:
 host_switch_to_hyp:
 	pop	{r0, r1, r2}
 
+	/* Check for __hyp_get_vectors */
+	cmp	r0, #-1
+	mrceq	p15, 4, r0, c12, c0, 0	@ get HVBAR
+	beq	1f
+
 	push	{lr}
 	mrs	lr, SPSR
 	push	{lr}
@@ -378,7 +387,7 @@ THUMB(	orr	lr, #1)
 	pop	{lr}
 	msr	SPSR_csxf, lr
 	pop	{lr}
-	eret
+1:	eret
 
 guest_trap:
 	load_vcpu	@ Load VCPU pointer to r0
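The calling-convention comment above implies host-side C usage like the following sketch. kvm_call_hyp and __hyp_get_vectors exist in this tree, but the inline definition below only illustrates the sentinel mechanism; the real __hyp_get_vectors is an assembly routine.

/* Normal case: r0 carries the Hyp-mode function pointer, r1-r3 carry
 * up to three arguments, per the AAPCS comment above. */
u64 kvm_call_hyp(void *hypfn, ...);

/* Special case: a "function pointer" of 0xffffffff is never called;
 * hyp_hvc instead returns the live HVBAR, mirroring hyp_stub.S. */
static inline unsigned long __hyp_get_vectors(void)
{
	return kvm_call_hyp((void *)-1);	/* illustrative sentinel call */
}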
arch/arm64/kvm/hyp.S
@@ -694,6 +694,24 @@ __hyp_panic_str:
 
 	.align	2
 
+/*
+ * u64 kvm_call_hyp(void *hypfn, ...);
+ *
+ * This is not really a variadic function in the classic C-way and care must
+ * be taken when calling this to ensure parameters are passed in registers
+ * only, since the stack will change between the caller and the callee.
+ *
+ * Call the function with the first argument containing a pointer to the
+ * function you wish to call in Hyp mode, and subsequent arguments will be
+ * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
+ * function pointer can be passed). The function being called must be mapped
+ * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
+ * passed in r0 and r1.
+ *
+ * A function pointer with a value of 0 has a special meaning, and is
+ * used to implement __hyp_get_vectors in the same way as in
+ * arch/arm64/kernel/hyp_stub.S.
+ */
 ENTRY(kvm_call_hyp)
 	hvc	#0
 	ret
@@ -737,7 +755,12 @@ el1_sync:	// Guest trapped into EL2
 	pop	x2, x3
 	pop	x0, x1
 
-	push	lr, xzr
+	/* Check for __hyp_get_vectors */
+	cbnz	x0, 1f
+	mrs	x0, vbar_el2
+	b	2f
+
+1:	push	lr, xzr
 
 	/*
 	 * Compute the function address in EL2, and shuffle the parameters.
@@ -750,7 +773,7 @@ el1_sync:	// Guest trapped into EL2
 	blr	lr
 
 	pop	lr, xzr
-	eret
+2:	eret
 
 el1_trap:
 	/*
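A hedged usage sketch of the convention documented above; the TLB-flush call mirrors real callers in this tree, while the NULL-sentinel form of __hyp_get_vectors is illustrative rather than the actual implementation.

/* Arguments must stay in registers: EL1 and EL2 run on different
 * stacks, so anything the caller spills is invisible to the callee. */
kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);

/* A NULL hypfn is the sentinel: el1_sync loads VBAR_EL2 into x0 and
 * erets instead of branching, which is how __hyp_get_vectors is
 * special-cased here. */
u64 vectors = kvm_call_hyp(NULL);	/* illustrative form, see caveat above */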
arch/x86/kvm/mmu.c
@@ -2672,6 +2672,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 			break;
 		}
 
+		drop_large_spte(vcpu, iterator.sptep);
 		if (!is_shadow_present_pte(*iterator.sptep)) {
 			u64 base_addr = iterator.addr;
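The fix zaps any pre-existing large-page spte before the walk installs lower-level sptes; a stale read-only large spte would otherwise keep shadowing the new mapping. As a reminder of what the helper does, here is a sketch modeled on the mainline drop_large_spte (details such as the stats bookkeeping are recalled, not quoted):

static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
	/* If this slot holds a large-page mapping, zap it and flush
	 * remote TLBs so the iterator can install a page table (and
	 * eventually a small-page spte) at this level instead. */
	if (is_large_pte(*sptep)) {
		drop_spte(vcpu->kvm, sptep);
		--vcpu->kvm->stat.lpages;	/* recalled bookkeeping */
		kvm_flush_remote_tlbs(vcpu->kvm);
	}
}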
arch/x86/kvm/vmx.c
@@ -6688,7 +6688,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 	else if (is_page_fault(intr_info))
 		return enable_ept;
 	else if (is_no_device(intr_info) &&
-		 !(nested_read_cr0(vmcs12) & X86_CR0_TS))
+		 !(vmcs12->guest_cr0 & X86_CR0_TS))
 		return 0;
 	return vmcs12->exception_bitmap &
 			(1u << (intr_info & INTR_INFO_VECTOR_MASK));
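The subtlety: nested_read_cr0 folds in the CR0 read shadow, i.e. it yields the value L2 observes, whereas the lazy-FPU decision must test the TS bit L1 actually programmed into guest_cr0, since L0's lazy FPU handling can leave the two disagreeing. A sketch of the shadowing helper, modeled on its mainline definition (recalled, not quoted):

static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
{
	/* Bits owned by the L1 hypervisor are reported from the read
	 * shadow; the rest come from the real guest CR0. Under lazy FPU,
	 * TS can be set in guest_cr0 while the shadow still shows it
	 * clear, so the shadowed view is the wrong one to test here. */
	return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
	       (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
}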
arch/x86/kvm/x86.c
@@ -6186,7 +6186,7 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
 		frag->len -= len;
 	}
 
-	if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
+	if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
 		vcpu->mmio_needed = 0;
 
 		/* FIXME: return into emulator if single-stepping. */
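Why >= instead of ==: mmio_cur_fragment indexes vcpu->mmio_fragments[], and in corner cases the completion path can advance the cursor past mmio_nr_fragments; an exact equality test then never fires, and a subsequent call indexes the array out of bounds (the CVE-2014-0049 overflow). A condensed, hedged sketch of the surrounding flow:

/* Condensed from complete_emulated_mmio(); the fragment handling is
 * simplified and should be read as illustrative, not verbatim. */
struct kvm_mmio_fragment *frag =
	&vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
unsigned len = min(8u, frag->len);

if (frag->len <= 8)
	vcpu->mmio_cur_fragment++;	/* can step past the last slot */
else {
	frag->data += len;
	frag->gpa  += len;
	frag->len  -= len;
}

/* '>=' stops a cursor that overshot from ever being used as an index. */
if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments)
	vcpu->mmio_needed = 0;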