Commit cd82c4a7 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull more x86 kvm fixes from Paolo Bonzini:

 - Cache coherency fix for SEV live migration

 - Fix for instruction emulation with PKU

 - Fixes for rare cases of delayed interrupt delivery

 - Fix for SEV-ES buffer overflow

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: SEV-ES: go over the sev_pio_data buffer in multiple passes if needed
  KVM: SEV-ES: keep INS functions together
  KVM: x86: remove unnecessary arguments from complete_emulator_pio_in
  KVM: x86: split the two parts of emulator_pio_in
  KVM: SEV-ES: clean up kvm_sev_es_ins/outs
  KVM: x86: leave vcpu->arch.pio.count alone in emulator_pio_in_out
  KVM: SEV-ES: rename guest_ins_data to sev_pio_data
  KVM: SEV: Flush cache on non-coherent systems before RECEIVE_UPDATE_DATA
  KVM: MMU: Reset mmu->pkru_mask to avoid stale data
  KVM: nVMX: promptly process interrupts delivered while in guest mode
  KVM: x86: check for interrupts before deciding whether to exit the fast path
parents 64222515 95e16b47
@@ -702,7 +702,8 @@ struct kvm_vcpu_arch {
 	struct kvm_pio_request pio;
 	void *pio_data;
-	void *guest_ins_data;
+	void *sev_pio_data;
+	unsigned sev_pio_count;
 
 	u8 event_exit_inst_len;

@@ -4596,10 +4596,10 @@ static void update_pkru_bitmask(struct kvm_mmu *mmu)
 	unsigned bit;
 	bool wp;
 
-	if (!is_cr4_pke(mmu)) {
-		mmu->pkru_mask = 0;
+	mmu->pkru_mask = 0;
+
+	if (!is_cr4_pke(mmu))
 		return;
-	}
 
 	wp = is_cr0_wp(mmu);

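Note on the hunk above: the rest of update_pkru_bitmask() accumulates into pkru_mask with |=, so clearing the mask unconditionally before the CR4.PKE check keeps bits computed for an earlier configuration from surviving a recomputation. A minimal, self-contained sketch of that failure mode follows; the toy_mmu struct and the mask values are illustrative stand-ins, not KVM code.

	#include <stdio.h>

	/* Hypothetical model; only the "OR-accumulate without reset" shape matters. */
	struct toy_mmu {
		int cr0_wp;          /* configuration that feeds the mask */
		unsigned pkru_mask;  /* accumulated with |= in the real code */
	};

	static void recompute_buggy(struct toy_mmu *mmu)
	{
		/* No reset: bits from the previous configuration survive. */
		mmu->pkru_mask |= mmu->cr0_wp ? 0x0c : 0x03;
	}

	static void recompute_fixed(struct toy_mmu *mmu)
	{
		mmu->pkru_mask = 0;  /* what the patch adds, ahead of any early return */
		mmu->pkru_mask |= mmu->cr0_wp ? 0x0c : 0x03;
	}

	int main(void)
	{
		struct toy_mmu mmu = { .cr0_wp = 1 };

		recompute_buggy(&mmu);         /* mask for WP=1 */
		mmu.cr0_wp = 0;
		recompute_buggy(&mmu);         /* stale WP=1 bits stay OR-ed in */
		printf("buggy: %#x\n", mmu.pkru_mask);  /* 0xf, not 0x3 */

		mmu.pkru_mask = 0;
		mmu.cr0_wp = 1;
		recompute_fixed(&mmu);
		mmu.cr0_wp = 0;
		recompute_fixed(&mmu);
		printf("fixed: %#x\n", mmu.pkru_mask);  /* 0x3 */
		return 0;
	}
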
@@ -1484,6 +1484,13 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
 		goto e_free_trans;
 	}
 
+	/*
+	 * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP
+	 * encrypts the written data with the guest's key, and the cache may
+	 * contain dirty, unencrypted data.
+	 */
+	sev_clflush_pages(guest_page, n);
+
 	/* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
 	data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
 	data.guest_address |= sev_me_mask;

@@ -6305,18 +6305,13 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 		/*
 		 * If we are running L2 and L1 has a new pending interrupt
-		 * which can be injected, we should re-evaluate
-		 * what should be done with this new L1 interrupt.
-		 * If L1 intercepts external-interrupts, we should
-		 * exit from L2 to L1. Otherwise, interrupt should be
-		 * delivered directly to L2.
+		 * which can be injected, this may cause a vmexit or it may
+		 * be injected into L2.  Either way, this interrupt will be
+		 * processed via KVM_REQ_EVENT, not RVI, because we do not use
+		 * virtual interrupt delivery to inject L1 interrupts into L2.
 		 */
-		if (is_guest_mode(vcpu) && max_irr_updated) {
-			if (nested_exit_on_intr(vcpu))
-				kvm_vcpu_exiting_guest_mode(vcpu);
-			else
-				kvm_make_request(KVM_REQ_EVENT, vcpu);
-		}
+		if (is_guest_mode(vcpu) && max_irr_updated)
+			kvm_make_request(KVM_REQ_EVENT, vcpu);
 	} else {
 		max_irr = kvm_lapic_find_highest_irr(vcpu);
 	}

@@ -6906,7 +6906,7 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
 }
 
 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
-			       unsigned short port, void *val,
+			       unsigned short port,
 			       unsigned int count, bool in)
 {
 	vcpu->arch.pio.port = port;
@@ -6914,10 +6914,8 @@ static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
 	vcpu->arch.pio.count = count;
 	vcpu->arch.pio.size = size;
 
-	if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
-		vcpu->arch.pio.count = 0;
+	if (!kernel_pio(vcpu, vcpu->arch.pio_data))
 		return 1;
-	}
 
 	vcpu->run->exit_reason = KVM_EXIT_IO;
 	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
@@ -6929,26 +6927,39 @@ static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
 	return 0;
 }
 
-static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
-			   unsigned short port, void *val, unsigned int count)
+static int __emulator_pio_in(struct kvm_vcpu *vcpu, int size,
+			     unsigned short port, unsigned int count)
 {
-	int ret;
-
-	if (vcpu->arch.pio.count)
-		goto data_avail;
-
-	memset(vcpu->arch.pio_data, 0, size * count);
-
-	ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
-	if (ret) {
-data_avail:
-		memcpy(val, vcpu->arch.pio_data, size * count);
-		trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
-		vcpu->arch.pio.count = 0;
-		return 1;
+	WARN_ON(vcpu->arch.pio.count);
+	memset(vcpu->arch.pio_data, 0, size * count);
+	return emulator_pio_in_out(vcpu, size, port, count, true);
+}
+
+static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val)
+{
+	int size = vcpu->arch.pio.size;
+	unsigned count = vcpu->arch.pio.count;
+	memcpy(val, vcpu->arch.pio_data, size * count);
+	trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data);
+	vcpu->arch.pio.count = 0;
+}
+
+static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
+			   unsigned short port, void *val, unsigned int count)
+{
+	if (vcpu->arch.pio.count) {
+		/* Complete previous iteration. */
+	} else {
+		int r = __emulator_pio_in(vcpu, size, port, count);
+		if (!r)
+			return r;
+
+		/* Results already available, fall through. */
 	}
 
-	return 0;
+	WARN_ON(count != vcpu->arch.pio.count);
+	complete_emulator_pio_in(vcpu, val);
+	return 1;
 }
 
 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
@@ -6963,9 +6974,15 @@ static int emulator_pio_out(struct kvm_vcpu *vcpu, int size,
 			    unsigned short port, const void *val,
 			    unsigned int count)
 {
+	int ret;
+
 	memcpy(vcpu->arch.pio_data, val, size * count);
 	trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
-	return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
+	ret = emulator_pio_in_out(vcpu, size, port, count, false);
+	if (ret)
+		vcpu->arch.pio.count = 0;
+
+	return ret;
 }
 
 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
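The refactoring above splits emulator_pio_in() into a start step (__emulator_pio_in(), which issues the request) and a completion step (complete_emulator_pio_in(), which copies the result out and clears vcpu->arch.pio.count), so a request that had to go out to userspace can be finished without being re-issued. Below is a simplified, self-contained model of that start/complete pattern; the toy_pio type and helpers are illustrative stand-ins, not the KVM emulator API.

	#include <stdio.h>
	#include <string.h>

	/* Toy model of the start/complete split; types and names are illustrative. */
	struct toy_pio {
		unsigned count;          /* non-zero while a request is outstanding */
		unsigned size;
		unsigned char data[16];  /* stands in for the shared pio_data page */
	};

	/* Phase 1: kick off the read.  Returns 1 if handled "in kernel", 0 if the
	 * result only becomes available after a round trip to "userspace". */
	static int toy_pio_start(struct toy_pio *pio, unsigned size, unsigned count,
				 int handled_in_kernel)
	{
		pio->size = size;
		pio->count = count;
		memset(pio->data, 0xab, size * count);  /* pretend device data arrived */
		return handled_in_kernel;
	}

	/* Phase 2: copy the result to the caller and clear the pending state. */
	static void toy_pio_complete(struct toy_pio *pio, void *val)
	{
		memcpy(val, pio->data, pio->size * pio->count);
		pio->count = 0;
	}

	int main(void)
	{
		struct toy_pio pio = { 0 };
		unsigned char val[4];

		if (toy_pio_start(&pio, 1, 4, 0) == 0) {
			/* ... exit to "userspace", which fills pio.data ... */
		}
		/* Later, on re-entry, only the completion step runs. */
		toy_pio_complete(&pio, val);
		printf("completed, first byte %#x, pending count %u\n", val[0], pio.count);
		return 0;
	}
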
@@ -9643,14 +9660,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
 			break;
 
+		if (vcpu->arch.apicv_active)
+			static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+
 		if (unlikely(kvm_vcpu_exit_request(vcpu))) {
 			exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
 			break;
 		}
-
-		if (vcpu->arch.apicv_active)
-			static_call(kvm_x86_sync_pir_to_irr)(vcpu);
 	}
 
 	/*
 	 * Do this here before restoring debug registers on the host. And
@@ -12368,44 +12385,81 @@ int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
 }
 EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read);
 
-static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
+static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
+			   unsigned int port);
+
+static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu)
 {
-	memcpy(vcpu->arch.guest_ins_data, vcpu->arch.pio_data,
-	       vcpu->arch.pio.count * vcpu->arch.pio.size);
-	vcpu->arch.pio.count = 0;
+	int size = vcpu->arch.pio.size;
+	int port = vcpu->arch.pio.port;
 
+	vcpu->arch.pio.count = 0;
+	if (vcpu->arch.sev_pio_count)
+		return kvm_sev_es_outs(vcpu, size, port);
 	return 1;
 }
 
 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
-			   unsigned int port, void *data,  unsigned int count)
+			   unsigned int port)
 {
-	int ret;
-
-	ret = emulator_pio_out_emulated(vcpu->arch.emulate_ctxt, size, port,
-					data, count);
-	if (ret)
-		return ret;
+	for (;;) {
+		unsigned int count =
+			min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
+		int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count);
 
-	vcpu->arch.pio.count = 0;
+		/* memcpy done already by emulator_pio_out. */
+		vcpu->arch.sev_pio_count -= count;
+		vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size;
+		if (!ret)
+			break;
 
+		/* Emulation done by the kernel. */
+		if (!vcpu->arch.sev_pio_count)
+			return 1;
+	}
+
+	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs;
 	return 0;
 }
 
 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
-			  unsigned int port, void *data, unsigned int count)
+			  unsigned int port);
+
+static void advance_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
 {
-	int ret;
-
-	ret = emulator_pio_in_emulated(vcpu->arch.emulate_ctxt, size, port,
-				       data, count);
-	if (ret) {
-		vcpu->arch.pio.count = 0;
-	} else {
-		vcpu->arch.guest_ins_data = data;
-		vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
+	unsigned count = vcpu->arch.pio.count;
+	complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data);
+	vcpu->arch.sev_pio_count -= count;
+	vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size;
+}
+
+static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
+{
+	int size = vcpu->arch.pio.size;
+	int port = vcpu->arch.pio.port;
+
+	advance_sev_es_emulated_ins(vcpu);
+	if (vcpu->arch.sev_pio_count)
+		return kvm_sev_es_ins(vcpu, size, port);
+	return 1;
+}
+
+static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
+			  unsigned int port)
+{
+	for (;;) {
+		unsigned int count =
+			min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
+		if (!__emulator_pio_in(vcpu, size, port, count))
+			break;
+
+		/* Emulation done by the kernel. */
+		advance_sev_es_emulated_ins(vcpu);
+		if (!vcpu->arch.sev_pio_count)
+			return 1;
 	}
 
+	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
 	return 0;
 }
@@ -12413,8 +12467,10 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
 			 unsigned int port, void *data,  unsigned int count,
 			 int in)
 {
-	return in ? kvm_sev_es_ins(vcpu, size, port, data, count)
-		  : kvm_sev_es_outs(vcpu, size, port, data, count);
+	vcpu->arch.sev_pio_data = data;
+	vcpu->arch.sev_pio_count = count;
+	return in ? kvm_sev_es_ins(vcpu, size, port)
+		  : kvm_sev_es_outs(vcpu, size, port);
 }
 EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
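The kvm_sev_es_ins()/kvm_sev_es_outs() loops above cap each pass at PAGE_SIZE / size items, so a large REP INS/OUTS from a SEV-ES guest no longer overruns the single-page pio_data buffer; leftover items are handled on the next pass or after the userspace round trip. A small standalone sketch of that chunking arithmetic follows; the page size and request size are example values only.

	#include <stdio.h>

	#define TOY_PAGE_SIZE 4096u  /* example value; x86 pio_data is one 4 KiB page */

	/* Split a REP string-I/O request of `count` items of `size` bytes into
	 * passes that each fit in one page, mirroring the min_t() in the patch. */
	static void split_request(unsigned size, unsigned count)
	{
		unsigned pass = 0;

		while (count) {
			unsigned chunk = TOY_PAGE_SIZE / size;

			if (chunk > count)
				chunk = count;
			printf("pass %u: %u items (%u bytes)\n", ++pass, chunk, chunk * size);
			count -= chunk;
		}
	}

	int main(void)
	{
		/* e.g. a guest doing REP OUTSW with 3000 words: 2048 + 952 items */
		split_request(2, 3000);
		return 0;
	}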