Commit 3f7ba7e1 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Radim Krčmář:
 "MIPS:
   - Fix build with KVM, DYNAMIC_DEBUG and JUMP_LABEL.

  PPC:
   - Fix host crashes/hangs on POWER9.
   - Properly restore userspace state after KVM_RUN ioctl.

  s390:
   - Fix address translation in odd-ball cases (real-space designation
     ASCEs).

  x86:
   - Fix privilege escalation in 64-bit Windows guests.

  All patches are for stable and the x86 one also has a CVE"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: fix singlestepping over syscall
  KVM: s390: gaccess: fix real-space designation asce handling for gmap shadows
  KVM: MIPS: Fix maybe-uninitialized build failure
  KVM: PPC: Book3S HV: Ignore timebase offset on POWER9 DD1
  KVM: PPC: Book3S HV: Save/restore host values of debug registers
  KVM: PPC: Book3S HV: Preserve userspace HTM state properly
  KVM: PPC: Book3S HV: Restore critical SPRs to host values on guest exit
  KVM: PPC: Book3S HV: Context-switch EBB registers properly
  KVM: PPC: Book3S HV: Cope with host using large decrementer mode
parents 4f92f0e2 c8401dda
arch/mips/kvm/tlb.c
@@ -166,7 +166,11 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
 int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
 			  bool user, bool kernel)
 {
-	int idx_user, idx_kernel;
+	/*
+	 * Initialize idx_user and idx_kernel to workaround bogus
+	 * maybe-initialized warning when using GCC 6.
+	 */
+	int idx_user = 0, idx_kernel = 0;
 	unsigned long flags, old_entryhi;
 
 	local_irq_save(flags);
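Aside: the workaround exists because GCC 6's -Wmaybe-uninitialized cannot see that the conditional writes and conditional reads are guarded by the same flags. A minimal, standalone sketch of the pattern (hypothetical helper names, not the kernel code):

/*
 * idx_user/idx_kernel are written and read under the same conditions,
 * so no uninitialized read is possible, but GCC 6's flow analysis
 * cannot prove it and warns unless they are initialized.
 */
#include <stdio.h>

static int probe(int va)		/* stand-in for the TLB probe */
{
	return (va & 1) ? va : -1;
}

static void tlb_inv(int va, int user, int kernel)
{
	int idx_user = 0, idx_kernel = 0;	/* the workaround */

	if (user)
		idx_user = probe(va);
	if (kernel)
		idx_kernel = probe(va + 1);

	if (user && idx_user >= 0)
		printf("invalidated user index %d\n", idx_user);
	if (kernel && idx_kernel >= 0)
		printf("invalidated kernel index %d\n", idx_kernel);
}

int main(void)
{
	tlb_inv(43, 1, 1);
	return 0;
}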
arch/powerpc/kvm/book3s_hv.c
@@ -1486,6 +1486,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
 		break;
 	case KVM_REG_PPC_TB_OFFSET:
+		/*
+		 * POWER9 DD1 has an erratum where writing TBU40 causes
+		 * the timebase to lose ticks.  So we don't let the
+		 * timebase offset be changed on P9 DD1.  (It is
+		 * initialized to zero.)
+		 */
+		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+			break;
 		/* round up to multiple of 2^24 */
 		vcpu->arch.vcore->tb_offset =
 			ALIGN(set_reg_val(id, *val), 1UL << 24);
@@ -2907,12 +2915,36 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 	int r;
 	int srcu_idx;
+	unsigned long ebb_regs[3] = {};	/* shut up GCC */
+	unsigned long user_tar = 0;
+	unsigned int user_vrsave;
 
 	if (!vcpu->arch.sane) {
 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return -EINVAL;
 	}
 
+	/*
+	 * Don't allow entry with a suspended transaction, because
+	 * the guest entry/exit code will lose it.
+	 * If the guest has TM enabled, save away their TM-related SPRs
+	 * (they will get restored by the TM unavailable interrupt).
+	 */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+	    (current->thread.regs->msr & MSR_TM)) {
+		if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+			run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+			run->fail_entry.hardware_entry_failure_reason = 0;
+			return -EINVAL;
+		}
+		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+		current->thread.regs->msr &= ~MSR_TM;
+	}
+#endif
+
 	kvmppc_core_prepare_to_enter(vcpu);
 
 	/* No need to go into the guest when all we'll do is come back out */
@@ -2934,6 +2966,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 	flush_all_to_thread(current);
 
+	/* Save userspace EBB and other register values */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		ebb_regs[0] = mfspr(SPRN_EBBHR);
+		ebb_regs[1] = mfspr(SPRN_EBBRR);
+		ebb_regs[2] = mfspr(SPRN_BESCR);
+		user_tar = mfspr(SPRN_TAR);
+	}
+	user_vrsave = mfspr(SPRN_VRSAVE);
+
 	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
 	vcpu->arch.pgdir = current->mm->pgd;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2960,6 +3001,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		}
 	} while (is_kvmppc_resume_guest(r));
 
+	/* Restore userspace EBB and other register values */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		mtspr(SPRN_EBBHR, ebb_regs[0]);
+		mtspr(SPRN_EBBRR, ebb_regs[1]);
+		mtspr(SPRN_BESCR, ebb_regs[2]);
+		mtspr(SPRN_TAR, user_tar);
+		mtspr(SPRN_FSCR, current->thread.fscr);
+	}
+	mtspr(SPRN_VRSAVE, user_vrsave);
+
 out:
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
 	atomic_dec(&vcpu->kvm->arch.vcpus_running);
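Aside: the KVM_REG_PPC_TB_OFFSET hunk above rounds the offset because the hypervisor applies it through TBU40, which can only write the upper 40 bits of the 64-bit timebase, so any usable offset is a multiple of 2^24. A standalone sketch of the rounding, with ALIGN_UP as a stand-in for the kernel's ALIGN macro:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long requested = 0x123456789UL;
	unsigned long tb_offset = ALIGN_UP(requested, 1UL << 24);

	/* prints 0x124000000: rounded up, low 24 bits cleared */
	printf("0x%lx\n", tb_offset);
	return 0;
}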
arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -121,10 +121,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	 * Put whatever is in the decrementer into the
 	 * hypervisor decrementer.
 	 */
+BEGIN_FTR_SECTION
+	ld	r5, HSTATE_KVM_VCORE(r13)
+	ld	r6, VCORE_KVM(r5)
+	ld	r9, KVM_HOST_LPCR(r6)
+	andis.	r9, r9, LPCR_LD@h
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	mfspr	r8,SPRN_DEC
 	mftb	r7
-	mtspr	SPRN_HDEC,r8
+BEGIN_FTR_SECTION
+	/* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
+	bne	32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	extsw	r8,r8
+32:	mtspr	SPRN_HDEC,r8
 	add	r8,r8,r7
 	std	r8,HSTATE_DECEXP(r13)
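Aside: what the new feature sections encode, restated as a C sketch (helper and parameter names are illustrative): before POWER9 the value read from HDEC is a 32-bit quantity that must be sign-extended, while a POWER9 host that runs the large decrementer (LPCR[LD] = 1) gets a wider value that must be used as-is.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static int64_t hdec_to_delta(uint64_t hdec_raw, bool power9, bool lpcr_ld)
{
	if (power9 && lpcr_ld)
		return (int64_t)hdec_raw;	/* large decrementer: full width */
	return (int64_t)(int32_t)hdec_raw;	/* the extsw r8,r8 case */
}

int main(void)
{
	/* 0xfffffff0 is -16 as a 32-bit DEC, but huge if misread as 64-bit */
	printf("%lld\n", (long long)hdec_to_delta(0xfffffff0UL, false, false));
	printf("%lld\n", (long long)hdec_to_delta(0xfffffff0UL, true, true));
	return 0;
}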
arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -32,12 +32,29 @@
 #include <asm/opal.h>
 #include <asm/xive-regs.h>
 
+/* Sign-extend HDEC if not on POWER9 */
+#define EXTEND_HDEC(reg)			\
+BEGIN_FTR_SECTION;				\
+	extsw	reg, reg;			\
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+
 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
 
 /* Values in HSTATE_NAPPING(r13) */
 #define NAPPING_CEDE	1
 #define NAPPING_NOVCPU	2
 
+/* Stack frame offsets for kvmppc_hv_entry */
+#define SFS			144
+#define STACK_SLOT_TRAP		(SFS-4)
+#define STACK_SLOT_TID		(SFS-16)
+#define STACK_SLOT_PSSCR	(SFS-24)
+#define STACK_SLOT_PID		(SFS-32)
+#define STACK_SLOT_IAMR		(SFS-40)
+#define STACK_SLOT_CIABR	(SFS-48)
+#define STACK_SLOT_DAWR		(SFS-56)
+#define STACK_SLOT_DAWRX	(SFS-64)
+
 /*
  * Call kvmppc_hv_entry in real mode.
  * Must be called with interrupts hard-disabled.
@@ -214,6 +231,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 kvmppc_primary_no_guest:
 	/* We handle this much like a ceded vcpu */
 	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
+	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
+	/* HDEC value came from DEC in the first place, it will fit */
 	mfspr	r3, SPRN_HDEC
 	mtspr	SPRN_DEC, r3
 	/*
@@ -295,8 +314,9 @@ kvm_novcpu_wakeup:
 
 	/* See if our timeslice has expired (HDEC is negative) */
 	mfspr	r0, SPRN_HDEC
+	EXTEND_HDEC(r0)
 	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
-	cmpwi	r0, 0
+	cmpdi	r0, 0
 	blt	kvm_novcpu_exit
 
 	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
@@ -319,10 +339,10 @@ kvm_novcpu_exit:
 	bl	kvmhv_accumulate_time
 #endif
 13:	mr	r3, r12
-	stw	r12, 112-4(r1)
+	stw	r12, STACK_SLOT_TRAP(r1)
 	bl	kvmhv_commence_exit
 	nop
-	lwz	r12, 112-4(r1)
+	lwz	r12, STACK_SLOT_TRAP(r1)
 	b	kvmhv_switch_to_host
 
 /*
@@ -390,8 +410,8 @@ kvm_secondary_got_guest:
 	lbz	r4, HSTATE_PTID(r13)
 	cmpwi	r4, 0
 	bne	63f
-	lis	r6, 0x7fff
-	ori	r6, r6, 0xffff
+	LOAD_REG_ADDR(r6, decrementer_max)
+	ld	r6, 0(r6)
 	mtspr	SPRN_HDEC, r6
 	/* and set per-LPAR registers, if doing dynamic micro-threading */
 	ld	r6, HSTATE_SPLIT_MODE(r13)
@@ -545,11 +565,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  *                                                                            *
  *****************************************************************************/
 
-/* Stack frame offsets */
-#define STACK_SLOT_TID		(112-16)
-#define STACK_SLOT_PSSCR	(112-24)
-#define STACK_SLOT_PID		(112-32)
-
 .global kvmppc_hv_entry
 kvmppc_hv_entry:
@@ -565,7 +580,7 @@ kvmppc_hv_entry:
 	 */
 	mflr	r0
 	std	r0, PPC_LR_STKOFF(r1)
-	stdu	r1, -112(r1)
+	stdu	r1, -SFS(r1)
 
 	/* Save R1 in the PACA */
 	std	r1, HSTATE_HOST_R1(r13)
@@ -749,10 +764,20 @@ BEGIN_FTR_SECTION
 	mfspr	r5, SPRN_TIDR
 	mfspr	r6, SPRN_PSSCR
 	mfspr	r7, SPRN_PID
+	mfspr	r8, SPRN_IAMR
 	std	r5, STACK_SLOT_TID(r1)
 	std	r6, STACK_SLOT_PSSCR(r1)
 	std	r7, STACK_SLOT_PID(r1)
+	std	r8, STACK_SLOT_IAMR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+	mfspr	r5, SPRN_CIABR
+	mfspr	r6, SPRN_DAWR
+	mfspr	r7, SPRN_DAWRX
+	std	r5, STACK_SLOT_CIABR(r1)
+	std	r6, STACK_SLOT_DAWR(r1)
+	std	r7, STACK_SLOT_DAWRX(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
 BEGIN_FTR_SECTION
 	/* Set partition DABR */
@@ -968,7 +993,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 
 	/* Check if HDEC expires soon */
 	mfspr	r3, SPRN_HDEC
-	cmpwi	r3, 512		/* 1 microsecond */
+	EXTEND_HDEC(r3)
+	cmpdi	r3, 512		/* 1 microsecond */
 	blt	hdec_soon
 
 #ifdef CONFIG_KVM_XICS
@@ -1505,11 +1531,10 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 	 * set by the guest could disrupt the host.
 	 */
 	li	r0, 0
-	mtspr	SPRN_IAMR, r0
-	mtspr	SPRN_CIABR, r0
-	mtspr	SPRN_DAWRX, r0
+	mtspr	SPRN_PSPB, r0
 	mtspr	SPRN_WORT, r0
 BEGIN_FTR_SECTION
+	mtspr	SPRN_IAMR, r0
 	mtspr	SPRN_TCSCR, r0
 	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
 	li	r0, 1
@@ -1525,6 +1550,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 	std	r6,VCPU_UAMOR(r9)
 	li	r6,0
 	mtspr	SPRN_AMR,r6
+	mtspr	SPRN_UAMOR, r6
 
 	/* Switch DSCR back to host value */
 	mfspr	r8, SPRN_DSCR
@@ -1669,13 +1695,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	ptesync
 
 	/* Restore host values of some registers */
+BEGIN_FTR_SECTION
+	ld	r5, STACK_SLOT_CIABR(r1)
+	ld	r6, STACK_SLOT_DAWR(r1)
+	ld	r7, STACK_SLOT_DAWRX(r1)
+	mtspr	SPRN_CIABR, r5
+	mtspr	SPRN_DAWR, r6
+	mtspr	SPRN_DAWRX, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 BEGIN_FTR_SECTION
 	ld	r5, STACK_SLOT_TID(r1)
 	ld	r6, STACK_SLOT_PSSCR(r1)
 	ld	r7, STACK_SLOT_PID(r1)
+	ld	r8, STACK_SLOT_IAMR(r1)
 	mtspr	SPRN_TIDR, r5
 	mtspr	SPRN_PSSCR, r6
 	mtspr	SPRN_PID, r7
+	mtspr	SPRN_IAMR, r8
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 BEGIN_FTR_SECTION
 	PPC_INVALIDATE_ERAT
@@ -1819,8 +1855,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
 	li	r0, KVM_GUEST_MODE_NONE
 	stb	r0, HSTATE_IN_GUEST(r13)
 
-	ld	r0, 112+PPC_LR_STKOFF(r1)
-	addi	r1, r1, 112
+	ld	r0, SFS+PPC_LR_STKOFF(r1)
+	addi	r1, r1, SFS
 	mtlr	r0
 	blr
@@ -2366,12 +2402,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
 	mfspr	r3, SPRN_DEC
 	mfspr	r4, SPRN_HDEC
 	mftb	r5
-	cmpw	r3, r4
+	extsw	r3, r3
+	EXTEND_HDEC(r4)
+	cmpd	r3, r4
 	ble	67f
 	mtspr	SPRN_DEC, r4
 67:
 	/* save expiry time of guest decrementer */
-	extsw	r3, r3
 	add	r3, r3, r5
 	ld	r4, HSTATE_KVM_VCPU(r13)
 	ld	r5, HSTATE_KVM_VCORE(r13)
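Aside: the last hunk's bookkeeping, restated as a C sketch (function and parameter names are illustrative): both decrementers are widened to signed 64-bit before comparing, the hardware DEC is clamped to the HDEC deadline so the host's hypervisor-decrementer interrupt is not deferred, and the guest decrementer expiry is recorded as DEC plus the current timebase.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static int64_t extend_hdec(uint64_t v, bool arch_300)
{
	return arch_300 ? (int64_t)v : (int64_t)(int32_t)v;
}

static int64_t guest_dec_expiry(uint64_t dec_raw, uint64_t hdec_raw,
				uint64_t tb, bool arch_300,
				void (*set_dec)(int64_t))
{
	int64_t dec  = (int64_t)(int32_t)dec_raw;	/* extsw r3,r3 */
	int64_t hdec = extend_hdec(hdec_raw, arch_300);	/* EXTEND_HDEC(r4) */

	if (dec > hdec)			/* cmpd r3,r4; ble 67f */
		set_dec(hdec);		/* mtspr SPRN_DEC,r4 */
	return dec + (int64_t)tb;	/* expiry saved for the vcpu */
}

static void set_dec_stub(int64_t v)
{
	printf("DEC clamped to %lld\n", (long long)v);
}

int main(void)
{
	printf("expiry %lld\n", (long long)guest_dec_expiry(0x100, 0x80, 1000,
							    true, set_dec_stub));
	return 0;
}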
arch/s390/kvm/gaccess.c
@@ -977,11 +977,12 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 	ptr = asce.origin * 4096;
 	if (asce.r) {
 		*fake = 1;
+		ptr = 0;
 		asce.dt = ASCE_TYPE_REGION1;
 	}
 	switch (asce.dt) {
 	case ASCE_TYPE_REGION1:
-		if (vaddr.rfx01 > asce.tl && !asce.r)
+		if (vaddr.rfx01 > asce.tl && !*fake)
 			return PGM_REGION_FIRST_TRANS;
 		break;
 	case ASCE_TYPE_REGION2:
@@ -1009,8 +1010,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 		union region1_table_entry rfte;
 
 		if (*fake) {
-			/* offset in 16EB guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
+			ptr += (unsigned long) vaddr.rfx << 53;
 			rfte.val = ptr;
 			goto shadow_r2t;
 		}
@@ -1036,8 +1036,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 		union region2_table_entry rste;
 
 		if (*fake) {
-			/* offset in 8PB guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
+			ptr += (unsigned long) vaddr.rsx << 42;
 			rste.val = ptr;
 			goto shadow_r3t;
 		}
@@ -1064,8 +1063,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 		union region3_table_entry rtte;
 
 		if (*fake) {
-			/* offset in 4TB guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
+			ptr += (unsigned long) vaddr.rtx << 31;
 			rtte.val = ptr;
 			goto shadow_sgt;
 		}
@@ -1101,8 +1099,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 		union segment_table_entry ste;
 
 		if (*fake) {
-			/* offset in 2G guest memory block */
-			ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
+			ptr += (unsigned long) vaddr.sx << 20;
 			ste.val = ptr;
 			goto shadow_pgt;
 		}
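Aside: with a real-space designation ASCE the guest address space is mapped 1:1, so the shadow walk fabricates table entries instead of reading them from memory. A standalone sketch of the corrected arithmetic (field names mirror the vaddr union in gaccess.c; the 11-bit index masks follow the s390 region/segment index layout): the fake walk now starts at ptr = 0, and each level adds this level's own index times the size one entry maps at that level (2^53 bytes for a region-1 entry, 2^42 region-2, 2^31 region-3, 2^20 for a segment entry).

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t gaddr = 0x0123456789abc000ULL;	/* example guest address */
	uint64_t rfx = (gaddr >> 53) & 0x7ff;	/* region-first index */
	uint64_t rsx = (gaddr >> 42) & 0x7ff;	/* region-second index */
	uint64_t rtx = (gaddr >> 31) & 0x7ff;	/* region-third index */
	uint64_t sx  = (gaddr >> 20) & 0x7ff;	/* segment index */
	uint64_t ptr = 0;			/* the fix: start the fake walk at 0 */

	printf("r1 entry -> %#" PRIx64 "\n", ptr + (rfx << 53));
	printf("r2 entry -> %#" PRIx64 "\n", ptr + (rsx << 42));
	printf("r3 entry -> %#" PRIx64 "\n", ptr + (rtx << 31));
	printf("sg entry -> %#" PRIx64 "\n", ptr + (sx << 20));
	return 0;
}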
arch/x86/include/asm/kvm_emulate.h
@@ -296,6 +296,7 @@ struct x86_emulate_ctxt {
 	bool perm_ok; /* do not check permissions if true */
 	bool ud;	/* inject an #UD if host doesn't support insn */
+	bool tf;	/* TF value before instruction (after for syscall/sysret) */
 
 	bool have_exception;
 	struct x86_exception exception;
arch/x86/kvm/emulate.c
@@ -2742,6 +2742,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
 		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
 	}
 
+	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
 	return X86EMUL_CONTINUE;
 }
arch/x86/kvm/x86.c
@@ -5313,6 +5313,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 
 	ctxt->eflags = kvm_get_rflags(vcpu);
+	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
+
 	ctxt->eip = kvm_rip_read(vcpu);
 	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
 		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
@@ -5528,36 +5530,25 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
 	return dr6;
 }
 
-static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
+static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
 {
 	struct kvm_run *kvm_run = vcpu->run;
 
-	/*
-	 * rflags is the old, "raw" value of the flags.  The new value has
-	 * not been saved yet.
-	 *
-	 * This is correct even for TF set by the guest, because "the
-	 * processor will not generate this exception after the instruction
-	 * that sets the TF flag".
-	 */
-	if (unlikely(rflags & X86_EFLAGS_TF)) {
-		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
-			kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
-						  DR6_RTM;
-			kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
-			kvm_run->debug.arch.exception = DB_VECTOR;
-			kvm_run->exit_reason = KVM_EXIT_DEBUG;
-			*r = EMULATE_USER_EXIT;
-		} else {
-			/*
-			 * "Certain debug exceptions may clear bit 0-3.  The
-			 * remaining contents of the DR6 register are never
-			 * cleared by the processor".
-			 */
-			vcpu->arch.dr6 &= ~15;
-			vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
-			kvm_queue_exception(vcpu, DB_VECTOR);
-		}
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+		kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
+		kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
+		kvm_run->debug.arch.exception = DB_VECTOR;
+		kvm_run->exit_reason = KVM_EXIT_DEBUG;
+		*r = EMULATE_USER_EXIT;
+	} else {
+		/*
+		 * "Certain debug exceptions may clear bit 0-3.  The
+		 * remaining contents of the DR6 register are never
+		 * cleared by the processor".
+		 */
+		vcpu->arch.dr6 &= ~15;
+		vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
+		kvm_queue_exception(vcpu, DB_VECTOR);
 	}
 }
 
@@ -5567,7 +5558,17 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	int r = EMULATE_DONE;
 
 	kvm_x86_ops->skip_emulated_instruction(vcpu);
-	kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+
+	/*
+	 * rflags is the old, "raw" value of the flags.  The new value has
+	 * not been saved yet.
+	 *
+	 * This is correct even for TF set by the guest, because "the
+	 * processor will not generate this exception after the instruction
+	 * that sets the TF flag".
+	 */
+	if (unlikely(rflags & X86_EFLAGS_TF))
+		kvm_vcpu_do_singlestep(vcpu, &r);
 	return r == EMULATE_DONE;
 }
 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
@@ -5726,8 +5727,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
 		kvm_rip_write(vcpu, ctxt->eip);
-		if (r == EMULATE_DONE)
-			kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+		if (r == EMULATE_DONE &&
+		    (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+			kvm_vcpu_do_singlestep(vcpu, &r);
 		if (!ctxt->have_exception ||
 		    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
 			__kvm_set_rflags(vcpu, ctxt->eflags);
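Aside: the shape of the x86 fix, condensed into a standalone C sketch (types and names are illustrative, not the KVM internals): TF is latched into the emulation context before the instruction runs, SYSCALL re-latches it after the eflags masking it performs, and the post-emulation single-step decision tests the latched bit (or an attached userspace debugger) instead of re-reading the guest's current eflags.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define X86_EFLAGS_TF	(1u << 8)

struct emu_ctxt {
	uint32_t eflags;
	bool tf;	/* TF before insn (after, for syscall/sysret) */
};

static void init_ctxt(struct emu_ctxt *c, uint32_t guest_eflags)
{
	c->eflags = guest_eflags;
	c->tf = (c->eflags & X86_EFLAGS_TF) != 0;	/* latch before insn */
}

static void emulate_syscall(struct emu_ctxt *c, uint32_t fmask)
{
	c->eflags &= ~fmask;				/* masking may clear TF */
	c->tf = (c->eflags & X86_EFLAGS_TF) != 0;	/* re-latch afterwards */
}

static bool want_singlestep(const struct emu_ctxt *c, bool guestdbg_ss)
{
	return c->tf || guestdbg_ss;
}

int main(void)
{
	struct emu_ctxt c;

	init_ctxt(&c, X86_EFLAGS_TF);		/* guest is single-stepping */
	emulate_syscall(&c, X86_EFLAGS_TF);	/* guest kernel masks TF */
	printf("singlestep after syscall: %d\n", want_singlestep(&c, false));
	return 0;
}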