Commit 3824f787 authored by David Hildenbrand, committed by Greg Kroah-Hartman

KVM: s390: fix memory overwrites when vx is disabled

commit 9abc2a08 upstream.

The kernel now always uses vector registers when available; KVM, however, has
special logic depending on whether support is really enabled for a guest. If
support is disabled, guest_fpregs.fregs will only contain memory for the fpu.
The kernel, however, will store vector registers into that area,
resulting in crazy memory overwrites.
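
For orientation, here is a minimal, self-contained sketch of why the overwrite
happens. The types below are stand-ins (not the kernel's definitions, which
live in arch/s390/include/asm/fpu/types.h); only the sizes matter: a save area
allocated for 16 x 8-byte floating-point registers cannot hold 32 x 16-byte
vector registers.

/* Sketch only (stand-in types, not part of this patch). */
#include <stdio.h>

typedef unsigned long long freg_t;                          /* 8-byte FPR */
typedef struct { unsigned long long hi, lo; } __vector128;  /* 16-byte VR */

#define __NUM_FPRS 16
#define __NUM_VXRS 32

int main(void)
{
	printf("FP-only save area: %zu bytes\n", sizeof(freg_t) * __NUM_FPRS);
	printf("vector save area : %zu bytes\n", sizeof(__vector128) * __NUM_VXRS);
	/* Storing 512 bytes of vector state into a 128-byte buffer that was
	 * allocated for fprs alone is the memory overwrite described above. */
	return 0;
}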

Simply extending that area is not enough, because the format of the
registers also changes. We would have to do additional conversions, making
the code even more complex. Therefore let's directly use one place for
the vector/fpu registers + fpc (in kvm_run). We just have to convert the
data properly when accessing it. This makes the current code much simpler.
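
The conversion is done with the existing convert_vx_to_fp()/convert_fp_to_vx()
helpers that the diff below calls. Roughly (a paraphrase of the s390 fpu
internals with stand-in types, shown only as a sketch): each floating-point
register is the leftmost 64 bits of the corresponding vector register, so
converting just copies the high halves of the first 16 vector registers.

/* Sketch of the conversion helpers; stand-in types, big-endian layout as on s390. */
typedef unsigned long long freg_t;                            /* 8-byte FPR */
typedef struct { unsigned long long high, low; } __vector128; /* 16-byte VR */

#define __NUM_FPRS 16

/* FPR i overlays the leftmost 64 bits of VR i: extract the high halves. */
static inline void convert_vx_to_fp(freg_t *fprs, __vector128 *vxrs)
{
	int i;

	for (i = 0; i < __NUM_FPRS; i++)
		fprs[i] = vxrs[i].high;
}

/* And the reverse: write each FPR back into the high half of its VR. */
static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
{
	int i;

	for (i = 0; i < __NUM_FPRS; i++)
		vxrs[i].high = fprs[i];
}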

Please note that vector/fpu registers are now always stored to
vcpu->run->s.regs.vrs. Although this data is visible to QEMU and
used for migration, we only guarantee valid values to user space when
KVM_SYNC_VRS is set. As that is only the case when we have vector
register support, we are on the safe side.
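
From the user-space side, the contract sketched here (a hypothetical consumer,
not code from this patch) is simply: only read vrs out of kvm_run when the
kernel has flagged it valid via KVM_SYNC_VRS in kvm_valid_regs. This assumes
building against the s390 KVM headers, where KVM_SYNC_VRS and the sync-regs
area of struct kvm_run are defined.

/* Hypothetical user-space helper; s390 headers assumed. */
#include <linux/kvm.h>
#include <string.h>

static int fetch_guest_vrs(struct kvm_run *run, void *dst)
{
	/* Only trust vrs when the kernel marked it valid, i.e. when the
	 * guest actually has vector register support. */
	if (!(run->kvm_valid_regs & KVM_SYNC_VRS))
		return -1;
	memcpy(dst, run->s.regs.vrs, sizeof(run->s.regs.vrs));
	return 0;
}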

Fixes: b5510d9b ("s390/fpu: always enable the vector facility if it is available")
Cc: stable@vger.kernel.org # v4.4 d9a3a09a s390/kvm: remove dependency on struct save_area definition
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
[adapted to d9a3a09a]
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent b5e72574
@@ -506,7 +506,6 @@ struct kvm_vcpu_arch {
 	struct kvm_s390_sie_block *sie_block;
 	unsigned int host_acrs[NUM_ACRS];
 	struct fpu host_fpregs;
-	struct fpu guest_fpregs;
 	struct kvm_s390_local_interrupt local_int;
 	struct hrtimer ckc_timer;
 	struct kvm_s390_pgm_info pgm;
......
@@ -1202,7 +1202,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 	if (vcpu->kvm->arch.use_cmma)
 		kvm_s390_vcpu_unsetup_cmma(vcpu);
-	kfree(vcpu->arch.guest_fpregs.fprs);
 	free_page((unsigned long)(vcpu->arch.sie_block));
 
 	kvm_vcpu_uninit(vcpu);
@@ -1269,44 +1268,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-/*
- * Backs up the current FP/VX register save area on a particular
- * destination. Used to switch between different register save
- * areas.
- */
-static inline void save_fpu_to(struct fpu *dst)
-{
-	dst->fpc = current->thread.fpu.fpc;
-	dst->regs = current->thread.fpu.regs;
-}
-
-/*
- * Switches the FP/VX register save area from which to lazy
- * restore register contents.
- */
-static inline void load_fpu_from(struct fpu *from)
-{
-	current->thread.fpu.fpc = from->fpc;
-	current->thread.fpu.regs = from->regs;
-}
-
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	/* Save host register state */
 	save_fpu_regs();
-	save_fpu_to(&vcpu->arch.host_fpregs);
-
-	if (test_kvm_facility(vcpu->kvm, 129)) {
-		current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
-		/*
-		 * Use the register save area in the SIE-control block
-		 * for register restore and save in kvm_arch_vcpu_put()
-		 */
-		current->thread.fpu.vxrs =
-			(__vector128 *)&vcpu->run->s.regs.vrs;
-	} else
-		load_fpu_from(&vcpu->arch.guest_fpregs);
+	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
+	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
 
+	/* Depending on MACHINE_HAS_VX, data stored to vrs either
+	 * has vector register or floating point register format.
+	 */
+	current->thread.fpu.regs = vcpu->run->s.regs.vrs;
+	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
 	if (test_fp_ctl(current->thread.fpu.fpc))
 		/* User space provided an invalid FPC, let's clear it */
 		current->thread.fpu.fpc = 0;
@@ -1322,19 +1295,13 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
 
+	/* Save guest register state */
 	save_fpu_regs();
-
-	if (test_kvm_facility(vcpu->kvm, 129))
-		/*
-		 * kvm_arch_vcpu_load() set up the register save area to
-		 * the &vcpu->run->s.regs.vrs and, thus, the vector registers
-		 * are already saved. Only the floating-point control must be
-		 * copied.
-		 */
-		vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
-	else
-		save_fpu_to(&vcpu->arch.guest_fpregs);
-	load_fpu_from(&vcpu->arch.host_fpregs);
+	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+
+	/* Restore host register state */
+	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
+	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
 
 	save_access_regs(vcpu->run->s.regs.acrs);
 	restore_access_regs(vcpu->arch.host_acrs);
@@ -1352,8 +1319,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
 	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
-	vcpu->arch.guest_fpregs.fpc = 0;
-	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
+	/* make sure the new fpc will be lazily loaded */
+	save_fpu_regs();
+	current->thread.fpu.fpc = 0;
 	vcpu->arch.sie_block->gbea = 1;
 	vcpu->arch.sie_block->pp = 0;
 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
@@ -1502,29 +1470,14 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	vcpu->arch.local_int.wq = &vcpu->wq;
 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
 
-	/*
-	 * Allocate a save area for floating-point registers. If the vector
-	 * extension is available, register contents are saved in the SIE
-	 * control block. The allocated save area is still required in
-	 * particular places, for example, in kvm_s390_vcpu_store_status().
-	 */
-	vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
-					       GFP_KERNEL);
-	if (!vcpu->arch.guest_fpregs.fprs) {
-		rc = -ENOMEM;
-		goto out_free_sie_block;
-	}
-
 	rc = kvm_vcpu_init(vcpu, kvm, id);
 	if (rc)
-		goto out_free_fprs;
+		goto out_free_sie_block;
 	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
 		 vcpu->arch.sie_block);
 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
 
 	return vcpu;
-out_free_fprs:
-	kfree(vcpu->arch.guest_fpregs.fprs);
 out_free_sie_block:
 	free_page((unsigned long)(vcpu->arch.sie_block));
 out_free_cpu:
@@ -1737,19 +1690,27 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
+	/* make sure the new values will be lazily loaded */
+	save_fpu_regs();
 	if (test_fp_ctl(fpu->fpc))
 		return -EINVAL;
-	memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
-	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
-	save_fpu_regs();
-	load_fpu_from(&vcpu->arch.guest_fpregs);
+	current->thread.fpu.fpc = fpu->fpc;
+	if (MACHINE_HAS_VX)
+		convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
+	else
+		memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
 	return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-	memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
-	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
+	/* make sure we have the latest values */
+	save_fpu_regs();
+	if (MACHINE_HAS_VX)
+		convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
+	else
+		memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
+	fpu->fpc = current->thread.fpu.fpc;
 	return 0;
 }
 
@@ -2269,6 +2230,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
 {
 	unsigned char archmode = 1;
+	freg_t fprs[NUM_FPRS];
 	unsigned int px;
 	u64 clkcomp;
 	int rc;
@@ -2284,8 +2246,16 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
 		gpa = px;
 	} else
 		gpa -= __LC_FPREGS_SAVE_AREA;
-	rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
-			     vcpu->arch.guest_fpregs.fprs, 128);
+
+	/* manually convert vector registers if necessary */
+	if (MACHINE_HAS_VX) {
+		convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
+		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
+				     fprs, 128);
+	} else {
+		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
+				     vcpu->run->s.regs.vrs, 128);
+	}
 	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
 			      vcpu->run->s.regs.gprs, 128);
 	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
@@ -2293,7 +2263,7 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
 	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
 			      &px, 4);
 	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
-			      &vcpu->arch.guest_fpregs.fpc, 4);
+			      &vcpu->run->s.regs.fpc, 4);
 	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
 			      &vcpu->arch.sie_block->todpr, 4);
 	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
@@ -2316,19 +2286,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	 * it into the save area
 	 */
 	save_fpu_regs();
-	if (test_kvm_facility(vcpu->kvm, 129)) {
-		/*
-		 * If the vector extension is available, the vector registers
-		 * which overlaps with floating-point registers are saved in
-		 * the SIE-control block. Hence, extract the floating-point
-		 * registers and the FPC value and store them in the
-		 * guest_fpregs structure.
-		 */
-		vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc;
-		convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs,
-				 current->thread.fpu.vxrs);
-	} else
-		save_fpu_to(&vcpu->arch.guest_fpregs);
+	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
 	save_access_regs(vcpu->run->s.regs.acrs);
 
 	return kvm_s390_store_status_unloaded(vcpu, addr);
......