Commit 97e69aa6 authored by Vasiliy Kulikov, committed by Marcelo Tosatti

KVM: x86: fix information leak to userland

The structures kvm_vcpu_events, kvm_debugregs, kvm_pit_state2 and
kvm_clock_data are copied to userland with some padding and reserved
fields left uninitialized.  This leaks the contents of kernel stack
memory.  We have to initialize them to zero.
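
The leak pattern, as a minimal sketch (struct foo and get_foo are
hypothetical illustrations, not code from this patch; the real structs
carry explicit pad/reserved fields just like this one):

	#include <linux/types.h>
	#include <linux/uaccess.h>

	struct foo {
		__u8  val;
		__u8  pad[3];
		__u32 reserved[4];
	};

	static int get_foo(struct foo __user *arg)
	{
		struct foo f;		/* stack variable, not zeroed */

		f.val = 1;		/* only val is ever written */

		/* f.pad and f.reserved still hold stale kernel stack
		 * bytes, and copy_to_user() hands every byte of f to
		 * userland. */
		if (copy_to_user(arg, &f, sizeof(f)))
			return -EFAULT;
		return 0;
	}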

In review of patch v1, Jan Kiszka suggested filling the reserved fields
with zeros instead of memset'ting the whole struct.  This makes sense,
as these fields are explicitly marked as padding.  No other fields need
zeroing.
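
On the same hypothetical struct, the two approaches side by side
(v2 mirrors the hunks below):

	/* v1: zero the whole struct up front, then fill the live fields. */
	memset(&f, 0, sizeof(f));
	f.val = 1;

	/* v2, as applied here: write each live field and explicitly zero
	 * only the pad/reserved fields. */
	f.val = 1;
	memset(&f.pad, 0, sizeof(f.pad));
	memset(&f.reserved, 0, sizeof(f.reserved));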

KVM-Stable-Tag.
Signed-off-by: Vasiliy Kulikov <segooon@gmail.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent d8cdddcd
@@ -2560,6 +2560,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 		!kvm_exception_is_soft(vcpu->arch.exception.nr);
 	events->exception.nr = vcpu->arch.exception.nr;
 	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
+	events->exception.pad = 0;
 	events->exception.error_code = vcpu->arch.exception.error_code;
 
 	events->interrupt.injected =
@@ -2573,12 +2574,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	events->nmi.injected = vcpu->arch.nmi_injected;
 	events->nmi.pending = vcpu->arch.nmi_pending;
 	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
+	events->nmi.pad = 0;
 
 	events->sipi_vector = vcpu->arch.sipi_vector;
 
 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
 			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
 			 | KVM_VCPUEVENT_VALID_SHADOW);
+	memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
@@ -2623,6 +2626,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
 	dbgregs->dr6 = vcpu->arch.dr6;
 	dbgregs->dr7 = vcpu->arch.dr7;
 	dbgregs->flags = 0;
+	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
@@ -3106,6 +3110,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
 		sizeof(ps->channels));
 	ps->flags = kvm->arch.vpit->pit_state.flags;
 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+	memset(&ps->reserved, 0, sizeof(ps->reserved));
 	return r;
 }
 
@@ -3486,6 +3491,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
 		local_irq_enable();
 		user_ns.flags = 0;
+		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
 
 		r = -EFAULT;
 		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
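
For context, a minimal userland sketch (a hypothetical test program,
error handling elided) showing how the stale bytes were reachable
through an ordinary ioctl such as KVM_GET_CLOCK before this fix:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		int vm = ioctl(kvm, KVM_CREATE_VM, 0);
		struct kvm_clock_data clock;

		/* Before the fix, clock.pad could carry kernel stack
		 * bytes; after it, every pad word reads back as zero. */
		if (ioctl(vm, KVM_GET_CLOCK, &clock) == 0) {
			unsigned int i;
			for (i = 0; i < sizeof(clock.pad) / sizeof(clock.pad[0]); i++)
				printf("pad[%u] = 0x%08x\n", i, clock.pad[i]);
		}
		return 0;
	}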