Commit e0231715 authored by Joerg Roedel, committed by Avi Kivity

KVM: SVM: Coding style cleanup

This patch removes whitespace errors, fixes comment formatting,
and addresses most checkpatch warnings. vim no longer shows
c-space-errors for this file.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 83bf0002
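
For context, the conventions applied below are the usual kernel checkpatch rules. A minimal illustration (not taken from this patch; the struct, field, and function names are made up) of the comment, bit-field, and operator-spacing forms the patch converts to:

/*
 * Multi-line comments open and close on their own lines, with each
 * continuation line starting with a leading asterisk.
 */
struct example_desc {
	unsigned base:8, type:5;	/* no spaces around ':' in bit-fields */
};

static void example_loop(void)
{
	int i;

	for (i = 0; i < 16; i++)	/* spaces around binary operators */
		;
}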
@@ -120,7 +120,7 @@ struct vcpu_svm {
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 static bool npt_enabled = true;
 #else
-static bool npt_enabled = false;
+static bool npt_enabled;
 #endif
 static int npt = 1;
@@ -168,8 +168,8 @@ static unsigned long iopm_base;
 struct kvm_ldttss_desc {
 	u16 limit0;
 	u16 base0;
-	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
-	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
+	unsigned base1:8, type:5, dpl:2, p:1;
+	unsigned limit1:4, zero0:3, g:1, base2:8;
 	u32 base3;
 	u32 zero1;
 } __attribute__((packed));
@@ -218,7 +218,7 @@ static inline void stgi(void)
 static inline void invlpga(unsigned long addr, u32 asid)
 {
-	asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
+	asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
 }
 
 static inline void force_new_asid(struct kvm_vcpu *vcpu)
@@ -290,8 +290,10 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	/* If we are within a nested VM we'd better #VMEXIT and let the
-	   guest handle the exception */
+	/*
+	 * If we are within a nested VM we'd better #VMEXIT and let the guest
+	 * handle the exception
+	 */
 	if (nested_svm_check_exception(svm, nr, has_error_code, error_code))
 		return;
@@ -657,7 +659,8 @@ static void init_vmcb(struct vcpu_svm *svm)
 	save->rip = 0x0000fff0;
 	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
 
-	/* This is the guest-visible cr0 value.
+	/*
+	 * This is the guest-visible cr0 value.
 	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
 	 */
 	svm->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
@@ -903,7 +906,8 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
 	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
 	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
 
-	/* AMD's VMCB does not have an explicit unusable field, so emulate it
+	/*
+	 * AMD's VMCB does not have an explicit unusable field, so emulate it
 	 * for cross vendor migration purposes by "not present"
 	 */
 	var->unusable = !var->present || (var->type == 0);
@@ -939,7 +943,8 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
 		var->type |= 0x1;
 		break;
 	case VCPU_SREG_SS:
-		/* On AMD CPUs sometimes the DB bit in the segment
+		/*
+		 * On AMD CPUs sometimes the DB bit in the segment
 		 * descriptor is left as 1, although the whole segment has
 		 * been made unusable. Clear it here to pass an Intel VMX
 		 * entry check when cross vendor migrating.
@@ -1270,7 +1275,7 @@ static int db_interception(struct vcpu_svm *svm)
 	}
 
 	if (svm->vcpu.guest_debug &
-	    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)){
+	    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
 		kvm_run->exit_reason = KVM_EXIT_DEBUG;
 		kvm_run->debug.arch.pc =
 			svm->vmcb->save.cs.base + svm->vmcb->save.rip;
@@ -1554,13 +1559,13 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
 	case SVM_EXIT_INTR:
 	case SVM_EXIT_NMI:
 		return NESTED_EXIT_HOST;
-	/* For now we are always handling NPFs when using them */
 	case SVM_EXIT_NPF:
+		/* For now we are always handling NPFs when using them */
 		if (npt_enabled)
 			return NESTED_EXIT_HOST;
 		break;
-	/* When we're shadowing, trap PFs */
 	case SVM_EXIT_EXCP_BASE + PF_VECTOR:
+		/* When we're shadowing, trap PFs */
 		if (!npt_enabled)
 			return NESTED_EXIT_HOST;
 		break;
@@ -1795,7 +1800,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
 	if (!nested_msrpm)
 		return false;
 
-	for (i=0; i< PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
+	for (i = 0; i < PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
 		svm->nested.msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
 
 	svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
@@ -1829,8 +1834,10 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 	kvm_clear_exception_queue(&svm->vcpu);
 	kvm_clear_interrupt_queue(&svm->vcpu);
 
-	/* Save the old vmcb, so we don't need to pick what we save, but
-	   can restore everything when a VMEXIT occurs */
+	/*
+	 * Save the old vmcb, so we don't need to pick what we save, but can
+	 * restore everything when a VMEXIT occurs
+	 */
 	hsave->save.es = vmcb->save.es;
 	hsave->save.cs = vmcb->save.cs;
 	hsave->save.ss = vmcb->save.ss;
@@ -1878,6 +1885,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
 	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
 	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
+
 	/* In case we don't even reach vcpu_run, the fields are not updated */
 	svm->vmcb->save.rax = nested_vmcb->save.rax;
 	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
@@ -1909,8 +1917,10 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
 	}
 
-	/* We don't want a nested guest to be more powerful than the guest,
-	   so all intercepts are ORed */
+	/*
+	 * We don't want a nested guest to be more powerful than the guest, so
+	 * all intercepts are ORed
+	 */
 	svm->vmcb->control.intercept_cr_read |=
 		nested_vmcb->control.intercept_cr_read;
 	svm->vmcb->control.intercept_cr_write |=
@@ -2224,9 +2234,11 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 	case MSR_IA32_SYSENTER_ESP:
 		*data = svm->sysenter_esp;
 		break;
-	/* Nobody will change the following 5 values in the VMCB so
-	   we can safely return them on rdmsr. They will always be 0
-	   until LBRV is implemented. */
+	/*
+	 * Nobody will change the following 5 values in the VMCB so we can
+	 * safely return them on rdmsr. They will always be 0 until LBRV is
+	 * implemented.
+	 */
 	case MSR_IA32_DEBUGCTLMSR:
 		*data = svm->vmcb->save.dbgctl;
 		break;
@@ -2441,7 +2453,6 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_SMI]				= nop_on_interception,
 	[SVM_EXIT_INIT]				= nop_on_interception,
 	[SVM_EXIT_VINTR]			= interrupt_window_interception,
-	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
 	[SVM_EXIT_CPUID]			= cpuid_interception,
 	[SVM_EXIT_IRET]				= iret_interception,
 	[SVM_EXIT_INVD]				= emulate_on_interception,
@@ -2650,10 +2661,12 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	/* In case GIF=0 we can't rely on the CPU to tell us when
-	 * GIF becomes 1, because that's a separate STGI/VMRUN intercept.
-	 * The next time we get that intercept, this function will be
-	 * called again though and we'll get the vintr intercept. */
+	/*
+	 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
+	 * 1, because that's a separate STGI/VMRUN intercept. The next time we
+	 * get that intercept, this function will be called again though and
+	 * we'll get the vintr intercept.
+	 */
 	if (gif_set(svm) && nested_svm_intr(svm)) {
 		svm_set_vintr(svm);
 		svm_inject_irq(svm, 0x0);
@@ -2668,9 +2681,10 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
 	    == HF_NMI_MASK)
 		return; /* IRET will cause a vm exit */
 
-	/* Something prevents NMI from been injected. Single step over
-	   possible problem (IRET or exception injection or interrupt
-	   shadow) */
+	/*
+	 * Something prevents NMI from been injected. Single step over possible
+	 * problem (IRET or exception injection or interrupt shadow)
+	 */
 	svm->nmi_singlestep = true;
 	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
 	update_db_intercept(vcpu);