Commit a737f256 authored by Christoffer Dall's avatar Christoffer Dall Committed by Avi Kivity

KVM: Cleanup the kvm_print functions and introduce pr_XX wrappers

Introduces a couple of print functions, which are essentially wrappers
around standard printk functions, with a KVM: prefix.

Functions introduced or modified are:
 - kvm_err(fmt, ...)
 - kvm_info(fmt, ...)
 - kvm_debug(fmt, ...)
 - kvm_pr_unimpl(fmt, ...)
 - pr_unimpl(vcpu, fmt, ...) -> vcpu_unimpl(vcpu, fmt, ...)
Signed-off-by: Christoffer Dall <c.dall@virtualopensystems.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 4ae57b6c
...@@ -3185,8 +3185,8 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) ...@@ -3185,8 +3185,8 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
break; break;
case MSR_IA32_DEBUGCTLMSR: case MSR_IA32_DEBUGCTLMSR:
if (!boot_cpu_has(X86_FEATURE_LBRV)) { if (!boot_cpu_has(X86_FEATURE_LBRV)) {
pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n", vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
__func__, data); __func__, data);
break; break;
} }
if (data & DEBUGCTL_RESERVED_BITS) if (data & DEBUGCTL_RESERVED_BITS)
...@@ -3205,7 +3205,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) ...@@ -3205,7 +3205,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
case MSR_VM_CR: case MSR_VM_CR:
return svm_set_vm_cr(vcpu, data); return svm_set_vm_cr(vcpu, data);
case MSR_VM_IGNNE: case MSR_VM_IGNNE:
pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
break; break;
default: default:
return kvm_set_msr_common(vcpu, ecx, data); return kvm_set_msr_common(vcpu, ecx, data);
......
...@@ -4549,7 +4549,7 @@ static int handle_cr(struct kvm_vcpu *vcpu) ...@@ -4549,7 +4549,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
break; break;
} }
vcpu->run->exit_reason = 0; vcpu->run->exit_reason = 0;
pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n", vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
(int)(exit_qualification >> 4) & 3, cr); (int)(exit_qualification >> 4) & 3, cr);
return 0; return 0;
} }
......
...@@ -1437,8 +1437,8 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data) ...@@ -1437,8 +1437,8 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
break; break;
} }
default: default:
pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
"data 0x%llx\n", msr, data); "data 0x%llx\n", msr, data);
return 1; return 1;
} }
return 0; return 0;
...@@ -1470,8 +1470,8 @@ static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) ...@@ -1470,8 +1470,8 @@ static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
case HV_X64_MSR_TPR: case HV_X64_MSR_TPR:
return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
default: default:
pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
"data 0x%llx\n", msr, data); "data 0x%llx\n", msr, data);
return 1; return 1;
} }
...@@ -1551,15 +1551,15 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) ...@@ -1551,15 +1551,15 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
data &= ~(u64)0x100; /* ignore ignne emulation enable */ data &= ~(u64)0x100; /* ignore ignne emulation enable */
data &= ~(u64)0x8; /* ignore TLB cache disable */ data &= ~(u64)0x8; /* ignore TLB cache disable */
if (data != 0) { if (data != 0) {
pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
data); data);
return 1; return 1;
} }
break; break;
case MSR_FAM10H_MMIO_CONF_BASE: case MSR_FAM10H_MMIO_CONF_BASE:
if (data != 0) { if (data != 0) {
pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
"0x%llx\n", data); "0x%llx\n", data);
return 1; return 1;
} }
break; break;
...@@ -1574,8 +1574,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) ...@@ -1574,8 +1574,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
thus reserved and should throw a #GP */ thus reserved and should throw a #GP */
return 1; return 1;
} }
pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
__func__, data); __func__, data);
break; break;
case MSR_IA32_UCODE_REV: case MSR_IA32_UCODE_REV:
case MSR_IA32_UCODE_WRITE: case MSR_IA32_UCODE_WRITE:
...@@ -1671,8 +1671,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) ...@@ -1671,8 +1671,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
case MSR_K7_EVNTSEL2: case MSR_K7_EVNTSEL2:
case MSR_K7_EVNTSEL3: case MSR_K7_EVNTSEL3:
if (data != 0) if (data != 0)
pr_unimpl(vcpu, "unimplemented perfctr wrmsr: " vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data); "0x%x data 0x%llx\n", msr, data);
break; break;
/* at least RHEL 4 unconditionally writes to the perfctr registers, /* at least RHEL 4 unconditionally writes to the perfctr registers,
* so we ignore writes to make it happy. * so we ignore writes to make it happy.
...@@ -1681,8 +1681,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) ...@@ -1681,8 +1681,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
case MSR_K7_PERFCTR1: case MSR_K7_PERFCTR1:
case MSR_K7_PERFCTR2: case MSR_K7_PERFCTR2:
case MSR_K7_PERFCTR3: case MSR_K7_PERFCTR3:
pr_unimpl(vcpu, "unimplemented perfctr wrmsr: " vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data); "0x%x data 0x%llx\n", msr, data);
break; break;
case MSR_P6_PERFCTR0: case MSR_P6_PERFCTR0:
case MSR_P6_PERFCTR1: case MSR_P6_PERFCTR1:
...@@ -1693,8 +1693,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) ...@@ -1693,8 +1693,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
return kvm_pmu_set_msr(vcpu, msr, data); return kvm_pmu_set_msr(vcpu, msr, data);
if (pr || data != 0) if (pr || data != 0)
pr_unimpl(vcpu, "disabled perfctr wrmsr: " vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data); "0x%x data 0x%llx\n", msr, data);
break; break;
case MSR_K7_CLK_CTL: case MSR_K7_CLK_CTL:
/* /*
...@@ -1720,7 +1720,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) ...@@ -1720,7 +1720,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
/* Drop writes to this legacy MSR -- see rdmsr /* Drop writes to this legacy MSR -- see rdmsr
* counterpart for further detail. * counterpart for further detail.
*/ */
pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
break; break;
case MSR_AMD64_OSVW_ID_LENGTH: case MSR_AMD64_OSVW_ID_LENGTH:
if (!guest_cpuid_has_osvw(vcpu)) if (!guest_cpuid_has_osvw(vcpu))
...@@ -1738,12 +1738,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) ...@@ -1738,12 +1738,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
if (kvm_pmu_msr(vcpu, msr)) if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr, data); return kvm_pmu_set_msr(vcpu, msr, data);
if (!ignore_msrs) { if (!ignore_msrs) {
pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
msr, data); msr, data);
return 1; return 1;
} else { } else {
pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
msr, data); msr, data);
break; break;
} }
} }
...@@ -1846,7 +1846,7 @@ static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) ...@@ -1846,7 +1846,7 @@ static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
data = kvm->arch.hv_hypercall; data = kvm->arch.hv_hypercall;
break; break;
default: default:
pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
return 1; return 1;
} }
...@@ -1877,7 +1877,7 @@ static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) ...@@ -1877,7 +1877,7 @@ static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
data = vcpu->arch.hv_vapic; data = vcpu->arch.hv_vapic;
break; break;
default: default:
pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
return 1; return 1;
} }
*pdata = data; *pdata = data;
...@@ -2030,10 +2030,10 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) ...@@ -2030,10 +2030,10 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
if (kvm_pmu_msr(vcpu, msr)) if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_get_msr(vcpu, msr, pdata); return kvm_pmu_get_msr(vcpu, msr, pdata);
if (!ignore_msrs) { if (!ignore_msrs) {
pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr); vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
return 1; return 1;
} else { } else {
pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr); vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
data = 0; data = 0;
} }
break; break;
...@@ -4116,7 +4116,7 @@ static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr) ...@@ -4116,7 +4116,7 @@ static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
value = kvm_get_cr8(vcpu); value = kvm_get_cr8(vcpu);
break; break;
default: default:
vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr); kvm_err("%s: unexpected cr %u\n", __func__, cr);
return 0; return 0;
} }
...@@ -4145,7 +4145,7 @@ static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val) ...@@ -4145,7 +4145,7 @@ static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
res = kvm_set_cr8(vcpu, val); res = kvm_set_cr8(vcpu, val);
break; break;
default: default:
vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr); kvm_err("%s: unexpected cr %u\n", __func__, cr);
res = -1; res = -1;
} }
......
...@@ -314,13 +314,19 @@ struct kvm { ...@@ -314,13 +314,19 @@ struct kvm {
long tlbs_dirty; long tlbs_dirty;
}; };
/* The guest did something we don't support. */ #define kvm_err(fmt, ...) \
#define pr_unimpl(vcpu, fmt, ...) \ pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
pr_err_ratelimited("kvm: %i: cpu%i " fmt, \ #define kvm_info(fmt, ...) \
current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__) pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
pr_err_ratelimited("kvm [%i]: " fmt, \
task_tgid_nr(current), ## __VA_ARGS__)
#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt) /* The guest did something we don't support. */
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt) #define vcpu_unimpl(vcpu, fmt, ...) \
kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment