Commit bd80158a authored by Jan Kiszka's avatar Jan Kiszka Committed by Avi Kivity

KVM: Clean up and extend rate-limited output

The use of printk_ratelimit is discouraged, replace it with
pr*_ratelimited or __ratelimit. While at it, convert remaining
guest-triggerable printks to rate-limited variants.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 7712de87
...@@ -34,6 +34,9 @@ ...@@ -34,6 +34,9 @@
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
#include "trace.h" #include "trace.h"
#define pr_pic_unimpl(fmt, ...) \
pr_err_ratelimited("kvm: pic: " fmt, ## __VA_ARGS__)
static void pic_irq_request(struct kvm *kvm, int level); static void pic_irq_request(struct kvm *kvm, int level);
static void pic_lock(struct kvm_pic *s) static void pic_lock(struct kvm_pic *s)
...@@ -306,10 +309,10 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val) ...@@ -306,10 +309,10 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
} }
s->init_state = 1; s->init_state = 1;
if (val & 0x02) if (val & 0x02)
printk(KERN_ERR "single mode not supported"); pr_pic_unimpl("single mode not supported");
if (val & 0x08) if (val & 0x08)
printk(KERN_ERR pr_pic_unimpl(
"level sensitive irq not supported"); "level sensitive irq not supported");
} else if (val & 0x08) { } else if (val & 0x08) {
if (val & 0x04) if (val & 0x04)
s->poll = 1; s->poll = 1;
...@@ -467,8 +470,7 @@ static int picdev_write(struct kvm_pic *s, ...@@ -467,8 +470,7 @@ static int picdev_write(struct kvm_pic *s,
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (len != 1) { if (len != 1) {
if (printk_ratelimit()) pr_pic_unimpl("non byte write\n");
printk(KERN_ERR "PIC: non byte write\n");
return 0; return 0;
} }
pic_lock(s); pic_lock(s);
...@@ -496,8 +498,7 @@ static int picdev_read(struct kvm_pic *s, ...@@ -496,8 +498,7 @@ static int picdev_read(struct kvm_pic *s,
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (len != 1) { if (len != 1) {
if (printk_ratelimit()) pr_pic_unimpl("non byte read\n");
printk(KERN_ERR "PIC: non byte read\n");
return 0; return 0;
} }
pic_lock(s); pic_lock(s);
......
...@@ -121,16 +121,16 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level) ...@@ -121,16 +121,16 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep) static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{ {
static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
unsigned long *rmapp; unsigned long *rmapp;
struct kvm_mmu_page *rev_sp; struct kvm_mmu_page *rev_sp;
gfn_t gfn; gfn_t gfn;
rev_sp = page_header(__pa(sptep)); rev_sp = page_header(__pa(sptep));
gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt); gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
if (!gfn_to_memslot(kvm, gfn)) { if (!gfn_to_memslot(kvm, gfn)) {
if (!printk_ratelimit()) if (!__ratelimit(&ratelimit_state))
return; return;
audit_printk(kvm, "no memslot for gfn %llx\n", gfn); audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
audit_printk(kvm, "index %ld of sp (gfn=%llx)\n", audit_printk(kvm, "index %ld of sp (gfn=%llx)\n",
...@@ -141,7 +141,7 @@ static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep) ...@@ -141,7 +141,7 @@ static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level); rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
if (!*rmapp) { if (!*rmapp) {
if (!printk_ratelimit()) if (!__ratelimit(&ratelimit_state))
return; return;
audit_printk(kvm, "no rmap for writable spte %llx\n", audit_printk(kvm, "no rmap for writable spte %llx\n",
*sptep); *sptep);
......
...@@ -2762,8 +2762,8 @@ static void enter_lmode(struct kvm_vcpu *vcpu) ...@@ -2762,8 +2762,8 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) { if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
printk(KERN_DEBUG "%s: tss fixup for long mode. \n", pr_debug_ratelimited("%s: tss fixup for long mode. \n",
__func__); __func__);
vmcs_write32(GUEST_TR_AR_BYTES, vmcs_write32(GUEST_TR_AR_BYTES,
(guest_tr_ar & ~AR_TYPE_MASK) (guest_tr_ar & ~AR_TYPE_MASK)
| AR_TYPE_BUSY_64_TSS); | AR_TYPE_BUSY_64_TSS);
...@@ -5634,8 +5634,8 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) ...@@ -5634,8 +5634,8 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
return 0; return 0;
if (unlikely(vmx->fail)) { if (unlikely(vmx->fail)) {
printk(KERN_INFO "%s failed vm entry %x\n", pr_info_ratelimited("%s failed vm entry %x\n", __func__,
__func__, vmcs_read32(VM_INSTRUCTION_ERROR)); vmcs_read32(VM_INSTRUCTION_ERROR));
return 1; return 1;
} }
...@@ -6612,9 +6612,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) ...@@ -6612,9 +6612,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
if (vmcs12->vm_entry_msr_load_count > 0 || if (vmcs12->vm_entry_msr_load_count > 0 ||
vmcs12->vm_exit_msr_load_count > 0 || vmcs12->vm_exit_msr_load_count > 0 ||
vmcs12->vm_exit_msr_store_count > 0) { vmcs12->vm_exit_msr_store_count > 0) {
if (printk_ratelimit()) pr_warn_ratelimited("%s: VMCS MSR_{LOAD,STORE} unsupported\n",
printk(KERN_WARNING __func__);
"%s: VMCS MSR_{LOAD,STORE} unsupported\n", __func__);
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
return 1; return 1;
} }
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/msi.h> #include <linux/msi.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <asm/signal.h> #include <asm/signal.h>
#include <linux/kvm.h> #include <linux/kvm.h>
...@@ -282,11 +283,8 @@ struct kvm { ...@@ -282,11 +283,8 @@ struct kvm {
/* The guest did something we don't support. */ /* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...) \ #define pr_unimpl(vcpu, fmt, ...) \
do { \ pr_err_ratelimited("kvm: %i: cpu%i " fmt, \
if (printk_ratelimit()) \ current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__)
printk(KERN_ERR "kvm: %i: cpu%i " fmt, \
current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \
} while (0)
#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt) #define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt) #define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment