Commit 04ebebf4 authored by James Hogan, committed by Paolo Bonzini

MIPS: KVM: Add kvm_aux trace event

Add a MIPS specific trace event for auxiliary context operations
(notably FPU and MSA). Unfortunately the generic kvm_fpu trace event
isn't flexible enough to handle the range of interesting things that can
happen with FPU and MSA context.

The type of state being operated on is traced:
- FPU: Just the FPU registers.
- MSA: Just the upper half of the MSA vector registers (low half already
       loaded with FPU state).
- FPU & MSA: Full MSA vector state (includes FPU state).

As is the type of operation:
- Restore: State was enabled and restored.
- Save: State was saved and disabled.
- Enable: State was enabled (already loaded).
- Disable: State was disabled (kept loaded).
- Discard: State was discarded and disabled.
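
Based on the TP_printk format added below, each event prints the operation, then
the state type, then the guest PC. Purely for illustration (the PC values here
are made up, and this assumes the event registers under the usual kvm trace
system so it can be enabled via tracefs at events/kvm/kvm_aux/enable), trace
buffer lines would look roughly like:

    kvm_aux: restore FPU PC: 0x004001a4
    kvm_aux: save FPU & MSA PC: 0x00400230
    kvm_aux: discard MSA PC: 0x00400310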
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
[Fix remaining occurrence of "fpu_msa", change to "aux". - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent f943176a
@@ -1465,6 +1465,9 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
 		__kvm_restore_fpu(&vcpu->arch);
 		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
+		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
+	} else {
+		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
 	}
 	preempt_enable();
@@ -1513,6 +1516,7 @@ void kvm_own_msa(struct kvm_vcpu *vcpu)
 		 */
 		__kvm_restore_msa_upper(&vcpu->arch);
 		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
+		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
 		break;
 	case 0:
@@ -1520,8 +1524,11 @@ void kvm_own_msa(struct kvm_vcpu *vcpu)
 		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
 		if (kvm_mips_guest_has_fpu(&vcpu->arch))
 			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
+		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
+			      KVM_TRACE_AUX_FPU_MSA);
 		break;
 	default:
+		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
 		break;
 	}
@@ -1535,10 +1542,12 @@ void kvm_drop_fpu(struct kvm_vcpu *vcpu)
 	preempt_disable();
 	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
 		disable_msa();
+		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
 		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
 	}
 	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
 		clear_c0_status(ST0_CU1 | ST0_FR);
+		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
 		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
 	}
 	preempt_enable();
@@ -1560,6 +1569,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
 		enable_fpu_hazard();
 		__kvm_save_msa(&vcpu->arch);
+		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);
 		/* Disable MSA & FPU */
 		disable_msa();
@@ -1574,6 +1584,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
 		__kvm_save_fpu(&vcpu->arch);
 		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
+		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
 		/* Disable FPU */
 		clear_c0_status(ST0_CU1 | ST0_FR);
...
@@ -38,6 +38,52 @@ TRACE_EVENT(kvm_exit,
 		      __entry->pc)
 );
+
+#define KVM_TRACE_AUX_RESTORE 0
+#define KVM_TRACE_AUX_SAVE 1
+#define KVM_TRACE_AUX_ENABLE 2
+#define KVM_TRACE_AUX_DISABLE 3
+#define KVM_TRACE_AUX_DISCARD 4
+
+#define KVM_TRACE_AUX_FPU 1
+#define KVM_TRACE_AUX_MSA 2
+#define KVM_TRACE_AUX_FPU_MSA 3
+
+#define kvm_trace_symbol_aux_op			\
+	{ KVM_TRACE_AUX_RESTORE, "restore" },	\
+	{ KVM_TRACE_AUX_SAVE,    "save" },	\
+	{ KVM_TRACE_AUX_ENABLE,  "enable" },	\
+	{ KVM_TRACE_AUX_DISABLE, "disable" },	\
+	{ KVM_TRACE_AUX_DISCARD, "discard" }
+
+#define kvm_trace_symbol_aux_state		\
+	{ KVM_TRACE_AUX_FPU,     "FPU" },	\
+	{ KVM_TRACE_AUX_MSA,     "MSA" },	\
+	{ KVM_TRACE_AUX_FPU_MSA, "FPU & MSA" }
+
+TRACE_EVENT(kvm_aux,
+	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
+		     unsigned int state),
+	    TP_ARGS(vcpu, op, state),
+	    TP_STRUCT__entry(
+			__field(unsigned long, pc)
+			__field(u8, op)
+			__field(u8, state)
+	    ),
+
+	    TP_fast_assign(
+			__entry->pc = vcpu->arch.pc;
+			__entry->op = op;
+			__entry->state = state;
+	    ),
+
+	    TP_printk("%s %s PC: 0x%08lx",
+		      __print_symbolic(__entry->op,
+				       kvm_trace_symbol_aux_op),
+		      __print_symbolic(__entry->state,
+				       kvm_trace_symbol_aux_state),
+		      __entry->pc)
+);
+
 #endif /* _TRACE_KVM_H */
 /* This part must be outside protection */
...
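
The hunks shown above exercise the restore, enable, save, and discard operations.
For completeness, a hypothetical call site for the remaining disable operation
could look like the minimal sketch below; the helper name and the masking step
are illustrative and not part of this patch, only the trace_kvm_aux() call
pattern comes from the definitions above:

/*
 * Hypothetical sketch (not from this patch): report that guest FPU
 * access was disabled while its register state is kept loaded.
 */
static void example_disable_guest_fpu(struct kvm_vcpu *vcpu)
{
	/* ...architecture-specific step that masks guest FPU access... */
	trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISABLE, KVM_TRACE_AUX_FPU);
}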