Commit 38ea7a71 authored by James Hogan, committed by Paolo Bonzini

MIPS: KVM: Check MSA presence at uasm time

Check for presence of MSA at uasm assembly time rather than at runtime
in the generated KVM host entry code. This optimises the guest exit path
by eliminating the MSA code entirely if not present, and eliminating the
read of Config3.MSAP and conditional branch if MSA is present.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent d37f4038
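
The pattern at work in this patch is specialisation at code-generation time: the host's MSA support cannot change after boot, so the uasm pass can simply skip emitting the MSA save code instead of emitting instructions that re-test for MSA on every guest exit. Below is a minimal sketch of that pattern; the emit() helper, sketch_cpu_has_msa flag, and build_exit_msa_save() function are hypothetical stand-ins for illustration, not the kernel's uasm API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for this sketch; not the kernel's uasm API. */
static bool sketch_cpu_has_msa;

static void emit(const char *insn)
{
	/* A real generator appends machine code; here we just print. */
	printf("\t%s\n", insn);
}

static void build_exit_msa_save(void)
{
	if (!sketch_cpu_has_msa)
		return;		/* no MSA on this CPU: emit nothing at all */

	/* Only the Config5.MSAEN test survives into the generated code. */
	emit("mfc0   t0, c0_config5");
	emit("ext    t0, t0, 27, 1    # MIPS_CONF5_MSAEN");
	emit("beqz   t0, msa_done");
	emit("nop");
	emit("cfcmsa t0, msacsr");
	emit("sw     t0, VCPU_MSACSR(k1)");
	emit("ctcmsa msacsr, zero");
	emit("msa_done:");
}

int main(void)
{
	sketch_cpu_has_msa = true;	/* pretend the host has MSA */
	build_exit_msa_save();
	return 0;
}

With the flag false, build_exit_msa_save() emits nothing, mirroring how the patched kvm_mips_build_exit() drops the whole block; with it true, only the Config5.MSAEN test remains, since MSA can still be disabled per context even on hardware that has it.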
@@ -55,7 +55,6 @@
 #define C0_CAUSE	13, 0
 #define C0_EPC		14, 0
 #define C0_EBASE	15, 1
-#define C0_CONFIG3	16, 3
 #define C0_CONFIG5	16, 5
 #define C0_DDATA_LO	28, 3
 #define C0_ERROREPC	30, 0
@@ -409,25 +408,21 @@ void *kvm_mips_build_exit(void *addr)
 		uasm_l_fpu_1(&l, p);
 	}
 
-#ifdef CONFIG_CPU_HAS_MSA
-	/*
-	 * If MSA is enabled, save MSACSR and clear it so that later
-	 * instructions don't trigger MSAFPE for pending exceptions.
-	 */
-	uasm_i_mfc0(&p, T0, C0_CONFIG3);
-	uasm_i_ext(&p, T0, T0, 28, 1); /* MIPS_CONF3_MSAP */
-	uasm_il_beqz(&p, &r, T0, label_msa_1);
-	uasm_i_nop(&p);
-	uasm_i_mfc0(&p, T0, C0_CONFIG5);
-	uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
-	uasm_il_beqz(&p, &r, T0, label_msa_1);
-	uasm_i_nop(&p);
-	uasm_i_cfcmsa(&p, T0, MSA_CSR);
-	uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
-		  K1);
-	uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
-	uasm_l_msa_1(&l, p);
-#endif
+	if (cpu_has_msa) {
+		/*
+		 * If MSA is enabled, save MSACSR and clear it so that later
+		 * instructions don't trigger MSAFPE for pending exceptions.
+		 */
+		uasm_i_mfc0(&p, T0, C0_CONFIG5);
+		uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
+		uasm_il_beqz(&p, &r, T0, label_msa_1);
+		uasm_i_nop(&p);
+		uasm_i_cfcmsa(&p, T0, MSA_CSR);
+		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
+			  K1);
+		uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
+		uasm_l_msa_1(&l, p);
+	}
 
 	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
 	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
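
As a reading aid for the runtime test that remains in the generated code: the EXT instruction emitted by uasm_i_ext(&p, T0, T0, 27, 1) extracts a single-bit field from Config5. A hypothetical C equivalent of "ext t0, t0, 27, 1", for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the MIPS "ext rd, rs, pos, size"
 * instruction: extract "size" bits of "val" starting at bit "pos". */
static uint32_t ext_bits(uint32_t val, unsigned int pos, unsigned int size)
{
	return (val >> pos) & ((1u << size) - 1u);
}

int main(void)
{
	uint32_t config5 = 1u << 27;	/* sample value with MSAEN set */

	/* Matches the generated "ext t0, t0, 27, 1" (MIPS_CONF5_MSAEN). */
	printf("MSAEN = %u\n", ext_bits(config5, 27, 1));
	return 0;
}

This also shows why only one of the two tests could move to uasm time: Config3.MSAP ("MSA present") describes fixed hardware capability, now resolved once via cpu_has_msa, while Config5.MSAEN ("MSA enabled") can change at runtime and so must still be tested in the generated exit path.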