Commit 676a863c authored by Vitaly Kuznetsov's avatar Vitaly Kuznetsov Committed by Paolo Bonzini

KVM: selftests: Better XMM read/write helpers

set_xmm()/get_xmm() helpers are fairly useless as they only read 64 bits
from 128-bit registers. Moreover, these helpers are not used. Borrow
_kvm_read_sse_reg()/_kvm_write_sse_reg() from KVM, limiting them to
XMM0-XMM7 for now.
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20221101145426.251680-31-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent f4de6a1f
...@@ -603,71 +603,73 @@ static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature) ...@@ -603,71 +603,73 @@ static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
!this_cpu_has(feature.anti_feature); !this_cpu_has(feature.anti_feature);
} }
#define SET_XMM(__var, __xmm) \ typedef u32 __attribute__((vector_size(16))) sse128_t;
asm volatile("movq %0, %%"#__xmm : : "r"(__var) : #__xmm) #define __sse128_u union { sse128_t vec; u64 as_u64[2]; u32 as_u32[4]; }
#define sse128_lo(x) ({ __sse128_u t; t.vec = x; t.as_u64[0]; })
#define sse128_hi(x) ({ __sse128_u t; t.vec = x; t.as_u64[1]; })
/*
 * read_sse_reg() - copy the current contents of XMM register @reg into *@data.
 * @reg:  register number, 0-7 (XMM0-XMM7); anything else hits BUG().
 * @data: destination buffer, must be 16-byte aligned (MOVDQA requirement;
 *        sse128_t's vector type guarantees this).
 *
 * XMM registers cannot be selected dynamically in inline asm, hence the
 * switch with one statically-named MOVDQA per register.
 */
static inline void read_sse_reg(int reg, sse128_t *data)
{
	switch (reg) {
	case 0:
		asm("movdqa %%xmm0, %0" : "=m"(*data));
		break;
	case 1:
		asm("movdqa %%xmm1, %0" : "=m"(*data));
		break;
	case 2:
		asm("movdqa %%xmm2, %0" : "=m"(*data));
		break;
	case 3:
		asm("movdqa %%xmm3, %0" : "=m"(*data));
		break;
	case 4:
		asm("movdqa %%xmm4, %0" : "=m"(*data));
		break;
	case 5:
		asm("movdqa %%xmm5, %0" : "=m"(*data));
		break;
	case 6:
		asm("movdqa %%xmm6, %0" : "=m"(*data));
		break;
	case 7:
		asm("movdqa %%xmm7, %0" : "=m"(*data));
		break;
	default:
		BUG();
	}
}
#define GET_XMM(__xmm) \ static inline void write_sse_reg(int reg, const sse128_t *data)
({ \
unsigned long __val; \
asm volatile("movq %%"#__xmm", %0" : "=r"(__val)); \
__val; \
})
static inline unsigned long get_xmm(int n)
{ {
assert(n >= 0 && n <= 7); switch (reg) {
switch (n) {
case 0: case 0:
return GET_XMM(xmm0); asm("movdqa %0, %%xmm0" : : "m"(*data));
break;
case 1: case 1:
return GET_XMM(xmm1); asm("movdqa %0, %%xmm1" : : "m"(*data));
break;
case 2: case 2:
return GET_XMM(xmm2); asm("movdqa %0, %%xmm2" : : "m"(*data));
break;
case 3: case 3:
return GET_XMM(xmm3); asm("movdqa %0, %%xmm3" : : "m"(*data));
break;
case 4: case 4:
return GET_XMM(xmm4); asm("movdqa %0, %%xmm4" : : "m"(*data));
break;
case 5: case 5:
return GET_XMM(xmm5); asm("movdqa %0, %%xmm5" : : "m"(*data));
break;
case 6: case 6:
return GET_XMM(xmm6); asm("movdqa %0, %%xmm6" : : "m"(*data));
break;
case 7: case 7:
return GET_XMM(xmm7); asm("movdqa %0, %%xmm7" : : "m"(*data));
break;
default:
BUG();
} }
/* never reached */
return 0;
} }
static inline void cpu_relax(void) static inline void cpu_relax(void)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment