Commit b2626f1e authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull more kvm fixes from Paolo Bonzini:
 "Small x86 fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: selftests: Ensure all migrations are performed when test is affined
  KVM: x86: Swap order of CPUID entry "index" vs. "significant flag" checks
  ptp: Fix ptp_kvm_getcrosststamp issue for x86 ptp_kvm
  x86/kvmclock: Move this_cpu_pvti into kvmclock.h
  selftests: KVM: Don't clobber XMM register when read
  KVM: VMX: Fix a TSX_CTRL_CPUID_CLEAR field mask issue
parents 24f67d82 7b0035ea
...@@ -2,6 +2,20 @@ ...@@ -2,6 +2,20 @@
#ifndef _ASM_X86_KVM_CLOCK_H #ifndef _ASM_X86_KVM_CLOCK_H
#define _ASM_X86_KVM_CLOCK_H #define _ASM_X86_KVM_CLOCK_H
#include <linux/percpu.h>
extern struct clocksource kvm_clock; extern struct clocksource kvm_clock;
DECLARE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
{
return &this_cpu_read(hv_clock_per_cpu)->pvti;
}
/* Return this CPU's kvmclock vsyscall time info pointer. */
static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
{
	struct pvclock_vsyscall_time_info *hvc;

	hvc = this_cpu_read(hv_clock_per_cpu);
	return hvc;
}
#endif /* _ASM_X86_KVM_CLOCK_H */ #endif /* _ASM_X86_KVM_CLOCK_H */
...@@ -49,18 +49,9 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall); ...@@ -49,18 +49,9 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
static struct pvclock_vsyscall_time_info static struct pvclock_vsyscall_time_info
hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE); hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
static struct pvclock_wall_clock wall_clock __bss_decrypted; static struct pvclock_wall_clock wall_clock __bss_decrypted;
static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
static struct pvclock_vsyscall_time_info *hvclock_mem; static struct pvclock_vsyscall_time_info *hvclock_mem;
DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void) EXPORT_PER_CPU_SYMBOL_GPL(hv_clock_per_cpu);
{
return &this_cpu_read(hv_clock_per_cpu)->pvti;
}
static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
{
return this_cpu_read(hv_clock_per_cpu);
}
/* /*
* The wallclock is the time of day when we booted. Since then, some time may * The wallclock is the time of day when we booted. Since then, some time may
......
...@@ -65,8 +65,8 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find( ...@@ -65,8 +65,8 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
for (i = 0; i < nent; i++) { for (i = 0; i < nent; i++) {
e = &entries[i]; e = &entries[i];
if (e->function == function && (e->index == index || if (e->function == function &&
!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX))) (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index))
return e; return e;
} }
......
...@@ -6848,7 +6848,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) ...@@ -6848,7 +6848,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
*/ */
tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL); tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
if (tsx_ctrl) if (tsx_ctrl)
vmx->guest_uret_msrs[i].mask = ~(u64)TSX_CTRL_CPUID_CLEAR; tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
} }
err = alloc_loaded_vmcs(&vmx->vmcs01); err = alloc_loaded_vmcs(&vmx->vmcs01);
......
...@@ -15,8 +15,6 @@ ...@@ -15,8 +15,6 @@
#include <linux/ptp_clock_kernel.h> #include <linux/ptp_clock_kernel.h>
#include <linux/ptp_kvm.h> #include <linux/ptp_kvm.h>
struct pvclock_vsyscall_time_info *hv_clock;
static phys_addr_t clock_pair_gpa; static phys_addr_t clock_pair_gpa;
static struct kvm_clock_pairing clock_pair; static struct kvm_clock_pairing clock_pair;
...@@ -28,8 +26,7 @@ int kvm_arch_ptp_init(void) ...@@ -28,8 +26,7 @@ int kvm_arch_ptp_init(void)
return -ENODEV; return -ENODEV;
clock_pair_gpa = slow_virt_to_phys(&clock_pair); clock_pair_gpa = slow_virt_to_phys(&clock_pair);
hv_clock = pvclock_get_pvti_cpu0_va(); if (!pvclock_get_pvti_cpu0_va())
if (!hv_clock)
return -ENODEV; return -ENODEV;
ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa, ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
...@@ -64,10 +61,8 @@ int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec, ...@@ -64,10 +61,8 @@ int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
struct pvclock_vcpu_time_info *src; struct pvclock_vcpu_time_info *src;
unsigned int version; unsigned int version;
long ret; long ret;
int cpu;
cpu = smp_processor_id(); src = this_cpu_pvti();
src = &hv_clock[cpu].pvti;
do { do {
/* /*
......
...@@ -315,7 +315,7 @@ static inline void set_xmm(int n, unsigned long val) ...@@ -315,7 +315,7 @@ static inline void set_xmm(int n, unsigned long val)
#define GET_XMM(__xmm) \ #define GET_XMM(__xmm) \
({ \ ({ \
unsigned long __val; \ unsigned long __val; \
asm volatile("movq %%"#__xmm", %0" : "=r"(__val) : : #__xmm); \ asm volatile("movq %%"#__xmm", %0" : "=r"(__val)); \
__val; \ __val; \
}) })
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <signal.h> #include <signal.h>
#include <syscall.h> #include <syscall.h>
#include <sys/ioctl.h> #include <sys/ioctl.h>
#include <sys/sysinfo.h>
#include <asm/barrier.h> #include <asm/barrier.h>
#include <linux/atomic.h> #include <linux/atomic.h>
#include <linux/rseq.h> #include <linux/rseq.h>
...@@ -39,6 +40,7 @@ static __thread volatile struct rseq __rseq = { ...@@ -39,6 +40,7 @@ static __thread volatile struct rseq __rseq = {
static pthread_t migration_thread; static pthread_t migration_thread;
static cpu_set_t possible_mask; static cpu_set_t possible_mask;
static int min_cpu, max_cpu;
static bool done; static bool done;
static atomic_t seq_cnt; static atomic_t seq_cnt;
...@@ -57,20 +59,37 @@ static void sys_rseq(int flags) ...@@ -57,20 +59,37 @@ static void sys_rseq(int flags)
TEST_ASSERT(!r, "rseq failed, errno = %d (%s)", errno, strerror(errno)); TEST_ASSERT(!r, "rseq failed, errno = %d (%s)", errno, strerror(errno));
} }
/*
 * Return the next CPU after @cpu that is present in the task's original
 * affinity mask, wrapping around to min_cpu past max_cpu.  cpu_set_t has
 * no iteration helper and its storage is opaque, so probe CPUs one by one;
 * a sparse affinity set (e.g. CPUs 2 and 1023 only) makes this loop spin
 * through many unusable IDs and slows the test down, but stays correct.
 */
static int next_cpu(int cpu)
{
	for (;;) {
		cpu++;

		/* Wrapped past the highest usable CPU: restart at the lowest. */
		if (cpu > max_cpu) {
			cpu = min_cpu;
			TEST_ASSERT(CPU_ISSET(cpu, &possible_mask),
				    "Min CPU = %d must always be usable", cpu);
			return cpu;
		}

		if (CPU_ISSET(cpu, &possible_mask))
			return cpu;
	}
}
static void *migration_worker(void *ign) static void *migration_worker(void *ign)
{ {
cpu_set_t allowed_mask; cpu_set_t allowed_mask;
int r, i, nr_cpus, cpu; int r, i, cpu;
CPU_ZERO(&allowed_mask); CPU_ZERO(&allowed_mask);
nr_cpus = CPU_COUNT(&possible_mask); for (i = 0, cpu = min_cpu; i < NR_TASK_MIGRATIONS; i++, cpu = next_cpu(cpu)) {
for (i = 0; i < NR_TASK_MIGRATIONS; i++) {
cpu = i % nr_cpus;
if (!CPU_ISSET(cpu, &possible_mask))
continue;
CPU_SET(cpu, &allowed_mask); CPU_SET(cpu, &allowed_mask);
/* /*
...@@ -154,6 +173,36 @@ static void *migration_worker(void *ign) ...@@ -154,6 +173,36 @@ static void *migration_worker(void *ign)
return NULL; return NULL;
} }
/*
 * Record the lowest and highest CPU IDs in possible_mask into the
 * min_cpu/max_cpu globals.  Knowing the bounds lets next_cpu() avoid
 * scanning CPU IDs this task can never run on, which matters when the
 * task is affined to a small slice of a large system.
 *
 * Returns 0 on success, -EINVAL if fewer than two usable CPUs exist
 * (migration between CPUs is then impossible).
 */
static int calc_min_max_cpu(void)
{
	int cpu, nr_cpus, usable;

	if (CPU_COUNT(&possible_mask) < 2)
		return -EINVAL;

	/*
	 * No CPU_SET_FOR_EACH exists; walk every configured CPU ID and
	 * test membership individually.
	 */
	nr_cpus = get_nprocs_conf();

	min_cpu = -1;
	max_cpu = -1;
	usable = 0;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (!CPU_ISSET(cpu, &possible_mask))
			continue;

		if (min_cpu < 0)
			min_cpu = cpu;
		max_cpu = cpu;
		usable++;
	}

	if (usable < 2)
		return -EINVAL;

	return 0;
}
int main(int argc, char *argv[]) int main(int argc, char *argv[])
{ {
int r, i, snapshot; int r, i, snapshot;
...@@ -167,8 +216,8 @@ int main(int argc, char *argv[]) ...@@ -167,8 +216,8 @@ int main(int argc, char *argv[])
TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno, TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,
strerror(errno)); strerror(errno));
if (CPU_COUNT(&possible_mask) < 2) { if (calc_min_max_cpu()) {
print_skip("Only one CPU, task migration not possible\n"); print_skip("Only one usable CPU, task migration not possible");
exit(KSFT_SKIP); exit(KSFT_SKIP);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment