Commit 0fcc1029 authored by Gavin Shan, committed by Paolo Bonzini

KVM: selftests: Use getcpu() instead of sched_getcpu() in rseq_test

sched_getcpu() is glibc dependent and, as Florian Weimer pointed out,
it may simply return the CPU ID from the registered rseq information.
In that case, it's pointless to compare the return value from
sched_getcpu() with the CPU ID fetched from the registered rseq
information, since both come from the same source.

Fix the issue by replacing sched_getcpu() with getcpu(), as Florian
suggested. The comments are updated accordingly, replacing
"sched_getcpu()" with "getcpu()".
Reported-by: Yihuang Yu <yihyu@redhat.com>
Suggested-by: Florian Weimer <fweimer@redhat.com>
Suggested-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Gavin Shan <gshan@redhat.com>
Message-Id: <20220810104114.6838-3-gshan@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 66d42ac7
@@ -41,6 +41,18 @@ static void guest_code(void)
 	GUEST_SYNC(0);
 }
 
+/*
+ * We have to perform a direct system call for getcpu() because it's
+ * not available until glibc 2.29.
+ */
+static void sys_getcpu(unsigned *cpu)
+{
+	int r;
+
+	r = syscall(__NR_getcpu, cpu, NULL, NULL);
+	TEST_ASSERT(!r, "getcpu failed, errno = %d (%s)", errno, strerror(errno));
+}
+
 static int next_cpu(int cpu)
 {
 	/*
@@ -85,7 +97,7 @@ static void *migration_worker(void *__rseq_tid)
 		atomic_inc(&seq_cnt);
 
 		/*
-		 * Ensure the odd count is visible while sched_getcpu() isn't
+		 * Ensure the odd count is visible while getcpu() isn't
 		 * stable, i.e. while changing affinity is in-progress.
 		 */
 		smp_wmb();
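
As an aside, the odd/even seq_cnt dance above is a seqcount. A rough
standalone sketch of the writer side, with C11 atomics and release
fences standing in for the selftest's atomic_inc() and smp_wmb()
(hypothetical helper names, not taken from the test):

#include <stdatomic.h>

static atomic_uint seq_cnt;

/* Writer: hold the count odd for the duration of the migration. */
static void migration_window(void (*migrate)(void))
{
	atomic_fetch_add_explicit(&seq_cnt, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~smp_wmb() */
	migrate();			/* e.g. a sched_setaffinity() call */
	atomic_thread_fence(memory_order_release);	/* ~smp_wmb() */
	atomic_fetch_add_explicit(&seq_cnt, 1, memory_order_relaxed);
}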
@@ -126,10 +138,10 @@ static void *migration_worker(void *__rseq_tid)
 		 *     check completes.
 		 *
 		 *  3. To ensure the read-side makes efficient forward progress,
-		 *     e.g. if sched_getcpu() involves a syscall. Stalling the
-		 *     read-side means the test will spend more time waiting for
-		 *     sched_getcpu() to stabilize and less time trying to hit
-		 *     the timing-dependent bug.
+		 *     e.g. if getcpu() involves a syscall. Stalling the read-side
+		 *     means the test will spend more time waiting for getcpu()
+		 *     to stabilize and less time trying to hit the timing-dependent
+		 *     bug.
 		 *
 		 * Because any bug in this area is likely to be timing-dependent,
 		 * run with a range of delays at 1us intervals from 1us to 10us
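
For context, the migration worker drives migrations with
sched_setaffinity(2). A minimal helper in that spirit (a sketch with
assumed names; the real worker also inserts the 1us-10us delay between
moves described above):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/types.h>

/* Pin a thread (tid, or 0 for the caller) onto a single CPU. */
static int migrate_to_cpu(pid_t tid, int cpu)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	if (sched_setaffinity(tid, sizeof(mask), &mask)) {
		perror("sched_setaffinity");
		return -1;
	}
	return 0;
}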
@@ -224,9 +236,9 @@ int main(int argc, char *argv[])
 
 		/*
 		 * Verify rseq's CPU matches sched's CPU.  Ensure migration
-		 * doesn't occur between sched_getcpu() and reading the rseq
-		 * cpu_id by rereading both if the sequence count changes, or
-		 * if the count is odd (migration in-progress).
+		 * doesn't occur between getcpu() and reading the rseq cpu_id
+		 * by rereading both if the sequence count changes, or if the
+		 * count is odd (migration in-progress).
 		 */
 		do {
 			/*
@@ -236,12 +248,12 @@ int main(int argc, char *argv[])
 			snapshot = atomic_read(&seq_cnt) & ~1;
 
 			/*
-			 * Ensure reading sched_getcpu() and rseq.cpu_id
-			 * complete in a single "no migration" window, i.e. are
-			 * not reordered across the seq_cnt reads.
+			 * Ensure calling getcpu() and reading rseq.cpu_id complete
+			 * in a single "no migration" window, i.e. are not reordered
+			 * across the seq_cnt reads.
 			 */
 			smp_rmb();
-			cpu = sched_getcpu();
+			sys_getcpu(&cpu);
 			rseq_cpu = rseq_current_cpu_raw();
 			smp_rmb();
 		} while (snapshot != atomic_read(&seq_cnt));
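
The reader side pairs with the writer's odd/even protocol: clearing the
low bit of the snapshot guarantees an in-progress (odd) count can never
compare equal, forcing a retry. A compact sketch, again with C11 acquire
fences standing in for smp_rmb() and a hypothetical sample() callback
for the two CPU reads:

#include <stdatomic.h>

extern atomic_uint seq_cnt;

/* Reader: retry until both CPU samples land inside one stable window. */
static void read_stable(void (*sample)(unsigned *cpu, unsigned *rseq_cpu),
			unsigned *cpu, unsigned *rseq_cpu)
{
	unsigned snapshot;

	do {
		snapshot = atomic_load_explicit(&seq_cnt,
						memory_order_relaxed) & ~1u;
		atomic_thread_fence(memory_order_acquire);	/* ~smp_rmb() */
		sample(cpu, rseq_cpu);
		atomic_thread_fence(memory_order_acquire);	/* ~smp_rmb() */
	} while (snapshot != atomic_load_explicit(&seq_cnt,
						  memory_order_relaxed));
}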
@@ -253,9 +265,9 @@ int main(int argc, char *argv[])
 	/*
 	 * Sanity check that the test was able to enter the guest a reasonable
 	 * number of times, e.g. didn't get stalled too often/long waiting for
-	 * sched_getcpu() to stabilize.  A 2:1 migration:KVM_RUN ratio is a
-	 * fairly conservative ratio on x86-64, which can do _more_ KVM_RUNs
-	 * than migrations given the 1us+ delay in the migration task.
+	 * getcpu() to stabilize.  A 2:1 migration:KVM_RUN ratio is a fairly
+	 * conservative ratio on x86-64, which can do _more_ KVM_RUNs than
+	 * migrations given the 1us+ delay in the migration task.
 	 */
 	TEST_ASSERT(i > (NR_TASK_MIGRATIONS / 2),
		    "Only performed %d KVM_RUNs, task stalled too much?\n", i);