Commit 9ec1eb1b authored by Oliver Upton, committed by Marc Zyngier

KVM: selftests: Have perf_test_util signal when to stop vCPUs

Signal that a test run is complete through perf_test_args instead of
having tests open-code a similar solution. Ensure that the field is
reset to false at the beginning of a test run, as the structure is
reused between test runs; this eliminates a couple of bugs:

access_tracking_perf_test hangs indefinitely on a subsequent test run,
as 'done' remains true. The bug doesn't amount to much right now, since
x86 supports only a single guest mode; however, fixing it is a
precondition for enabling the test on architectures with more than one
guest mode, such as arm64.

memslot_modification_stress_test has the exact opposite problem, where
subsequent test runs complete immediately as 'run_vcpus' remains false.

Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
[oliver: added commit message, preserve spin_wait_for_next_iteration()]
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221118211503.4049023-2-oliver.upton@linux.dev
parent 30a0b95b
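
To make the two failure modes concrete, here is a minimal standalone sketch. It is not the selftest code: fake_vcpu_worker(), start_worker(), join_worker() and the GCC __atomic builtins are illustrative stand-ins for the vCPU threads and the selftests' READ_ONCE()/WRITE_ONCE() wrappers. It shows why clearing the shared flag when threads are started lets the same global state be reused across back-to-back run_test() calls:

/*
 * Standalone sketch (not the selftest code).  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static bool stop_vcpus;	/* plays the role of perf_test_args.stop_vcpus */

static void *fake_vcpu_worker(void *arg)
{
	unsigned long iterations = 0;

	(void)arg;

	/* Run "the guest" until the harness signals completion. */
	while (!__atomic_load_n(&stop_vcpus, __ATOMIC_RELAXED))
		iterations++;

	printf("worker stopped after %lu iterations\n", iterations);
	return NULL;
}

static void start_worker(pthread_t *thread)
{
	/*
	 * The fix: clear the flag when threads are (re)started, mirroring the
	 * new WRITE_ONCE(perf_test_args.stop_vcpus, false) in
	 * perf_test_start_vcpu_threads().  Without this reset, a second run's
	 * worker observes the stale 'true' and exits at once; depending on how
	 * the main thread waits on its workers, the run then either completes
	 * without doing any work (memslot_modification_stress_test) or hangs
	 * forever waiting for iterations that never happen
	 * (access_tracking_perf_test).
	 */
	__atomic_store_n(&stop_vcpus, false, __ATOMIC_RELAXED);
	pthread_create(thread, NULL, fake_vcpu_worker, NULL);
}

static void join_worker(pthread_t thread)
{
	/* Mirrors perf_test_join_vcpu_threads(): signal, then join. */
	__atomic_store_n(&stop_vcpus, true, __ATOMIC_RELAXED);
	pthread_join(thread, NULL);
}

static void run_test(void)
{
	pthread_t thread;

	start_worker(&thread);
	/* ...the real tests drive memory accesses / memslot changes here... */
	join_worker(thread);
}

int main(void)
{
	/* Analogous to for_each_guest_mode() invoking run_test() per mode. */
	run_test();
	run_test();
	return 0;
}
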
--- a/tools/testing/selftests/kvm/access_tracking_perf_test.c
+++ b/tools/testing/selftests/kvm/access_tracking_perf_test.c
@@ -58,9 +58,6 @@ static enum {
 	ITERATION_MARK_IDLE,
 } iteration_work;
 
-/* Set to true when vCPU threads should exit. */
-static bool done;
-
 /* The iteration that was last completed by each vCPU. */
 static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
 
@@ -211,7 +208,7 @@ static bool spin_wait_for_next_iteration(int *current_iteration)
 	int last_iteration = *current_iteration;
 
 	do {
-		if (READ_ONCE(done))
+		if (READ_ONCE(perf_test_args.stop_vcpus))
 			return false;
 
 		*current_iteration = READ_ONCE(iteration);
@@ -321,9 +318,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	mark_memory_idle(vm, nr_vcpus);
 	access_memory(vm, nr_vcpus, ACCESS_READ, "Reading from idle memory");
 
-	/* Set done to signal the vCPU threads to exit */
-	done = true;
-
 	perf_test_join_vcpu_threads(nr_vcpus);
 	perf_test_destroy_vm(vm);
 }
--- a/tools/testing/selftests/kvm/include/perf_test_util.h
+++ b/tools/testing/selftests/kvm/include/perf_test_util.h
@@ -40,6 +40,9 @@ struct perf_test_args {
 	/* Run vCPUs in L2 instead of L1, if the architecture supports it. */
 	bool nested;
 
+	/* Test is done, stop running vCPUs. */
+	bool stop_vcpus;
+
 	struct perf_test_vcpu_args vcpu_args[KVM_MAX_VCPUS];
 };
 
--- a/tools/testing/selftests/kvm/lib/perf_test_util.c
+++ b/tools/testing/selftests/kvm/lib/perf_test_util.c
@@ -267,6 +267,7 @@ void perf_test_start_vcpu_threads(int nr_vcpus,
 	vcpu_thread_fn = vcpu_fn;
 
 	WRITE_ONCE(all_vcpu_threads_running, false);
+	WRITE_ONCE(perf_test_args.stop_vcpus, false);
 
 	for (i = 0; i < nr_vcpus; i++) {
 		struct vcpu_thread *vcpu = &vcpu_threads[i];
@@ -289,6 +290,8 @@ void perf_test_join_vcpu_threads(int nr_vcpus)
 {
 	int i;
 
+	WRITE_ONCE(perf_test_args.stop_vcpus, true);
+
 	for (i = 0; i < nr_vcpus; i++)
 		pthread_join(vcpu_threads[i].thread, NULL);
 }
--- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c
+++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
@@ -34,8 +34,6 @@
 static int nr_vcpus = 1;
 static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
 
-static bool run_vcpus = true;
-
 static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
 {
 	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
@@ -45,7 +43,7 @@ static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
 	run = vcpu->run;
 
 	/* Let the guest access its memory until a stop signal is received */
-	while (READ_ONCE(run_vcpus)) {
+	while (!READ_ONCE(perf_test_args.stop_vcpus)) {
 		ret = _vcpu_run(vcpu);
 		TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
 
@@ -110,8 +108,6 @@ static void run_test(enum vm_guest_mode mode, void *arg)
 	add_remove_memslot(vm, p->memslot_modification_delay,
 			   p->nr_memslot_modifications);
 
-	run_vcpus = false;
-
 	perf_test_join_vcpu_threads(nr_vcpus);
 	pr_info("All vCPU threads joined\n");
 
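
A side note on the accessors used above: the flag is polled from the vCPU threads and flipped from the main thread, so the selftests go through READ_ONCE()/WRITE_ONCE() rather than plain accesses; otherwise the compiler may hoist the load out of the polling loop and spin forever. Below is a minimal illustration using a volatile cast, which is roughly what the tools-side wrappers amount to (an assumption based on the tools/include compiler headers; read_flag_once() is a hypothetical helper, not selftest code):

#include <stdbool.h>

/* 'stop' stands in for perf_test_args.stop_vcpus. */
static bool stop;

/* Force a fresh load on every call, as READ_ONCE() does. */
static inline bool read_flag_once(void)
{
	return *(volatile bool *)&stop;
}

/*
 * Without the volatile access, the compiler may read 'stop' once and turn
 * this into an infinite loop even after another thread sets the flag.
 */
void poll_until_stopped(void)
{
	while (!read_flag_once())
		;	/* keep running the vCPU */
}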