Commit 7812d80c authored by David Matlack's avatar David Matlack Committed by Sean Christopherson

KVM: selftests: Rename perf_test_util symbols to memstress

Replace the perf_test_ prefix on symbol names with memstress_ to match
the new file name.

"memstress" better describes the functionality provided by this library,
which is to provide functionality for creating and running a VM that
stresses VM memory by reading and writing to guest memory on all vCPUs
in parallel.

"memstress" also contains the same number of characters as "perf_test",
making it a drop-in replacement in symbols, e.g. function names, without
impacting line lengths. Also the lack of underscore between "mem" and
"stress" makes it clear "memstress" is a noun.
Signed-off-by: default avatarDavid Matlack <dmatlack@google.com>
Reviewed-by: default avatarSean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20221012165729.3505266-4-dmatlack@google.com
Signed-off-by: default avatarSean Christopherson <seanjc@google.com>
parent a008a335
...@@ -126,7 +126,7 @@ static void mark_page_idle(int page_idle_fd, uint64_t pfn) ...@@ -126,7 +126,7 @@ static void mark_page_idle(int page_idle_fd, uint64_t pfn)
} }
static void mark_vcpu_memory_idle(struct kvm_vm *vm, static void mark_vcpu_memory_idle(struct kvm_vm *vm,
struct perf_test_vcpu_args *vcpu_args) struct memstress_vcpu_args *vcpu_args)
{ {
int vcpu_idx = vcpu_args->vcpu_idx; int vcpu_idx = vcpu_args->vcpu_idx;
uint64_t base_gva = vcpu_args->gva; uint64_t base_gva = vcpu_args->gva;
...@@ -148,7 +148,7 @@ static void mark_vcpu_memory_idle(struct kvm_vm *vm, ...@@ -148,7 +148,7 @@ static void mark_vcpu_memory_idle(struct kvm_vm *vm,
TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap."); TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap.");
for (page = 0; page < pages; page++) { for (page = 0; page < pages; page++) {
uint64_t gva = base_gva + page * perf_test_args.guest_page_size; uint64_t gva = base_gva + page * memstress_args.guest_page_size;
uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva); uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);
if (!pfn) { if (!pfn) {
...@@ -220,10 +220,10 @@ static bool spin_wait_for_next_iteration(int *current_iteration) ...@@ -220,10 +220,10 @@ static bool spin_wait_for_next_iteration(int *current_iteration)
return true; return true;
} }
static void vcpu_thread_main(struct perf_test_vcpu_args *vcpu_args) static void vcpu_thread_main(struct memstress_vcpu_args *vcpu_args)
{ {
struct kvm_vcpu *vcpu = vcpu_args->vcpu; struct kvm_vcpu *vcpu = vcpu_args->vcpu;
struct kvm_vm *vm = perf_test_args.vm; struct kvm_vm *vm = memstress_args.vm;
int vcpu_idx = vcpu_args->vcpu_idx; int vcpu_idx = vcpu_args->vcpu_idx;
int current_iteration = 0; int current_iteration = 0;
...@@ -279,7 +279,7 @@ static void run_iteration(struct kvm_vm *vm, int nr_vcpus, const char *descripti ...@@ -279,7 +279,7 @@ static void run_iteration(struct kvm_vm *vm, int nr_vcpus, const char *descripti
static void access_memory(struct kvm_vm *vm, int nr_vcpus, static void access_memory(struct kvm_vm *vm, int nr_vcpus,
enum access_type access, const char *description) enum access_type access, const char *description)
{ {
perf_test_set_write_percent(vm, (access == ACCESS_READ) ? 0 : 100); memstress_set_write_percent(vm, (access == ACCESS_READ) ? 0 : 100);
iteration_work = ITERATION_ACCESS_MEMORY; iteration_work = ITERATION_ACCESS_MEMORY;
run_iteration(vm, nr_vcpus, description); run_iteration(vm, nr_vcpus, description);
} }
...@@ -303,10 +303,10 @@ static void run_test(enum vm_guest_mode mode, void *arg) ...@@ -303,10 +303,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
struct kvm_vm *vm; struct kvm_vm *vm;
int nr_vcpus = params->nr_vcpus; int nr_vcpus = params->nr_vcpus;
vm = perf_test_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1, vm = memstress_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1,
params->backing_src, !overlap_memory_access); params->backing_src, !overlap_memory_access);
perf_test_start_vcpu_threads(nr_vcpus, vcpu_thread_main); memstress_start_vcpu_threads(nr_vcpus, vcpu_thread_main);
pr_info("\n"); pr_info("\n");
access_memory(vm, nr_vcpus, ACCESS_WRITE, "Populating memory"); access_memory(vm, nr_vcpus, ACCESS_WRITE, "Populating memory");
...@@ -324,8 +324,8 @@ static void run_test(enum vm_guest_mode mode, void *arg) ...@@ -324,8 +324,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
/* Set done to signal the vCPU threads to exit */ /* Set done to signal the vCPU threads to exit */
done = true; done = true;
perf_test_join_vcpu_threads(nr_vcpus); memstress_join_vcpu_threads(nr_vcpus);
perf_test_destroy_vm(vm); memstress_destroy_vm(vm);
} }
static void help(char *name) static void help(char *name)
......
...@@ -42,7 +42,7 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; ...@@ -42,7 +42,7 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static size_t demand_paging_size; static size_t demand_paging_size;
static char *guest_data_prototype; static char *guest_data_prototype;
static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args) static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{ {
struct kvm_vcpu *vcpu = vcpu_args->vcpu; struct kvm_vcpu *vcpu = vcpu_args->vcpu;
int vcpu_idx = vcpu_args->vcpu_idx; int vcpu_idx = vcpu_args->vcpu_idx;
...@@ -285,7 +285,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) ...@@ -285,7 +285,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
struct kvm_vm *vm; struct kvm_vm *vm;
int r, i; int r, i;
vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1, vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
p->src_type, p->partition_vcpu_memory_access); p->src_type, p->partition_vcpu_memory_access);
demand_paging_size = get_backing_src_pagesz(p->src_type); demand_paging_size = get_backing_src_pagesz(p->src_type);
...@@ -307,11 +307,11 @@ static void run_test(enum vm_guest_mode mode, void *arg) ...@@ -307,11 +307,11 @@ static void run_test(enum vm_guest_mode mode, void *arg)
TEST_ASSERT(pipefds, "Unable to allocate memory for pipefd"); TEST_ASSERT(pipefds, "Unable to allocate memory for pipefd");
for (i = 0; i < nr_vcpus; i++) { for (i = 0; i < nr_vcpus; i++) {
struct perf_test_vcpu_args *vcpu_args; struct memstress_vcpu_args *vcpu_args;
void *vcpu_hva; void *vcpu_hva;
void *vcpu_alias; void *vcpu_alias;
vcpu_args = &perf_test_args.vcpu_args[i]; vcpu_args = &memstress_args.vcpu_args[i];
/* Cache the host addresses of the region */ /* Cache the host addresses of the region */
vcpu_hva = addr_gpa2hva(vm, vcpu_args->gpa); vcpu_hva = addr_gpa2hva(vm, vcpu_args->gpa);
...@@ -329,17 +329,17 @@ static void run_test(enum vm_guest_mode mode, void *arg) ...@@ -329,17 +329,17 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pipefds[i * 2], p->uffd_mode, pipefds[i * 2], p->uffd_mode,
p->uffd_delay, &uffd_args[i], p->uffd_delay, &uffd_args[i],
vcpu_hva, vcpu_alias, vcpu_hva, vcpu_alias,
vcpu_args->pages * perf_test_args.guest_page_size); vcpu_args->pages * memstress_args.guest_page_size);
} }
} }
pr_info("Finished creating vCPUs and starting uffd threads\n"); pr_info("Finished creating vCPUs and starting uffd threads\n");
clock_gettime(CLOCK_MONOTONIC, &start); clock_gettime(CLOCK_MONOTONIC, &start);
perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker); memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
pr_info("Started all vCPUs\n"); pr_info("Started all vCPUs\n");
perf_test_join_vcpu_threads(nr_vcpus); memstress_join_vcpu_threads(nr_vcpus);
ts_diff = timespec_elapsed(start); ts_diff = timespec_elapsed(start);
pr_info("All vCPU threads joined\n"); pr_info("All vCPU threads joined\n");
...@@ -358,10 +358,10 @@ static void run_test(enum vm_guest_mode mode, void *arg) ...@@ -358,10 +358,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pr_info("Total guest execution time: %ld.%.9lds\n", pr_info("Total guest execution time: %ld.%.9lds\n",
ts_diff.tv_sec, ts_diff.tv_nsec); ts_diff.tv_sec, ts_diff.tv_nsec);
pr_info("Overall demand paging rate: %f pgs/sec\n", pr_info("Overall demand paging rate: %f pgs/sec\n",
perf_test_args.vcpu_args[0].pages * nr_vcpus / memstress_args.vcpu_args[0].pages * nr_vcpus /
((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 100000000.0)); ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 100000000.0));
perf_test_destroy_vm(vm); memstress_destroy_vm(vm);
free(guest_data_prototype); free(guest_data_prototype);
if (p->uffd_mode) { if (p->uffd_mode) {
......
...@@ -67,7 +67,7 @@ static bool host_quit; ...@@ -67,7 +67,7 @@ static bool host_quit;
static int iteration; static int iteration;
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS]; static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args) static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{ {
struct kvm_vcpu *vcpu = vcpu_args->vcpu; struct kvm_vcpu *vcpu = vcpu_args->vcpu;
int vcpu_idx = vcpu_args->vcpu_idx; int vcpu_idx = vcpu_args->vcpu_idx;
...@@ -141,7 +141,7 @@ static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable) ...@@ -141,7 +141,7 @@ static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
int i; int i;
for (i = 0; i < slots; i++) { for (i = 0; i < slots; i++) {
int slot = PERF_TEST_MEM_SLOT_INDEX + i; int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0; int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;
vm_mem_region_set_flags(vm, slot, flags); vm_mem_region_set_flags(vm, slot, flags);
...@@ -163,7 +163,7 @@ static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots ...@@ -163,7 +163,7 @@ static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots
int i; int i;
for (i = 0; i < slots; i++) { for (i = 0; i < slots; i++) {
int slot = PERF_TEST_MEM_SLOT_INDEX + i; int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
kvm_vm_get_dirty_log(vm, slot, bitmaps[i]); kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
} }
...@@ -175,7 +175,7 @@ static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], ...@@ -175,7 +175,7 @@ static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
int i; int i;
for (i = 0; i < slots; i++) { for (i = 0; i < slots; i++) {
int slot = PERF_TEST_MEM_SLOT_INDEX + i; int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot); kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
} }
...@@ -223,13 +223,13 @@ static void run_test(enum vm_guest_mode mode, void *arg) ...@@ -223,13 +223,13 @@ static void run_test(enum vm_guest_mode mode, void *arg)
struct timespec clear_dirty_log_total = (struct timespec){0}; struct timespec clear_dirty_log_total = (struct timespec){0};
int i; int i;
vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
p->slots, p->backing_src, p->slots, p->backing_src,
p->partition_vcpu_memory_access); p->partition_vcpu_memory_access);
pr_info("Random seed: %u\n", p->random_seed); pr_info("Random seed: %u\n", p->random_seed);
perf_test_set_random_seed(vm, p->random_seed); memstress_set_random_seed(vm, p->random_seed);
perf_test_set_write_percent(vm, p->write_percent); memstress_set_write_percent(vm, p->write_percent);
guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm->page_shift; guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm->page_shift;
guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages); guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
...@@ -259,9 +259,9 @@ static void run_test(enum vm_guest_mode mode, void *arg) ...@@ -259,9 +259,9 @@ static void run_test(enum vm_guest_mode mode, void *arg)
* occurring during the dirty memory iterations below, which * occurring during the dirty memory iterations below, which
* would pollute the performance results. * would pollute the performance results.
*/ */
perf_test_set_write_percent(vm, 100); memstress_set_write_percent(vm, 100);
perf_test_set_random_access(vm, false); memstress_set_random_access(vm, false);
perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker); memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
/* Allow the vCPUs to populate memory */ /* Allow the vCPUs to populate memory */
pr_debug("Starting iteration %d - Populating\n", iteration); pr_debug("Starting iteration %d - Populating\n", iteration);
...@@ -282,8 +282,8 @@ static void run_test(enum vm_guest_mode mode, void *arg) ...@@ -282,8 +282,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pr_info("Enabling dirty logging time: %ld.%.9lds\n\n", pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
ts_diff.tv_sec, ts_diff.tv_nsec); ts_diff.tv_sec, ts_diff.tv_nsec);
perf_test_set_write_percent(vm, p->write_percent); memstress_set_write_percent(vm, p->write_percent);
perf_test_set_random_access(vm, p->random_access); memstress_set_random_access(vm, p->random_access);
while (iteration < p->iterations) { while (iteration < p->iterations) {
/* /*
...@@ -345,7 +345,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) ...@@ -345,7 +345,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
* wait for them to exit. * wait for them to exit.
*/ */
host_quit = true; host_quit = true;
perf_test_join_vcpu_threads(nr_vcpus); memstress_join_vcpu_threads(nr_vcpus);
avg = timespec_div(get_dirty_log_total, p->iterations); avg = timespec_div(get_dirty_log_total, p->iterations);
pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n", pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
...@@ -361,7 +361,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) ...@@ -361,7 +361,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
free_bitmaps(bitmaps, p->slots); free_bitmaps(bitmaps, p->slots);
arch_cleanup_vm(vm); arch_cleanup_vm(vm);
perf_test_destroy_vm(vm); memstress_destroy_vm(vm);
} }
static void help(char *name) static void help(char *name)
...@@ -466,7 +466,7 @@ int main(int argc, char *argv[]) ...@@ -466,7 +466,7 @@ int main(int argc, char *argv[])
guest_modes_cmdline(optarg); guest_modes_cmdline(optarg);
break; break;
case 'n': case 'n':
perf_test_args.nested = true; memstress_args.nested = true;
break; break;
case 'o': case 'o':
p.partition_vcpu_memory_access = false; p.partition_vcpu_memory_access = false;
...@@ -500,9 +500,9 @@ int main(int argc, char *argv[]) ...@@ -500,9 +500,9 @@ int main(int argc, char *argv[])
} }
if (pcpu_list) { if (pcpu_list) {
kvm_parse_vcpu_pinning(pcpu_list, perf_test_args.vcpu_to_pcpu, kvm_parse_vcpu_pinning(pcpu_list, memstress_args.vcpu_to_pcpu,
nr_vcpus); nr_vcpus);
perf_test_args.pin_vcpus = true; memstress_args.pin_vcpus = true;
} }
TEST_ASSERT(p.iterations >= 2, "The test should have at least two iterations"); TEST_ASSERT(p.iterations >= 2, "The test should have at least two iterations");
......
...@@ -17,9 +17,9 @@ ...@@ -17,9 +17,9 @@
#define DEFAULT_PER_VCPU_MEM_SIZE (1 << 30) /* 1G */ #define DEFAULT_PER_VCPU_MEM_SIZE (1 << 30) /* 1G */
#define PERF_TEST_MEM_SLOT_INDEX 1 #define MEMSTRESS_MEM_SLOT_INDEX 1
struct perf_test_vcpu_args { struct memstress_vcpu_args {
uint64_t gpa; uint64_t gpa;
uint64_t gva; uint64_t gva;
uint64_t pages; uint64_t pages;
...@@ -29,7 +29,7 @@ struct perf_test_vcpu_args { ...@@ -29,7 +29,7 @@ struct perf_test_vcpu_args {
int vcpu_idx; int vcpu_idx;
}; };
struct perf_test_args { struct memstress_args {
struct kvm_vm *vm; struct kvm_vm *vm;
/* The starting address and size of the guest test region. */ /* The starting address and size of the guest test region. */
uint64_t gpa; uint64_t gpa;
...@@ -47,26 +47,26 @@ struct perf_test_args { ...@@ -47,26 +47,26 @@ struct perf_test_args {
/* The vCPU=>pCPU pinning map. Only valid if pin_vcpus is true. */ /* The vCPU=>pCPU pinning map. Only valid if pin_vcpus is true. */
uint32_t vcpu_to_pcpu[KVM_MAX_VCPUS]; uint32_t vcpu_to_pcpu[KVM_MAX_VCPUS];
struct perf_test_vcpu_args vcpu_args[KVM_MAX_VCPUS]; struct memstress_vcpu_args vcpu_args[KVM_MAX_VCPUS];
}; };
extern struct perf_test_args perf_test_args; extern struct memstress_args memstress_args;
struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus, struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
uint64_t vcpu_memory_bytes, int slots, uint64_t vcpu_memory_bytes, int slots,
enum vm_mem_backing_src_type backing_src, enum vm_mem_backing_src_type backing_src,
bool partition_vcpu_memory_access); bool partition_vcpu_memory_access);
void perf_test_destroy_vm(struct kvm_vm *vm); void memstress_destroy_vm(struct kvm_vm *vm);
void perf_test_set_write_percent(struct kvm_vm *vm, uint32_t write_percent); void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent);
void perf_test_set_random_seed(struct kvm_vm *vm, uint32_t random_seed); void memstress_set_random_seed(struct kvm_vm *vm, uint32_t random_seed);
void perf_test_set_random_access(struct kvm_vm *vm, bool random_access); void memstress_set_random_access(struct kvm_vm *vm, bool random_access);
void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *)); void memstress_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct memstress_vcpu_args *));
void perf_test_join_vcpu_threads(int vcpus); void memstress_join_vcpu_threads(int vcpus);
void perf_test_guest_code(uint32_t vcpu_id); void memstress_guest_code(uint32_t vcpu_id);
uint64_t perf_test_nested_pages(int nr_vcpus); uint64_t memstress_nested_pages(int nr_vcpus);
void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]); void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);
#endif /* SELFTEST_KVM_MEMSTRESS_H */ #endif /* SELFTEST_KVM_MEMSTRESS_H */
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
#include "memstress.h" #include "memstress.h"
#include "processor.h" #include "processor.h"
struct perf_test_args perf_test_args; struct memstress_args memstress_args;
/* /*
* Guest virtual memory offset of the testing memory slot. * Guest virtual memory offset of the testing memory slot.
...@@ -33,7 +33,7 @@ struct vcpu_thread { ...@@ -33,7 +33,7 @@ struct vcpu_thread {
static struct vcpu_thread vcpu_threads[KVM_MAX_VCPUS]; static struct vcpu_thread vcpu_threads[KVM_MAX_VCPUS];
/* The function run by each vCPU thread, as provided by the test. */ /* The function run by each vCPU thread, as provided by the test. */
static void (*vcpu_thread_fn)(struct perf_test_vcpu_args *); static void (*vcpu_thread_fn)(struct memstress_vcpu_args *);
/* Set to true once all vCPU threads are up and running. */ /* Set to true once all vCPU threads are up and running. */
static bool all_vcpu_threads_running; static bool all_vcpu_threads_running;
...@@ -44,10 +44,10 @@ static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; ...@@ -44,10 +44,10 @@ static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
* Continuously write to the first 8 bytes of each page in the * Continuously write to the first 8 bytes of each page in the
* specified region. * specified region.
*/ */
void perf_test_guest_code(uint32_t vcpu_idx) void memstress_guest_code(uint32_t vcpu_idx)
{ {
struct perf_test_args *args = &perf_test_args; struct memstress_args *args = &memstress_args;
struct perf_test_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx]; struct memstress_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx];
struct guest_random_state rand_state; struct guest_random_state rand_state;
uint64_t gva; uint64_t gva;
uint64_t pages; uint64_t pages;
...@@ -82,13 +82,13 @@ void perf_test_guest_code(uint32_t vcpu_idx) ...@@ -82,13 +82,13 @@ void perf_test_guest_code(uint32_t vcpu_idx)
} }
} }
void perf_test_setup_vcpus(struct kvm_vm *vm, int nr_vcpus, void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
struct kvm_vcpu *vcpus[], struct kvm_vcpu *vcpus[],
uint64_t vcpu_memory_bytes, uint64_t vcpu_memory_bytes,
bool partition_vcpu_memory_access) bool partition_vcpu_memory_access)
{ {
struct perf_test_args *args = &perf_test_args; struct memstress_args *args = &memstress_args;
struct perf_test_vcpu_args *vcpu_args; struct memstress_vcpu_args *vcpu_args;
int i; int i;
for (i = 0; i < nr_vcpus; i++) { for (i = 0; i < nr_vcpus; i++) {
...@@ -118,12 +118,12 @@ void perf_test_setup_vcpus(struct kvm_vm *vm, int nr_vcpus, ...@@ -118,12 +118,12 @@ void perf_test_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
} }
} }
struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus, struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
uint64_t vcpu_memory_bytes, int slots, uint64_t vcpu_memory_bytes, int slots,
enum vm_mem_backing_src_type backing_src, enum vm_mem_backing_src_type backing_src,
bool partition_vcpu_memory_access) bool partition_vcpu_memory_access)
{ {
struct perf_test_args *args = &perf_test_args; struct memstress_args *args = &memstress_args;
struct kvm_vm *vm; struct kvm_vm *vm;
uint64_t guest_num_pages, slot0_pages = 0; uint64_t guest_num_pages, slot0_pages = 0;
uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src); uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src);
...@@ -157,7 +157,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus, ...@@ -157,7 +157,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
* in-memory data structures. * in-memory data structures.
*/ */
if (args->nested) if (args->nested)
slot0_pages += perf_test_nested_pages(nr_vcpus); slot0_pages += memstress_nested_pages(nr_vcpus);
/* /*
* Pass guest_num_pages to populate the page tables for test memory. * Pass guest_num_pages to populate the page tables for test memory.
...@@ -165,7 +165,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus, ...@@ -165,7 +165,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
* effect as KVM allows aliasing HVAs in meslots. * effect as KVM allows aliasing HVAs in meslots.
*/ */
vm = __vm_create_with_vcpus(mode, nr_vcpus, slot0_pages + guest_num_pages, vm = __vm_create_with_vcpus(mode, nr_vcpus, slot0_pages + guest_num_pages,
perf_test_guest_code, vcpus); memstress_guest_code, vcpus);
args->vm = vm; args->vm = vm;
...@@ -206,59 +206,59 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus, ...@@ -206,59 +206,59 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
vm_paddr_t region_start = args->gpa + region_pages * args->guest_page_size * i; vm_paddr_t region_start = args->gpa + region_pages * args->guest_page_size * i;
vm_userspace_mem_region_add(vm, backing_src, region_start, vm_userspace_mem_region_add(vm, backing_src, region_start,
PERF_TEST_MEM_SLOT_INDEX + i, MEMSTRESS_MEM_SLOT_INDEX + i,
region_pages, 0); region_pages, 0);
} }
/* Do mapping for the demand paging memory slot */ /* Do mapping for the demand paging memory slot */
virt_map(vm, guest_test_virt_mem, args->gpa, guest_num_pages); virt_map(vm, guest_test_virt_mem, args->gpa, guest_num_pages);
perf_test_setup_vcpus(vm, nr_vcpus, vcpus, vcpu_memory_bytes, memstress_setup_vcpus(vm, nr_vcpus, vcpus, vcpu_memory_bytes,
partition_vcpu_memory_access); partition_vcpu_memory_access);
if (args->nested) { if (args->nested) {
pr_info("Configuring vCPUs to run in L2 (nested).\n"); pr_info("Configuring vCPUs to run in L2 (nested).\n");
perf_test_setup_nested(vm, nr_vcpus, vcpus); memstress_setup_nested(vm, nr_vcpus, vcpus);
} }
ucall_init(vm, NULL); ucall_init(vm, NULL);
/* Export the shared variables to the guest. */ /* Export the shared variables to the guest. */
sync_global_to_guest(vm, perf_test_args); sync_global_to_guest(vm, memstress_args);
return vm; return vm;
} }
void perf_test_destroy_vm(struct kvm_vm *vm) void memstress_destroy_vm(struct kvm_vm *vm)
{ {
ucall_uninit(vm); ucall_uninit(vm);
kvm_vm_free(vm); kvm_vm_free(vm);
} }
void perf_test_set_write_percent(struct kvm_vm *vm, uint32_t write_percent) void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent)
{ {
perf_test_args.write_percent = write_percent; memstress_args.write_percent = write_percent;
sync_global_to_guest(vm, perf_test_args.write_percent); sync_global_to_guest(vm, memstress_args.write_percent);
} }
void perf_test_set_random_seed(struct kvm_vm *vm, uint32_t random_seed) void memstress_set_random_seed(struct kvm_vm *vm, uint32_t random_seed)
{ {
perf_test_args.random_seed = random_seed; memstress_args.random_seed = random_seed;
sync_global_to_guest(vm, perf_test_args.random_seed); sync_global_to_guest(vm, memstress_args.random_seed);
} }
void perf_test_set_random_access(struct kvm_vm *vm, bool random_access) void memstress_set_random_access(struct kvm_vm *vm, bool random_access)
{ {
perf_test_args.random_access = random_access; memstress_args.random_access = random_access;
sync_global_to_guest(vm, perf_test_args.random_access); sync_global_to_guest(vm, memstress_args.random_access);
} }
uint64_t __weak perf_test_nested_pages(int nr_vcpus) uint64_t __weak memstress_nested_pages(int nr_vcpus)
{ {
return 0; return 0;
} }
void __weak perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu **vcpus) void __weak memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu **vcpus)
{ {
pr_info("%s() not support on this architecture, skipping.\n", __func__); pr_info("%s() not support on this architecture, skipping.\n", __func__);
exit(KSFT_SKIP); exit(KSFT_SKIP);
...@@ -269,8 +269,8 @@ static void *vcpu_thread_main(void *data) ...@@ -269,8 +269,8 @@ static void *vcpu_thread_main(void *data)
struct vcpu_thread *vcpu = data; struct vcpu_thread *vcpu = data;
int vcpu_idx = vcpu->vcpu_idx; int vcpu_idx = vcpu->vcpu_idx;
if (perf_test_args.pin_vcpus) if (memstress_args.pin_vcpus)
kvm_pin_this_task_to_pcpu(perf_test_args.vcpu_to_pcpu[vcpu_idx]); kvm_pin_this_task_to_pcpu(memstress_args.vcpu_to_pcpu[vcpu_idx]);
WRITE_ONCE(vcpu->running, true); WRITE_ONCE(vcpu->running, true);
...@@ -283,13 +283,13 @@ static void *vcpu_thread_main(void *data) ...@@ -283,13 +283,13 @@ static void *vcpu_thread_main(void *data)
while (!READ_ONCE(all_vcpu_threads_running)) while (!READ_ONCE(all_vcpu_threads_running))
; ;
vcpu_thread_fn(&perf_test_args.vcpu_args[vcpu_idx]); vcpu_thread_fn(&memstress_args.vcpu_args[vcpu_idx]);
return NULL; return NULL;
} }
void perf_test_start_vcpu_threads(int nr_vcpus, void memstress_start_vcpu_threads(int nr_vcpus,
void (*vcpu_fn)(struct perf_test_vcpu_args *)) void (*vcpu_fn)(struct memstress_vcpu_args *))
{ {
int i; int i;
...@@ -313,7 +313,7 @@ void perf_test_start_vcpu_threads(int nr_vcpus, ...@@ -313,7 +313,7 @@ void perf_test_start_vcpu_threads(int nr_vcpus,
WRITE_ONCE(all_vcpu_threads_running, true); WRITE_ONCE(all_vcpu_threads_running, true);
} }
void perf_test_join_vcpu_threads(int nr_vcpus) void memstress_join_vcpu_threads(int nr_vcpus)
{ {
int i; int i;
......
...@@ -15,21 +15,21 @@ ...@@ -15,21 +15,21 @@
#include "processor.h" #include "processor.h"
#include "vmx.h" #include "vmx.h"
void perf_test_l2_guest_code(uint64_t vcpu_id) void memstress_l2_guest_code(uint64_t vcpu_id)
{ {
perf_test_guest_code(vcpu_id); memstress_guest_code(vcpu_id);
vmcall(); vmcall();
} }
extern char perf_test_l2_guest_entry[]; extern char memstress_l2_guest_entry[];
__asm__( __asm__(
"perf_test_l2_guest_entry:" "memstress_l2_guest_entry:"
" mov (%rsp), %rdi;" " mov (%rsp), %rdi;"
" call perf_test_l2_guest_code;" " call memstress_l2_guest_code;"
" ud2;" " ud2;"
); );
static void perf_test_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id) static void memstress_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
{ {
#define L2_GUEST_STACK_SIZE 64 #define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
...@@ -42,14 +42,14 @@ static void perf_test_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id) ...@@ -42,14 +42,14 @@ static void perf_test_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1]; rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1];
*rsp = vcpu_id; *rsp = vcpu_id;
prepare_vmcs(vmx, perf_test_l2_guest_entry, rsp); prepare_vmcs(vmx, memstress_l2_guest_entry, rsp);
GUEST_ASSERT(!vmlaunch()); GUEST_ASSERT(!vmlaunch());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL); GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
GUEST_DONE(); GUEST_DONE();
} }
uint64_t perf_test_nested_pages(int nr_vcpus) uint64_t memstress_nested_pages(int nr_vcpus)
{ {
/* /*
* 513 page tables is enough to identity-map 256 TiB of L2 with 1G * 513 page tables is enough to identity-map 256 TiB of L2 with 1G
...@@ -59,7 +59,7 @@ uint64_t perf_test_nested_pages(int nr_vcpus) ...@@ -59,7 +59,7 @@ uint64_t perf_test_nested_pages(int nr_vcpus)
return 513 + 10 * nr_vcpus; return 513 + 10 * nr_vcpus;
} }
void perf_test_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm) void memstress_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm)
{ {
uint64_t start, end; uint64_t start, end;
...@@ -72,12 +72,12 @@ void perf_test_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm) ...@@ -72,12 +72,12 @@ void perf_test_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm)
*/ */
nested_identity_map_1g(vmx, vm, 0, 0x100000000ULL); nested_identity_map_1g(vmx, vm, 0, 0x100000000ULL);
start = align_down(perf_test_args.gpa, PG_SIZE_1G); start = align_down(memstress_args.gpa, PG_SIZE_1G);
end = align_up(perf_test_args.gpa + perf_test_args.size, PG_SIZE_1G); end = align_up(memstress_args.gpa + memstress_args.size, PG_SIZE_1G);
nested_identity_map_1g(vmx, vm, start, end - start); nested_identity_map_1g(vmx, vm, start, end - start);
} }
void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]) void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
{ {
struct vmx_pages *vmx, *vmx0 = NULL; struct vmx_pages *vmx, *vmx0 = NULL;
struct kvm_regs regs; struct kvm_regs regs;
...@@ -90,7 +90,7 @@ void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vc ...@@ -90,7 +90,7 @@ void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vc
vmx = vcpu_alloc_vmx(vm, &vmx_gva); vmx = vcpu_alloc_vmx(vm, &vmx_gva);
if (vcpu_id == 0) { if (vcpu_id == 0) {
perf_test_setup_ept(vmx, vm); memstress_setup_ept(vmx, vm);
vmx0 = vmx; vmx0 = vmx;
} else { } else {
/* Share the same EPT table across all vCPUs. */ /* Share the same EPT table across all vCPUs. */
...@@ -100,11 +100,11 @@ void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vc ...@@ -100,11 +100,11 @@ void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vc
} }
/* /*
* Override the vCPU to run perf_test_l1_guest_code() which will * Override the vCPU to run memstress_l1_guest_code() which will
* bounce it into L2 before calling perf_test_guest_code(). * bounce it into L2 before calling memstress_guest_code().
*/ */
vcpu_regs_get(vcpus[vcpu_id], &regs); vcpu_regs_get(vcpus[vcpu_id], &regs);
regs.rip = (unsigned long) perf_test_l1_guest_code; regs.rip = (unsigned long) memstress_l1_guest_code;
vcpu_regs_set(vcpus[vcpu_id], &regs); vcpu_regs_set(vcpus[vcpu_id], &regs);
vcpu_args_set(vcpus[vcpu_id], 2, vmx_gva, vcpu_id); vcpu_args_set(vcpus[vcpu_id], 2, vmx_gva, vcpu_id);
} }
......
...@@ -36,7 +36,7 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; ...@@ -36,7 +36,7 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static bool run_vcpus = true; static bool run_vcpus = true;
static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args) static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{ {
struct kvm_vcpu *vcpu = vcpu_args->vcpu; struct kvm_vcpu *vcpu = vcpu_args->vcpu;
struct kvm_run *run; struct kvm_run *run;
...@@ -75,7 +75,7 @@ static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay, ...@@ -75,7 +75,7 @@ static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
* Add the dummy memslot just below the memstress memslot, which is * Add the dummy memslot just below the memstress memslot, which is
* at the top of the guest physical address space. * at the top of the guest physical address space.
*/ */
gpa = perf_test_args.gpa - pages * vm->page_size; gpa = memstress_args.gpa - pages * vm->page_size;
for (i = 0; i < nr_modifications; i++) { for (i = 0; i < nr_modifications; i++) {
usleep(delay); usleep(delay);
...@@ -97,13 +97,13 @@ static void run_test(enum vm_guest_mode mode, void *arg) ...@@ -97,13 +97,13 @@ static void run_test(enum vm_guest_mode mode, void *arg)
struct test_params *p = arg; struct test_params *p = arg;
struct kvm_vm *vm; struct kvm_vm *vm;
vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1, vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
VM_MEM_SRC_ANONYMOUS, VM_MEM_SRC_ANONYMOUS,
p->partition_vcpu_memory_access); p->partition_vcpu_memory_access);
pr_info("Finished creating vCPUs\n"); pr_info("Finished creating vCPUs\n");
perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker); memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
pr_info("Started all vCPUs\n"); pr_info("Started all vCPUs\n");
...@@ -111,10 +111,10 @@ static void run_test(enum vm_guest_mode mode, void *arg) ...@@ -111,10 +111,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
run_vcpus = false; run_vcpus = false;
perf_test_join_vcpu_threads(nr_vcpus); memstress_join_vcpu_threads(nr_vcpus);
pr_info("All vCPU threads joined\n"); pr_info("All vCPU threads joined\n");
perf_test_destroy_vm(vm); memstress_destroy_vm(vm);
} }
static void help(char *name) static void help(char *name)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment