Commit 94391516 authored by Rich Prohaska, committed by Yoni Fogel

#4443 measure multithread performance of malloc and free refs[t:4443]

git-svn-id: file:///svn/toku/tokudb@39619 c7de825b-a66e-492c-adef-691d508d4ae1
parent 64f103e0
@@ -158,9 +158,11 @@ BDB_DONTRUN_TESTS = \
multiprocess \
mvcc-create-table \
mvcc-many-committed \
perf_checkpoint_var \
perf_malloc_free \
perf_nop \
perf_ptquery \
perf_checkpoint_var \
perf_xmalloc_free \
prelock-read-read \
prelock-read-write \
prelock-write-read \
@@ -1049,6 +1051,9 @@ maxsize-for-loader-A.tdbrun: maxsize-for-loader.tdb
$(TDBVGRIND) ./$< -e $@ -f 2> /dev/null $(SUMMARIZE_CMD)
maxsize-for-loader-B.tdbrun: maxsize-for-loader.tdb
./$< -e $@ 2> /dev/null $(SUMMARIZE_CMD)
perf%.tdb: CPPFLAGS+=-DDONT_DEPRECATE_MALLOC
clean:
rm -f $(ALL_BINS)
rm -rf dir.* *.check.output *.check.valgrind
......
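The perf%.tdb rule adds -DDONT_DEPRECATE_MALLOC, presumably because these perf tests call libc malloc and free directly, which the portability layer otherwise marks as deprecated so that engine code goes through the toku_* wrappers. A minimal sketch of how such a deprecation guard is commonly written, assuming a GCC-style attribute; the actual declarations in the portability headers may differ:

/* Hypothetical sketch of the guard that -DDONT_DEPRECATE_MALLOC bypasses.
 * Redeclaring the libc functions with the deprecated attribute makes any
 * direct call to malloc or free produce a compile-time warning. */
#include <stdlib.h>
#if !defined(DONT_DEPRECATE_MALLOC)
void *malloc(size_t size) __attribute__((__deprecated__));
void free(void *ptr) __attribute__((__deprecated__));
#endif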
/* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved."
#ident "$Id: test_stress1.c 39258 2012-01-27 13:51:58Z zardosht $"
#include "test.h"
#include <stdio.h>
#include <stdlib.h>
#include <toku_pthread.h>
#include <unistd.h>
#include <memory.h>
#include <sys/stat.h>
#include <db.h>
#include "threaded_stress_test_helpers.h"
//
// This test measures the multithreaded performance of malloc and free:
// We spawn a number of pthreads, each of which repeatedly allocates a
// small block with malloc and immediately releases it with free, for the
// duration of the test.
//
// The worker operations do not touch the dictionary; the stress test
// framework is used only to set up the environment and to create, drive,
// and time the worker threads.
//
// Variables that are interesting to tweak and run:
// - number of threads (num_ptquery_threads)
// - duration of the test (time_of_test)
//
static void
stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
int n = cli_args->num_elements;
//
// the threads that we want:
// - some threads repeatedly calling malloc and free
//
if (verbose) printf("starting creation of pthreads\n");
const int num_threads = cli_args->num_ptquery_threads;
struct arg myargs[num_threads];
for (int i = 0; i < num_threads; i++) {
arg_init(&myargs[i], n, dbp, env, cli_args);
}
for (int i = 0; i < num_threads; i++) {
myargs[i].operation = malloc_free_op;
}
run_workers(myargs, num_threads, cli_args->time_of_test, false, cli_args);
}
int
test_main(int argc, char *const argv[]) {
struct cli_args args = get_default_args_for_perf();
parse_stress_test_args(argc, argv, &args);
stress_test_main(&args);
return 0;
}
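For context, the measurement this test performs can be approximated outside the stress framework with plain pthreads; a self-contained sketch is below (thread and iteration counts are arbitrary, the 256-byte size matches malloc_free_op, compile with -pthread):

/* Standalone sketch: each thread does malloc/free pairs in a loop,
 * and the main thread reports aggregate pairs per second. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define NTHREADS 4
#define ITERS 10000000L

static void *worker(void *arg) {
    (void) arg;
    for (long i = 0; i < ITERS; i++) {
        void *p = malloc(256);
        free(p);
    }
    return NULL;
}

int main(void) {
    pthread_t tids[NTHREADS];
    struct timespec t0, t1;
    clock_gettime(CLOCK_MONOTONIC, &t0);
    for (int i = 0; i < NTHREADS; i++)
        pthread_create(&tids[i], NULL, worker, NULL);
    for (int i = 0; i < NTHREADS; i++)
        pthread_join(tids[i], NULL);
    clock_gettime(CLOCK_MONOTONIC, &t1);
    double secs = (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9;
    printf("%.0f malloc/free pairs per second\n", NTHREADS * ITERS / secs);
    return 0;
}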
/* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved."
#ident "$Id: test_stress1.c 39258 2012-01-27 13:51:58Z zardosht $"
#include "test.h"
#include <stdio.h>
#include <stdlib.h>
#include <toku_pthread.h>
#include <unistd.h>
#include <memory.h>
#include <sys/stat.h>
#include <db.h>
#include "threaded_stress_test_helpers.h"
//
// This test measures the multithreaded performance of toku_xmalloc and toku_free:
// We spawn a number of pthreads, each of which repeatedly allocates a
// small block with toku_xmalloc and immediately releases it with toku_free,
// for the duration of the test.
//
// The worker operations do not touch the dictionary; the stress test
// framework is used only to set up the environment and to create, drive,
// and time the worker threads.
//
// Variables that are interesting to tweak and run:
// - number of threads (num_ptquery_threads)
// - duration of the test (time_of_test)
//
static void
stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
int n = cli_args->num_elements;
//
// the threads that we want:
// - some threads repeatedly calling toku_xmalloc and toku_free
//
if (verbose) printf("starting creation of pthreads\n");
const int num_threads = cli_args->num_ptquery_threads;
struct arg myargs[num_threads];
for (int i = 0; i < num_threads; i++) {
arg_init(&myargs[i], n, dbp, env, cli_args);
}
for (int i = 0; i < num_threads; i++) {
myargs[i].operation = xmalloc_free_op;
}
run_workers(myargs, num_threads, cli_args->time_of_test, false, cli_args);
}
int
test_main(int argc, char *const argv[]) {
struct cli_args args = get_default_args_for_perf();
parse_stress_test_args(argc, argv, &args);
stress_test_main(&args);
return 0;
}
@@ -121,7 +121,7 @@ struct worker_extra {
static void lock_worker_op(struct worker_extra* we) {
ARG arg = we->thread_arg;
if (arg->lock_type != STRESS_LOCK_NONE) {
toku_pthread_mutex_lock(we->operation_lock_mutex);
if (0) toku_pthread_mutex_lock(we->operation_lock_mutex);
if (arg->lock_type == STRESS_LOCK_SHARED) {
rwlock_read_lock(we->operation_lock, we->operation_lock_mutex);
} else if (arg->lock_type == STRESS_LOCK_EXCL) {
@@ -129,14 +129,14 @@ static void lock_worker_op(struct worker_extra* we) {
} else {
assert(false);
}
toku_pthread_mutex_unlock(we->operation_lock_mutex);
if (0) toku_pthread_mutex_unlock(we->operation_lock_mutex);
}
}
static void unlock_worker_op(struct worker_extra* we) {
ARG arg = we->thread_arg;
if (arg->lock_type != STRESS_LOCK_NONE) {
toku_pthread_mutex_lock(we->operation_lock_mutex);
if (0) toku_pthread_mutex_lock(we->operation_lock_mutex);
if (arg->lock_type == STRESS_LOCK_SHARED) {
rwlock_read_unlock(we->operation_lock);
} else if (arg->lock_type == STRESS_LOCK_EXCL) {
@@ -144,7 +144,7 @@ static void unlock_worker_op(struct worker_extra* we) {
} else {
assert(false);
}
toku_pthread_mutex_unlock(we->operation_lock_mutex);
if (0) toku_pthread_mutex_unlock(we->operation_lock_mutex);
}
}
@@ -286,6 +286,20 @@ static int UU() nop(DB_TXN* UU(txn), ARG UU(arg), void* UU(operation_extra)) {
return 0;
}
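// The following two operations were added for the malloc/free perf tests:
// each call allocates a 256-byte block and immediately frees it, so the
// worker threads measure raw allocator throughput rather than dictionary work.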
static int UU() xmalloc_free_op(DB_TXN* UU(txn), ARG UU(arg), void* UU(operation_extra)) {
size_t s = 256;
void *p = toku_xmalloc(s);
toku_free(p);
return 0;
}
static int UU() malloc_free_op(DB_TXN* UU(txn), ARG UU(arg), void* UU(operation_extra)) {
size_t s = 256;
void *p = malloc(s);
free(p);
return 0;
}
static int UU() loader_op(DB_TXN* txn, ARG UU(arg), void* UU(operation_extra)) {
DB_ENV* env = arg->env;
int r;
......
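The two new callbacks differ only in the allocator they exercise: malloc_free_op times libc malloc/free directly, while xmalloc_free_op goes through the toku_xmalloc/toku_free wrappers, so running perf_malloc_free and perf_xmalloc_free with the same thread counts gives a side-by-side view of the wrapper overhead. In the usual xmalloc convention the wrapper never returns NULL and treats allocation failure as fatal; a minimal sketch of that style of wrapper, purely illustrative (the real toku_xmalloc may also do memory accounting, which this diff does not show):

/* Sketch of an xmalloc-style wrapper: aborts instead of returning NULL. */
#include <stdlib.h>
#include <assert.h>

static void *sketch_xmalloc(size_t size) {
    void *p = malloc(size);
    assert(p != NULL);   /* out of memory is treated as a fatal error */
    return p;
}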