Commit 635ac358 authored by Zardosht Kasheff's avatar Zardosht Kasheff Committed by Yoni Fogel

refs #6058, merge to main!

git-svn-id: file:///svn/toku/tokudb@54234 c7de825b-a66e-492c-adef-691d508d4ae1
parent 90585528
...@@ -249,6 +249,7 @@ static void print_defines (void) { ...@@ -249,6 +249,7 @@ static void print_defines (void) {
#endif #endif
dodefine_from_track(txn_flags, DB_INHERIT_ISOLATION); dodefine_from_track(txn_flags, DB_INHERIT_ISOLATION);
dodefine_from_track(txn_flags, DB_SERIALIZABLE); dodefine_from_track(txn_flags, DB_SERIALIZABLE);
dodefine_from_track(txn_flags, DB_TXN_READ_ONLY);
} }
/* TOKUDB specific error codes*/ /* TOKUDB specific error codes*/
......
...@@ -3935,7 +3935,10 @@ static int ...@@ -3935,7 +3935,10 @@ static int
does_txn_read_entry(TXNID id, TOKUTXN context) { does_txn_read_entry(TXNID id, TOKUTXN context) {
int rval; int rval;
TXNID oldest_live_in_snapshot = toku_get_oldest_in_live_root_txn_list(context); TXNID oldest_live_in_snapshot = toku_get_oldest_in_live_root_txn_list(context);
if (id < oldest_live_in_snapshot || id == context->txnid.parent_id64) { if (oldest_live_in_snapshot == TXNID_NONE && id < context->snapshot_txnid64) {
rval = TOKUDB_ACCEPT;
}
else if (id < oldest_live_in_snapshot || id == context->txnid.parent_id64) {
rval = TOKUDB_ACCEPT; rval = TOKUDB_ACCEPT;
} }
else if (id > context->snapshot_txnid64 || toku_is_txn_in_live_root_txn_list(*context->live_root_txn_list, id)) { else if (id > context->snapshot_txnid64 || toku_is_txn_in_live_root_txn_list(*context->live_root_txn_list, id)) {
......
...@@ -47,6 +47,7 @@ typedef struct txnid_pair_s { ...@@ -47,6 +47,7 @@ typedef struct txnid_pair_s {
#define TXNID_NONE_LIVING ((TXNID)0) #define TXNID_NONE_LIVING ((TXNID)0)
#define TXNID_NONE ((TXNID)0) #define TXNID_NONE ((TXNID)0)
#define TXNID_MAX ((TXNID)-1)
static const TXNID_PAIR TXNID_PAIR_NONE = { .parent_id64 = TXNID_NONE, .child_id64 = TXNID_NONE }; static const TXNID_PAIR TXNID_PAIR_NONE = { .parent_id64 = TXNID_NONE, .child_id64 = TXNID_NONE };
......
...@@ -170,6 +170,7 @@ struct tokutxn { ...@@ -170,6 +170,7 @@ struct tokutxn {
TXNID oldest_referenced_xid; TXNID oldest_referenced_xid;
bool begin_was_logged; bool begin_was_logged;
bool declared_read_only; // true if the txn was declared read only when began
// These are not read until a commit, prepare, or abort starts, and // These are not read until a commit, prepare, or abort starts, and
// they're "monotonic" (only go false->true) during operation: // they're "monotonic" (only go false->true) during operation:
bool do_fsync; bool do_fsync;
......
...@@ -412,6 +412,7 @@ generate_log_writer (void) { ...@@ -412,6 +412,7 @@ generate_log_writer (void) {
fprintf(cf, " //txn can be NULL during tests\n"); fprintf(cf, " //txn can be NULL during tests\n");
fprintf(cf, " //never null when not checkpoint.\n"); fprintf(cf, " //never null when not checkpoint.\n");
fprintf(cf, " if (txn && !txn->begin_was_logged) {\n"); fprintf(cf, " if (txn && !txn->begin_was_logged) {\n");
fprintf(cf, " invariant(!txn_declared_read_only(txn));\n");
fprintf(cf, " toku_maybe_log_begin_txn_for_write_operation(txn);\n"); fprintf(cf, " toku_maybe_log_begin_txn_for_write_operation(txn);\n");
fprintf(cf, " }\n"); fprintf(cf, " }\n");
break; break;
...@@ -419,6 +420,7 @@ generate_log_writer (void) { ...@@ -419,6 +420,7 @@ generate_log_writer (void) {
case ASSERT_BEGIN_WAS_LOGGED: { case ASSERT_BEGIN_WAS_LOGGED: {
fprintf(cf, " //txn can be NULL during tests\n"); fprintf(cf, " //txn can be NULL during tests\n");
fprintf(cf, " invariant(!txn || txn->begin_was_logged);\n"); fprintf(cf, " invariant(!txn || txn->begin_was_logged);\n");
fprintf(cf, " invariant(!txn || !txn_declared_read_only(txn));\n");
break; break;
} }
case IGNORE_LOG_BEGIN: break; case IGNORE_LOG_BEGIN: break;
......
...@@ -480,7 +480,16 @@ recover_transaction(TOKUTXN *txnp, TXNID_PAIR xid, TXNID_PAIR parentxid, TOKULOG ...@@ -480,7 +480,16 @@ recover_transaction(TOKUTXN *txnp, TXNID_PAIR xid, TXNID_PAIR parentxid, TOKULOG
toku_txnid2txn(logger, xid, &txn); toku_txnid2txn(logger, xid, &txn);
assert(txn==NULL); assert(txn==NULL);
} }
r = toku_txn_begin_with_xid(parent, &txn, logger, xid, TXN_SNAPSHOT_NONE, NULL, true); r = toku_txn_begin_with_xid(
parent,
&txn,
logger,
xid,
TXN_SNAPSHOT_NONE,
NULL,
true, // for_recovery
false // read_only
);
assert(r == 0); assert(r == 0);
// We only know about it because it was logged. Restore the log bit. // We only know about it because it was logged. Restore the log bit.
// Logging is 'off' but it will still set the bit. // Logging is 'off' but it will still set the bit.
......
...@@ -97,6 +97,7 @@ test_writer_priority_thread (void *arg) { ...@@ -97,6 +97,7 @@ test_writer_priority_thread (void *arg) {
static void static void
test_writer_priority (void) { test_writer_priority (void) {
struct rw_event rw_event, *rwe = &rw_event; struct rw_event rw_event, *rwe = &rw_event;
ZERO_STRUCT(rw_event);
int r; int r;
rw_event_init(rwe); rw_event_init(rwe);
...@@ -152,6 +153,7 @@ test_single_writer_thread (void *arg) { ...@@ -152,6 +153,7 @@ test_single_writer_thread (void *arg) {
static void static void
test_single_writer (void) { test_single_writer (void) {
struct rw_event rw_event, *rwe = &rw_event; struct rw_event rw_event, *rwe = &rw_event;
ZERO_STRUCT(rw_event);
int r; int r;
rw_event_init(rwe); rw_event_init(rwe);
......
...@@ -32,7 +32,7 @@ static void test_it (int N) { ...@@ -32,7 +32,7 @@ static void test_it (int N) {
r = toku_logger_open_rollback(logger, ct, true); CKERR(r); r = toku_logger_open_rollback(logger, ct, true); CKERR(r);
TOKUTXN txn; TOKUTXN txn;
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT); CKERR(r); r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
r = toku_open_ft_handle(FILENAME, 1, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r); r = toku_open_ft_handle(FILENAME, 1, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r);
...@@ -44,12 +44,12 @@ static void test_it (int N) { ...@@ -44,12 +44,12 @@ static void test_it (int N) {
unsigned int rands[N]; unsigned int rands[N];
for (int i=0; i<N; i++) { for (int i=0; i<N; i++) {
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT); CKERR(r); r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
r = toku_open_ft_handle(FILENAME, 0, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r); r = toku_open_ft_handle(FILENAME, 0, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r);
r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r); r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
toku_txn_close_txn(txn); toku_txn_close_txn(txn);
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT); CKERR(r); r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
char key[100],val[300]; char key[100],val[300];
DBT k, v; DBT k, v;
rands[i] = random(); rands[i] = random();
...@@ -67,12 +67,12 @@ static void test_it (int N) { ...@@ -67,12 +67,12 @@ static void test_it (int N) {
if (verbose) printf("i=%d\n", i); if (verbose) printf("i=%d\n", i);
} }
for (int i=0; i<N; i++) { for (int i=0; i<N; i++) {
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT); CKERR(r); r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
r = toku_open_ft_handle(FILENAME, 0, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r); r = toku_open_ft_handle(FILENAME, 0, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r);
r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r); r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
toku_txn_close_txn(txn); toku_txn_close_txn(txn);
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT); CKERR(r); r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
char key[100]; char key[100];
DBT k; DBT k;
snprintf(key, sizeof(key), "key%x.%x", rands[i], i); snprintf(key, sizeof(key), "key%x.%x", rands[i], i);
...@@ -92,7 +92,7 @@ static void test_it (int N) { ...@@ -92,7 +92,7 @@ static void test_it (int N) {
if (verbose) printf("d=%d\n", i); if (verbose) printf("d=%d\n", i);
} }
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT); CKERR(r); r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_ROOT, false); CKERR(r);
r = toku_open_ft_handle(FILENAME, 0, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r); r = toku_open_ft_handle(FILENAME, 0, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); CKERR(r);
r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r); r = toku_txn_commit_txn(txn, false, NULL, NULL); CKERR(r);
toku_txn_close_txn(txn); toku_txn_close_txn(txn);
......
...@@ -50,7 +50,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) { ...@@ -50,7 +50,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
assert(error == 0); assert(error == 0);
TOKUTXN txn = NULL; TOKUTXN txn = NULL;
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE); error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
assert(error == 0); assert(error == 0);
FT_HANDLE brt = NULL; FT_HANDLE brt = NULL;
...@@ -62,7 +62,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) { ...@@ -62,7 +62,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
toku_txn_close_txn(txn); toku_txn_close_txn(txn);
txn = NULL; txn = NULL;
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE); error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
assert(error == 0); assert(error == 0);
// insert keys 0, 1, 2, .. (n-1) // insert keys 0, 1, 2, .. (n-1)
...@@ -120,7 +120,7 @@ test_provdel(const char *logdir, const char *fname, int n) { ...@@ -120,7 +120,7 @@ test_provdel(const char *logdir, const char *fname, int n) {
assert(error == 0); assert(error == 0);
TOKUTXN txn = NULL; TOKUTXN txn = NULL;
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE); error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
assert(error == 0); assert(error == 0);
FT_HANDLE brt = NULL; FT_HANDLE brt = NULL;
...@@ -132,7 +132,7 @@ test_provdel(const char *logdir, const char *fname, int n) { ...@@ -132,7 +132,7 @@ test_provdel(const char *logdir, const char *fname, int n) {
toku_txn_close_txn(txn); toku_txn_close_txn(txn);
txn = NULL; txn = NULL;
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE); error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
assert(error == 0); assert(error == 0);
// del keys 0, 2, 4, ... // del keys 0, 2, 4, ...
...@@ -145,7 +145,7 @@ test_provdel(const char *logdir, const char *fname, int n) { ...@@ -145,7 +145,7 @@ test_provdel(const char *logdir, const char *fname, int n) {
} }
TOKUTXN cursortxn = NULL; TOKUTXN cursortxn = NULL;
error = toku_txn_begin_txn(NULL, NULL, &cursortxn, logger, TXN_SNAPSHOT_NONE); error = toku_txn_begin_txn(NULL, NULL, &cursortxn, logger, TXN_SNAPSHOT_NONE, false);
assert(error == 0); assert(error == 0);
LE_CURSOR cursor = NULL; LE_CURSOR cursor = NULL;
......
...@@ -54,7 +54,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) { ...@@ -54,7 +54,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
assert(error == 0); assert(error == 0);
TOKUTXN txn = NULL; TOKUTXN txn = NULL;
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE); error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
assert(error == 0); assert(error == 0);
FT_HANDLE brt = NULL; FT_HANDLE brt = NULL;
...@@ -66,7 +66,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) { ...@@ -66,7 +66,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
toku_txn_close_txn(txn); toku_txn_close_txn(txn);
txn = NULL; txn = NULL;
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE); error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
assert(error == 0); assert(error == 0);
// insert keys 0, 1, 2, .. (n-1) // insert keys 0, 1, 2, .. (n-1)
......
...@@ -51,7 +51,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) { ...@@ -51,7 +51,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
assert(error == 0); assert(error == 0);
TOKUTXN txn = NULL; TOKUTXN txn = NULL;
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE); error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
assert(error == 0); assert(error == 0);
FT_HANDLE brt = NULL; FT_HANDLE brt = NULL;
...@@ -63,7 +63,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) { ...@@ -63,7 +63,7 @@ create_populate_tree(const char *logdir, const char *fname, int n) {
toku_txn_close_txn(txn); toku_txn_close_txn(txn);
txn = NULL; txn = NULL;
error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE); error = toku_txn_begin_txn(NULL, NULL, &txn, logger, TXN_SNAPSHOT_NONE, false);
assert(error == 0); assert(error == 0);
// insert keys 0, 1, 2, .. (n-1) // insert keys 0, 1, 2, .. (n-1)
......
...@@ -47,7 +47,8 @@ void txn_child_manager_unit_test::run_child_txn_test() { ...@@ -47,7 +47,8 @@ void txn_child_manager_unit_test::run_child_txn_test() {
NULL, NULL,
&root_txn, &root_txn,
logger, logger,
TXN_SNAPSHOT_CHILD TXN_SNAPSHOT_CHILD,
false
); );
CKERR(r); CKERR(r);
// test starting a child txn // test starting a child txn
...@@ -57,7 +58,8 @@ void txn_child_manager_unit_test::run_child_txn_test() { ...@@ -57,7 +58,8 @@ void txn_child_manager_unit_test::run_child_txn_test() {
root_txn, root_txn,
&child_txn, &child_txn,
logger, logger,
TXN_SNAPSHOT_CHILD TXN_SNAPSHOT_CHILD,
false
); );
CKERR(r); CKERR(r);
...@@ -89,7 +91,8 @@ void txn_child_manager_unit_test::run_test() { ...@@ -89,7 +91,8 @@ void txn_child_manager_unit_test::run_test() {
NULL, NULL,
&root_txn, &root_txn,
logger, logger,
TXN_SNAPSHOT_ROOT TXN_SNAPSHOT_ROOT,
false
); );
CKERR(r); CKERR(r);
txn_child_manager* cm = root_txn->child_manager; txn_child_manager* cm = root_txn->child_manager;
...@@ -108,7 +111,8 @@ void txn_child_manager_unit_test::run_test() { ...@@ -108,7 +111,8 @@ void txn_child_manager_unit_test::run_test() {
root_txn, root_txn,
&child_txn, &child_txn,
logger, logger,
TXN_SNAPSHOT_ROOT TXN_SNAPSHOT_ROOT,
false
); );
CKERR(r); CKERR(r);
assert(child_txn->child_manager == cm); assert(child_txn->child_manager == cm);
...@@ -128,7 +132,8 @@ void txn_child_manager_unit_test::run_test() { ...@@ -128,7 +132,8 @@ void txn_child_manager_unit_test::run_test() {
child_txn, child_txn,
&grandchild_txn, &grandchild_txn,
logger, logger,
TXN_SNAPSHOT_ROOT TXN_SNAPSHOT_ROOT,
false
); );
CKERR(r); CKERR(r);
assert(grandchild_txn->child_manager == cm); assert(grandchild_txn->child_manager == cm);
...@@ -153,7 +158,8 @@ void txn_child_manager_unit_test::run_test() { ...@@ -153,7 +158,8 @@ void txn_child_manager_unit_test::run_test() {
child_txn, child_txn,
&grandchild_txn, &grandchild_txn,
logger, logger,
TXN_SNAPSHOT_ROOT TXN_SNAPSHOT_ROOT,
false
); );
CKERR(r); CKERR(r);
assert(grandchild_txn->child_manager == cm); assert(grandchild_txn->child_manager == cm);
...@@ -177,7 +183,8 @@ void txn_child_manager_unit_test::run_test() { ...@@ -177,7 +183,8 @@ void txn_child_manager_unit_test::run_test() {
xid, xid,
TXN_SNAPSHOT_NONE, TXN_SNAPSHOT_NONE,
NULL, NULL,
true // for recovery true, // for recovery
false // read_only
); );
assert(recovery_txn->child_manager == cm); assert(recovery_txn->child_manager == cm);
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
static void do_txn(TOKULOGGER logger, bool readonly) { static void do_txn(TOKULOGGER logger, bool readonly) {
int r; int r;
TOKUTXN txn; TOKUTXN txn;
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE); r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE, false);
CKERR(r); CKERR(r);
if (!readonly) { if (!readonly) {
...@@ -37,7 +37,7 @@ static void test_xid_lsn_independent(int N) { ...@@ -37,7 +37,7 @@ static void test_xid_lsn_independent(int N) {
FT_HANDLE brt; FT_HANDLE brt;
TOKUTXN txn; TOKUTXN txn;
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE); r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE, false);
CKERR(r); CKERR(r);
r = toku_open_ft_handle("ftfile", 1, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun); r = toku_open_ft_handle("ftfile", 1, &brt, 1024, 256, TOKU_DEFAULT_COMPRESSION_METHOD, ct, txn, toku_builtin_compare_fun);
...@@ -47,7 +47,7 @@ static void test_xid_lsn_independent(int N) { ...@@ -47,7 +47,7 @@ static void test_xid_lsn_independent(int N) {
CKERR(r); CKERR(r);
toku_txn_close_txn(txn); toku_txn_close_txn(txn);
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE); r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn, logger, TXN_SNAPSHOT_NONE, false);
CKERR(r); CKERR(r);
TXNID xid_first = txn->txnid.parent_id64; TXNID xid_first = txn->txnid.parent_id64;
unsigned int rands[N]; unsigned int rands[N];
...@@ -62,7 +62,7 @@ static void test_xid_lsn_independent(int N) { ...@@ -62,7 +62,7 @@ static void test_xid_lsn_independent(int N) {
} }
{ {
TOKUTXN txn2; TOKUTXN txn2;
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn2, logger, TXN_SNAPSHOT_NONE); r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn2, logger, TXN_SNAPSHOT_NONE, false);
CKERR(r); CKERR(r);
// Verify the txnid has gone up only by one (even though many log entries were done) // Verify the txnid has gone up only by one (even though many log entries were done)
invariant(txn2->txnid.parent_id64 == xid_first + 1); invariant(txn2->txnid.parent_id64 == xid_first + 1);
...@@ -77,7 +77,7 @@ static void test_xid_lsn_independent(int N) { ...@@ -77,7 +77,7 @@ static void test_xid_lsn_independent(int N) {
//TODO(yoni) #5067 will break this portion of the test. (End ids are also assigned, so it would increase by 4 instead of 2.) //TODO(yoni) #5067 will break this portion of the test. (End ids are also assigned, so it would increase by 4 instead of 2.)
// Verify the txnid has gone up only by two (even though many log entries were done) // Verify the txnid has gone up only by two (even though many log entries were done)
TOKUTXN txn3; TOKUTXN txn3;
r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn3, logger, TXN_SNAPSHOT_NONE); r = toku_txn_begin_txn((DB_TXN*)NULL, (TOKUTXN)0, &txn3, logger, TXN_SNAPSHOT_NONE, false);
CKERR(r); CKERR(r);
invariant(txn3->txnid.parent_id64 == xid_first + 2); invariant(txn3->txnid.parent_id64 == xid_first + 2);
r = toku_txn_commit_txn(txn3, false, NULL, NULL); r = toku_txn_commit_txn(txn3, false, NULL, NULL);
...@@ -173,7 +173,7 @@ static void test_xid_lsn_independent_parents(int N) { ...@@ -173,7 +173,7 @@ static void test_xid_lsn_independent_parents(int N) {
ZERO_ARRAY(txns_hack); ZERO_ARRAY(txns_hack);
for (int i = 0; i < N; i++) { for (int i = 0; i < N; i++) {
r = toku_txn_begin_txn((DB_TXN*)NULL, txns[i-1], &txns[i], logger, TXN_SNAPSHOT_NONE); r = toku_txn_begin_txn((DB_TXN*)NULL, txns[i-1], &txns[i], logger, TXN_SNAPSHOT_NONE, false);
CKERR(r); CKERR(r);
if (i < num_non_cascade) { if (i < num_non_cascade) {
......
...@@ -37,6 +37,7 @@ txn_status_init(void) { ...@@ -37,6 +37,7 @@ txn_status_init(void) {
// Note, this function initializes the keyname, type, and legend fields. // Note, this function initializes the keyname, type, and legend fields.
// Value fields are initialized to zero by compiler. // Value fields are initialized to zero by compiler.
STATUS_INIT(TXN_BEGIN, PARCOUNT, "begin"); STATUS_INIT(TXN_BEGIN, PARCOUNT, "begin");
STATUS_INIT(TXN_READ_BEGIN, PARCOUNT, "begin read only");
STATUS_INIT(TXN_COMMIT, PARCOUNT, "successful commits"); STATUS_INIT(TXN_COMMIT, PARCOUNT, "successful commits");
STATUS_INIT(TXN_ABORT, PARCOUNT, "aborts"); STATUS_INIT(TXN_ABORT, PARCOUNT, "aborts");
txn_status.initialized = true; txn_status.initialized = true;
...@@ -77,19 +78,52 @@ toku_txn_get_root_id(TOKUTXN txn) ...@@ -77,19 +78,52 @@ toku_txn_get_root_id(TOKUTXN txn)
return txn->txnid.parent_id64; return txn->txnid.parent_id64;
} }
bool txn_declared_read_only(TOKUTXN txn) {
return txn->declared_read_only;
}
int int
toku_txn_begin_txn ( toku_txn_begin_txn (
DB_TXN *container_db_txn, DB_TXN *container_db_txn,
TOKUTXN parent_tokutxn, TOKUTXN parent_tokutxn,
TOKUTXN *tokutxn, TOKUTXN *tokutxn,
TOKULOGGER logger, TOKULOGGER logger,
TXN_SNAPSHOT_TYPE snapshot_type TXN_SNAPSHOT_TYPE snapshot_type,
bool read_only
) )
{ {
int r = toku_txn_begin_with_xid(parent_tokutxn, tokutxn, logger, TXNID_PAIR_NONE, snapshot_type, container_db_txn, false); int r = toku_txn_begin_with_xid(
parent_tokutxn,
tokutxn,
logger,
TXNID_PAIR_NONE,
snapshot_type,
container_db_txn,
false, // for_recovery
read_only
);
return r; return r;
} }
static void
txn_create_xids(TOKUTXN txn, TOKUTXN parent) {
XIDS xids;
XIDS parent_xids;
if (parent == NULL) {
parent_xids = xids_get_root_xids();
} else {
parent_xids = parent->xids;
}
xids_create_unknown_child(parent_xids, &xids);
TXNID finalized_xid = (parent == NULL) ? txn->txnid.parent_id64 : txn->txnid.child_id64;
xids_finalize_with_child(xids, finalized_xid);
txn->xids = xids;
}
// Allocate and initialize a txn
static void toku_txn_create_txn(TOKUTXN *txn_ptr, TOKUTXN parent, TOKULOGGER logger, TXN_SNAPSHOT_TYPE snapshot_type, DB_TXN *container_db_txn, bool for_checkpoint, bool read_only);
int int
toku_txn_begin_with_xid ( toku_txn_begin_with_xid (
TOKUTXN parent, TOKUTXN parent,
...@@ -98,24 +132,22 @@ toku_txn_begin_with_xid ( ...@@ -98,24 +132,22 @@ toku_txn_begin_with_xid (
TXNID_PAIR xid, TXNID_PAIR xid,
TXN_SNAPSHOT_TYPE snapshot_type, TXN_SNAPSHOT_TYPE snapshot_type,
DB_TXN *container_db_txn, DB_TXN *container_db_txn,
bool for_recovery bool for_recovery,
bool read_only
) )
{ {
int r = 0; int r = 0;
TOKUTXN txn; TOKUTXN txn;
XIDS xids; // check for case where we are trying to
// Do as much (safe) work as possible before serializing on the txn_manager lock. // create too many nested transactions
XIDS parent_xids; if (!read_only && parent && !xids_can_create_child(parent->xids)) {
if (parent == NULL) { r = EINVAL;
parent_xids = xids_get_root_xids(); goto exit;
} else {
parent_xids = parent->xids;
} }
r = xids_create_unknown_child(parent_xids, &xids); if (read_only && parent) {
if (r != 0) { invariant(txn_declared_read_only(parent));
return r;
} }
toku_txn_create_txn(&txn, parent, logger, snapshot_type, container_db_txn, xids, for_recovery); toku_txn_create_txn(&txn, parent, logger, snapshot_type, container_db_txn, for_recovery, read_only);
// txnid64, snapshot_txnid64 // txnid64, snapshot_txnid64
// will be set in here. // will be set in here.
if (for_recovery) { if (for_recovery) {
...@@ -139,7 +171,8 @@ toku_txn_begin_with_xid ( ...@@ -139,7 +171,8 @@ toku_txn_begin_with_xid (
toku_txn_manager_start_txn( toku_txn_manager_start_txn(
txn, txn,
logger->txn_manager, logger->txn_manager,
snapshot_type snapshot_type,
read_only
); );
} }
else { else {
...@@ -152,10 +185,12 @@ toku_txn_begin_with_xid ( ...@@ -152,10 +185,12 @@ toku_txn_begin_with_xid (
); );
} }
} }
TXNID finalized_xid = (parent == NULL) ? txn->txnid.parent_id64 : txn->txnid.child_id64; if (!read_only) {
xids_finalize_with_child(txn->xids, finalized_xid); // this call will set txn->xids
txn_create_xids(txn, parent);
}
*txnp = txn; *txnp = txn;
exit:
return r; return r;
} }
...@@ -174,14 +209,14 @@ static void invalidate_xa_xid (TOKU_XA_XID *xid) { ...@@ -174,14 +209,14 @@ static void invalidate_xa_xid (TOKU_XA_XID *xid) {
xid->formatID = -1; // According to the XA spec, -1 means "invalid data" xid->formatID = -1; // According to the XA spec, -1 means "invalid data"
} }
void toku_txn_create_txn ( static void toku_txn_create_txn (
TOKUTXN *tokutxn, TOKUTXN *tokutxn,
TOKUTXN parent_tokutxn, TOKUTXN parent_tokutxn,
TOKULOGGER logger, TOKULOGGER logger,
TXN_SNAPSHOT_TYPE snapshot_type, TXN_SNAPSHOT_TYPE snapshot_type,
DB_TXN *container_db_txn, DB_TXN *container_db_txn,
XIDS xids, bool for_recovery,
bool for_recovery bool read_only
) )
{ {
assert(logger->rollback_cachefile); assert(logger->rollback_cachefile);
...@@ -216,9 +251,10 @@ static txn_child_manager tcm; ...@@ -216,9 +251,10 @@ static txn_child_manager tcm;
.child_manager = NULL, .child_manager = NULL,
.container_db_txn = container_db_txn, .container_db_txn = container_db_txn,
.live_root_txn_list = nullptr, .live_root_txn_list = nullptr,
.xids = xids, .xids = NULL,
.oldest_referenced_xid = TXNID_NONE, .oldest_referenced_xid = TXNID_NONE,
.begin_was_logged = false, .begin_was_logged = false,
.declared_read_only = read_only,
.do_fsync = false, .do_fsync = false,
.force_fsync_on_commit = false, .force_fsync_on_commit = false,
.do_fsync_lsn = ZERO_LSN, .do_fsync_lsn = ZERO_LSN,
...@@ -257,7 +293,12 @@ static txn_child_manager tcm; ...@@ -257,7 +293,12 @@ static txn_child_manager tcm;
*tokutxn = result; *tokutxn = result;
STATUS_INC(TXN_BEGIN, 1); if (read_only) {
STATUS_INC(TXN_READ_BEGIN, 1);
}
else {
STATUS_INC(TXN_BEGIN, 1);
}
} }
void void
...@@ -540,7 +581,9 @@ void toku_txn_complete_txn(TOKUTXN txn) { ...@@ -540,7 +581,9 @@ void toku_txn_complete_txn(TOKUTXN txn) {
void toku_txn_destroy_txn(TOKUTXN txn) { void toku_txn_destroy_txn(TOKUTXN txn) {
txn->open_fts.destroy(); txn->open_fts.destroy();
xids_destroy(&txn->xids); if (txn->xids) {
xids_destroy(&txn->xids);
}
toku_mutex_destroy(&txn->txn_lock); toku_mutex_destroy(&txn->txn_lock);
toku_mutex_destroy(&txn->state_lock); toku_mutex_destroy(&txn->state_lock);
toku_cond_destroy(&txn->state_cond); toku_cond_destroy(&txn->state_cond);
...@@ -557,10 +600,14 @@ void toku_txn_force_fsync_on_commit(TOKUTXN txn) { ...@@ -557,10 +600,14 @@ void toku_txn_force_fsync_on_commit(TOKUTXN txn) {
} }
TXNID toku_get_oldest_in_live_root_txn_list(TOKUTXN txn) { TXNID toku_get_oldest_in_live_root_txn_list(TOKUTXN txn) {
invariant(txn->live_root_txn_list->size()>0);
TXNID xid; TXNID xid;
int r = txn->live_root_txn_list->fetch(0, &xid); if (txn->live_root_txn_list->size()>0) {
assert_zero(r); int r = txn->live_root_txn_list->fetch(0, &xid);
assert_zero(r);
}
else {
xid = TXNID_NONE;
}
return xid; return xid;
} }
......
...@@ -29,13 +29,15 @@ void toku_txn_lock(TOKUTXN txn); ...@@ -29,13 +29,15 @@ void toku_txn_lock(TOKUTXN txn);
void toku_txn_unlock(TOKUTXN txn); void toku_txn_unlock(TOKUTXN txn);
uint64_t toku_txn_get_root_id(TOKUTXN txn); uint64_t toku_txn_get_root_id(TOKUTXN txn);
bool txn_declared_read_only(TOKUTXN txn);
int toku_txn_begin_txn ( int toku_txn_begin_txn (
DB_TXN *container_db_txn, DB_TXN *container_db_txn,
TOKUTXN parent_tokutxn, TOKUTXN parent_tokutxn,
TOKUTXN *tokutxn, TOKUTXN *tokutxn,
TOKULOGGER logger, TOKULOGGER logger,
TXN_SNAPSHOT_TYPE snapshot_type TXN_SNAPSHOT_TYPE snapshot_type,
bool read_only
); );
DB_TXN * toku_txn_get_container_db_txn (TOKUTXN tokutxn); DB_TXN * toku_txn_get_container_db_txn (TOKUTXN tokutxn);
...@@ -49,11 +51,10 @@ int toku_txn_begin_with_xid ( ...@@ -49,11 +51,10 @@ int toku_txn_begin_with_xid (
TXNID_PAIR xid, TXNID_PAIR xid,
TXN_SNAPSHOT_TYPE snapshot_type, TXN_SNAPSHOT_TYPE snapshot_type,
DB_TXN *container_db_txn, DB_TXN *container_db_txn,
bool for_recovery bool for_recovery,
bool read_only
); );
// Allocate and initialize a txn
void toku_txn_create_txn(TOKUTXN *txn_ptr, TOKUTXN parent, TOKULOGGER logger, TXN_SNAPSHOT_TYPE snapshot_type, DB_TXN *container_db_txn, XIDS xids, bool for_checkpoint);
void toku_txn_update_xids_in_txn(TOKUTXN txn, TXNID xid); void toku_txn_update_xids_in_txn(TOKUTXN txn, TXNID xid);
int toku_txn_load_txninfo (TOKUTXN txn, TXNINFO info); int toku_txn_load_txninfo (TOKUTXN txn, TXNINFO info);
...@@ -94,6 +95,7 @@ void toku_txn_force_fsync_on_commit(TOKUTXN txn); ...@@ -94,6 +95,7 @@ void toku_txn_force_fsync_on_commit(TOKUTXN txn);
typedef enum { typedef enum {
TXN_BEGIN, // total number of transactions begun (does not include recovered txns) TXN_BEGIN, // total number of transactions begun (does not include recovered txns)
TXN_READ_BEGIN, // total number of read only transactions begun (does not include recovered txns)
TXN_COMMIT, // successful commits TXN_COMMIT, // successful commits
TXN_ABORT, TXN_ABORT,
TXN_STATUS_NUM_ROWS TXN_STATUS_NUM_ROWS
......
...@@ -192,9 +192,13 @@ void toku_txn_manager_init(TXN_MANAGER* txn_managerp) { ...@@ -192,9 +192,13 @@ void toku_txn_manager_init(TXN_MANAGER* txn_managerp) {
void toku_txn_manager_destroy(TXN_MANAGER txn_manager) { void toku_txn_manager_destroy(TXN_MANAGER txn_manager) {
toku_mutex_destroy(&txn_manager->txn_manager_lock); toku_mutex_destroy(&txn_manager->txn_manager_lock);
invariant(txn_manager->live_root_txns.size() == 0);
txn_manager->live_root_txns.destroy(); txn_manager->live_root_txns.destroy();
invariant(txn_manager->live_root_ids.size() == 0);
txn_manager->live_root_ids.destroy(); txn_manager->live_root_ids.destroy();
invariant(txn_manager->snapshot_txnids.size() == 0);
txn_manager->snapshot_txnids.destroy(); txn_manager->snapshot_txnids.destroy();
invariant(txn_manager->referenced_xids.size() == 0);
txn_manager->referenced_xids.destroy(); txn_manager->referenced_xids.destroy();
toku_free(txn_manager); toku_free(txn_manager);
} }
...@@ -264,19 +268,33 @@ max_xid(TXNID a, TXNID b) { ...@@ -264,19 +268,33 @@ max_xid(TXNID a, TXNID b) {
} }
static TXNID get_oldest_referenced_xid_unlocked(TXN_MANAGER txn_manager) { static TXNID get_oldest_referenced_xid_unlocked(TXN_MANAGER txn_manager) {
TXNID oldest_referenced_xid = TXNID_NONE_LIVING; TXNID oldest_referenced_xid = TXNID_MAX;
int r = txn_manager->live_root_ids.fetch(0, &oldest_referenced_xid); int r;
// this function should only be called when we know there is at least if (txn_manager->live_root_ids.size() > 0) {
// one live transaction r = txn_manager->live_root_ids.fetch(0, &oldest_referenced_xid);
invariant_zero(r); // this function should only be called when we know there is at least
// one live transaction
struct referenced_xid_tuple* tuple; invariant_zero(r);
}
if (txn_manager->referenced_xids.size() > 0) { if (txn_manager->referenced_xids.size() > 0) {
struct referenced_xid_tuple* tuple;
r = txn_manager->referenced_xids.fetch(0, &tuple); r = txn_manager->referenced_xids.fetch(0, &tuple);
if (r == 0 && tuple->begin_id < oldest_referenced_xid) { if (r == 0 && tuple->begin_id < oldest_referenced_xid) {
oldest_referenced_xid = tuple->begin_id; oldest_referenced_xid = tuple->begin_id;
} }
} }
if (txn_manager->snapshot_txnids.size() > 0) {
TXNID id;
r = txn_manager->snapshot_txnids.fetch(0, &id);
if (r == 0 && id < oldest_referenced_xid) {
oldest_referenced_xid = id;
}
}
if (txn_manager->last_xid < oldest_referenced_xid) {
oldest_referenced_xid = txn_manager->last_xid;
}
paranoid_invariant(oldest_referenced_xid != TXNID_MAX);
return oldest_referenced_xid; return oldest_referenced_xid;
} }
...@@ -492,7 +510,8 @@ void toku_txn_manager_start_txn_for_recovery( ...@@ -492,7 +510,8 @@ void toku_txn_manager_start_txn_for_recovery(
void toku_txn_manager_start_txn( void toku_txn_manager_start_txn(
TOKUTXN txn, TOKUTXN txn,
TXN_MANAGER txn_manager, TXN_MANAGER txn_manager,
TXN_SNAPSHOT_TYPE snapshot_type TXN_SNAPSHOT_TYPE snapshot_type,
bool read_only
) )
{ {
int r; int r;
...@@ -528,13 +547,15 @@ void toku_txn_manager_start_txn( ...@@ -528,13 +547,15 @@ void toku_txn_manager_start_txn(
// is taken into account when the transaction is closed. // is taken into account when the transaction is closed.
// add ancestor information, and maintain global live root txn list // add ancestor information, and maintain global live root txn list
xid = ++txn_manager->last_xid; xid = ++txn_manager->last_xid; // we always need an ID, needed for lock tree
toku_txn_update_xids_in_txn(txn, xid); toku_txn_update_xids_in_txn(txn, xid);
uint32_t idx = txn_manager->live_root_txns.size(); if (!read_only) {
r = txn_manager->live_root_txns.insert_at(txn, idx); uint32_t idx = txn_manager->live_root_txns.size();
invariant_zero(r); r = txn_manager->live_root_txns.insert_at(txn, idx);
r = txn_manager->live_root_ids.insert_at(txn->txnid.parent_id64, idx); invariant_zero(r);
invariant_zero(r); r = txn_manager->live_root_ids.insert_at(txn->txnid.parent_id64, idx);
invariant_zero(r);
}
txn->oldest_referenced_xid = get_oldest_referenced_xid_unlocked(txn_manager); txn->oldest_referenced_xid = get_oldest_referenced_xid_unlocked(txn_manager);
if (needs_snapshot) { if (needs_snapshot) {
...@@ -548,6 +569,7 @@ void toku_txn_manager_start_txn( ...@@ -548,6 +569,7 @@ void toku_txn_manager_start_txn(
verify_snapshot_system(txn_manager); verify_snapshot_system(txn_manager);
} }
txn_manager_unlock(txn_manager); txn_manager_unlock(txn_manager);
return;
} }
TXNID TXNID
...@@ -593,37 +615,39 @@ void toku_txn_manager_finish_txn(TXN_MANAGER txn_manager, TOKUTXN txn) { ...@@ -593,37 +615,39 @@ void toku_txn_manager_finish_txn(TXN_MANAGER txn_manager, TOKUTXN txn) {
); );
} }
uint32_t idx; if (!txn_declared_read_only(txn)) {
//Remove txn from list of live root txns uint32_t idx;
TOKUTXN txnagain; //Remove txn from list of live root txns
r = txn_manager->live_root_txns.find_zero<TOKUTXN, find_xid>(txn, &txnagain, &idx); TOKUTXN txnagain;
invariant_zero(r); r = txn_manager->live_root_txns.find_zero<TOKUTXN, find_xid>(txn, &txnagain, &idx);
invariant(txn==txnagain); invariant_zero(r);
invariant(txn==txnagain);
r = txn_manager->live_root_txns.delete_at(idx); r = txn_manager->live_root_txns.delete_at(idx);
invariant_zero(r); invariant_zero(r);
r = txn_manager->live_root_ids.delete_at(idx); r = txn_manager->live_root_ids.delete_at(idx);
invariant_zero(r); invariant_zero(r);
if (!toku_txn_is_read_only(txn) || garbage_collection_debug) { if (!toku_txn_is_read_only(txn) || garbage_collection_debug) {
if (!is_snapshot) { if (!is_snapshot) {
// //
// If it's a snapshot, we already calculated index_in_snapshot_txnids. // If it's a snapshot, we already calculated index_in_snapshot_txnids.
// Otherwise, calculate it now. // Otherwise, calculate it now.
// //
r = txn_manager->snapshot_txnids.find_zero<TXNID, toku_find_xid_by_xid>(txn->txnid.parent_id64, nullptr, &index_in_snapshot_txnids); r = txn_manager->snapshot_txnids.find_zero<TXNID, toku_find_xid_by_xid>(txn->txnid.parent_id64, nullptr, &index_in_snapshot_txnids);
invariant(r == DB_NOTFOUND); invariant(r == DB_NOTFOUND);
} }
uint32_t num_references = txn_manager->snapshot_txnids.size() - index_in_snapshot_txnids; uint32_t num_references = txn_manager->snapshot_txnids.size() - index_in_snapshot_txnids;
if (num_references > 0) { if (num_references > 0) {
// This transaction exists in a live list of another transaction. // This transaction exists in a live list of another transaction.
struct referenced_xid_tuple tuple = { struct referenced_xid_tuple tuple = {
.begin_id = txn->txnid.parent_id64, .begin_id = txn->txnid.parent_id64,
.end_id = ++txn_manager->last_xid, .end_id = ++txn_manager->last_xid,
.references = num_references .references = num_references
}; };
r = txn_manager->referenced_xids.insert<TXNID, find_tuple_by_xid>(tuple, txn->txnid.parent_id64, nullptr); r = txn_manager->referenced_xids.insert<TXNID, find_tuple_by_xid>(tuple, txn->txnid.parent_id64, nullptr);
lazy_assert_zero(r); lazy_assert_zero(r);
}
} }
} }
...@@ -638,6 +662,7 @@ void toku_txn_manager_finish_txn(TXN_MANAGER txn_manager, TOKUTXN txn) { ...@@ -638,6 +662,7 @@ void toku_txn_manager_finish_txn(TXN_MANAGER txn_manager, TOKUTXN txn) {
txn->live_root_txn_list->destroy(); txn->live_root_txn_list->destroy();
toku_free(txn->live_root_txn_list); toku_free(txn->live_root_txn_list);
} }
return;
} }
void toku_txn_manager_clone_state_for_gc( void toku_txn_manager_clone_state_for_gc(
......
...@@ -58,7 +58,8 @@ void toku_txn_manager_handle_snapshot_destroy_for_child_txn( ...@@ -58,7 +58,8 @@ void toku_txn_manager_handle_snapshot_destroy_for_child_txn(
void toku_txn_manager_start_txn( void toku_txn_manager_start_txn(
TOKUTXN txn, TOKUTXN txn,
TXN_MANAGER txn_manager, TXN_MANAGER txn_manager,
TXN_SNAPSHOT_TYPE snapshot_type TXN_SNAPSHOT_TYPE snapshot_type,
bool read_only
); );
void toku_txn_manager_start_txn_for_recovery( void toku_txn_manager_start_txn_for_recovery(
......
...@@ -62,6 +62,12 @@ xids_get_root_xids(void) { ...@@ -62,6 +62,12 @@ xids_get_root_xids(void) {
return rval; return rval;
} }
bool
xids_can_create_child(XIDS xids) {
invariant(xids->num_xids < MAX_TRANSACTION_RECORDS);
return (xids->num_xids + 1) != MAX_TRANSACTION_RECORDS;
}
int int
xids_create_unknown_child(XIDS parent_xids, XIDS *xids_p) { xids_create_unknown_child(XIDS parent_xids, XIDS *xids_p) {
...@@ -70,17 +76,15 @@ xids_create_unknown_child(XIDS parent_xids, XIDS *xids_p) { ...@@ -70,17 +76,15 @@ xids_create_unknown_child(XIDS parent_xids, XIDS *xids_p) {
int rval; int rval;
invariant(parent_xids); invariant(parent_xids);
uint32_t num_child_xids = parent_xids->num_xids + 1; uint32_t num_child_xids = parent_xids->num_xids + 1;
invariant(num_child_xids > 0); // assumes that caller has verified that num_child_xids will
invariant(num_child_xids <= MAX_TRANSACTION_RECORDS); // be less than MAX_TRANSACTIN_RECORDS
if (num_child_xids == MAX_TRANSACTION_RECORDS) rval = EINVAL; invariant(num_child_xids < MAX_TRANSACTION_RECORDS);
else { size_t new_size = sizeof(*parent_xids) + num_child_xids*sizeof(parent_xids->ids[0]);
size_t new_size = sizeof(*parent_xids) + num_child_xids*sizeof(parent_xids->ids[0]); XIDS CAST_FROM_VOIDP(xids, toku_xmalloc(new_size));
XIDS CAST_FROM_VOIDP(xids, toku_xmalloc(new_size)); // Clone everything (parent does not have the newest xid).
// Clone everything (parent does not have the newest xid). memcpy(xids, parent_xids, new_size - sizeof(xids->ids[0]));
memcpy(xids, parent_xids, new_size - sizeof(xids->ids[0])); *xids_p = xids;
*xids_p = xids; rval = 0;
rval = 0;
}
return rval; return rval;
} }
...@@ -99,11 +103,13 @@ int ...@@ -99,11 +103,13 @@ int
xids_create_child(XIDS parent_xids, // xids list for parent transaction xids_create_child(XIDS parent_xids, // xids list for parent transaction
XIDS * xids_p, // xids list created XIDS * xids_p, // xids list created
TXNID this_xid) { // xid of this transaction (new innermost) TXNID this_xid) { // xid of this transaction (new innermost)
int rval = xids_create_unknown_child(parent_xids, xids_p); bool can_create_child = xids_can_create_child(parent_xids);
if (rval == 0) { if (!can_create_child) {
xids_finalize_with_child(*xids_p, this_xid); return EINVAL;
} }
return rval; xids_create_unknown_child(parent_xids, xids_p);
xids_finalize_with_child(*xids_p, this_xid);
return 0;
} }
void void
......
...@@ -28,6 +28,8 @@ ...@@ -28,6 +28,8 @@
//Retrieve an XIDS representing the root transaction. //Retrieve an XIDS representing the root transaction.
XIDS xids_get_root_xids(void); XIDS xids_get_root_xids(void);
bool xids_can_create_child(XIDS xids);
void xids_cpy(XIDS target, XIDS source); void xids_cpy(XIDS target, XIDS source);
//Creates an XIDS representing this transaction. //Creates an XIDS representing this transaction.
......
...@@ -161,6 +161,7 @@ toku_indexer_create_indexer(DB_ENV *env, ...@@ -161,6 +161,7 @@ toku_indexer_create_indexer(DB_ENV *env,
{ {
int rval; int rval;
DB_INDEXER *indexer = 0; // set later when created DB_INDEXER *indexer = 0; // set later when created
HANDLE_READ_ONLY_TXN(txn);
*indexerp = NULL; *indexerp = NULL;
......
...@@ -169,6 +169,7 @@ toku_loader_create_loader(DB_ENV *env, ...@@ -169,6 +169,7 @@ toku_loader_create_loader(DB_ENV *env,
uint32_t loader_flags, uint32_t loader_flags,
bool check_empty) { bool check_empty) {
int rval; int rval;
HANDLE_READ_ONLY_TXN(txn);
*blp = NULL; // set later when created *blp = NULL; // set later when created
......
set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS _GNU_SOURCE DONT_DEPRECATE_ERRNO) set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS _GNU_SOURCE DONT_DEPRECATE_ERRNO)
if(BUILD_TESTING OR BUILD_SRC_TESTS) if(BUILD_TESTING OR BUILD_SRC_TESTS)
function(add_ydb_test bin) function(add_ydb_test bin)
add_toku_test(ydb ${bin} ${ARGN}) add_toku_test(ydb ${bin} ${ARGN})
endfunction(add_ydb_test) endfunction(add_ydb_test)
function(add_ydb_test_aux name bin) function(add_ydb_test_aux name bin)
add_toku_test_aux(ydb ${name} ${bin} ${ARGN}) add_toku_test_aux(ydb ${name} ${bin} ${ARGN})
endfunction(add_ydb_test_aux) endfunction(add_ydb_test_aux)
function(add_ydb_helgrind_test bin) function(add_ydb_helgrind_test bin)
add_helgrind_test(ydb helgrind_${bin} $<TARGET_FILE:${bin}> ${ARGN}) add_helgrind_test(ydb helgrind_${bin} $<TARGET_FILE:${bin}> ${ARGN})
endfunction(add_ydb_helgrind_test) endfunction(add_ydb_helgrind_test)
function(add_ydb_drd_test_aux name bin) function(add_ydb_drd_test_aux name bin)
add_drd_test(ydb ${name} $<TARGET_FILE:${bin}> ${ARGN}) add_drd_test(ydb ${name} $<TARGET_FILE:${bin}> ${ARGN})
endfunction(add_ydb_drd_test_aux) endfunction(add_ydb_drd_test_aux)
function(add_ydb_drd_test bin) function(add_ydb_drd_test bin)
add_ydb_drd_test_aux(drd_${bin} ${bin} ${ARGN}) add_ydb_drd_test_aux(drd_${bin} ${bin} ${ARGN})
endfunction(add_ydb_drd_test) endfunction(add_ydb_drd_test)
file(GLOB transparent_upgrade_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" upgrade*.cc) file(GLOB transparent_upgrade_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" upgrade*.cc)
file(GLOB tdb_dontrun_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" bdb-simple-deadlock*.cc) file(GLOB tdb_dontrun_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" bdb-simple-deadlock*.cc)
string(REGEX REPLACE "\\.cc(;|$)" "\\1" tdb_dontrun_tests "${tdb_dontrun_srcs}") string(REGEX REPLACE "\\.cc(;|$)" "\\1" tdb_dontrun_tests "${tdb_dontrun_srcs}")
file(GLOB srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" *.cc) file(GLOB srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" *.cc)
list(REMOVE_ITEM srcs ${transparent_upgrade_srcs}) list(REMOVE_ITEM srcs ${transparent_upgrade_srcs})
set(recover_srcs test_log2.cc test_log3.cc test_log4.cc test_log5.cc test_log6.cc test_log7.cc test_log8.cc test_log9.cc test_log10.cc) set(recover_srcs test_log2.cc test_log3.cc test_log4.cc test_log5.cc test_log6.cc test_log7.cc test_log8.cc test_log9.cc test_log10.cc)
file(GLOB abortrecover_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" recover-*.cc) file(GLOB abortrecover_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" recover-*.cc)
file(GLOB loader_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" loader-*.cc) file(GLOB loader_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" loader-*.cc)
file(GLOB stress_test_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" test_stress*.cc) file(GLOB stress_test_srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" test_stress*.cc)
string(REGEX REPLACE "\\.cc(;|$)" ".recover\\1" recover_tests "${recover_srcs}") string(REGEX REPLACE "\\.cc(;|$)" ".recover\\1" recover_tests "${recover_srcs}")
string(REGEX REPLACE "\\.cc(;|$)" ".abortrecover\\1" abortrecover_tests "${abortrecover_srcs}") string(REGEX REPLACE "\\.cc(;|$)" ".abortrecover\\1" abortrecover_tests "${abortrecover_srcs}")
string(REGEX REPLACE "\\.cc(;|$)" ".loader\\1" loader_tests "${loader_srcs}") string(REGEX REPLACE "\\.cc(;|$)" ".loader\\1" loader_tests "${loader_srcs}")
string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" stress_tests "${stress_test_srcs}") string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" stress_tests "${stress_test_srcs}")
set(tdb_srcs ${srcs}) set(tdb_srcs ${srcs})
list(REMOVE_ITEM tdb_srcs ${tdb_dontrun_srcs}) list(REMOVE_ITEM tdb_srcs ${tdb_dontrun_srcs})
string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" tdb_bins "${tdb_srcs}") string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" tdb_bins "${tdb_srcs}")
list(REMOVE_ITEM tdb_srcs ${abortrecover_srcs} ${loader_srcs}) list(REMOVE_ITEM tdb_srcs ${abortrecover_srcs} ${loader_srcs})
string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" tdb_tests "${tdb_srcs}") string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" tdb_tests "${tdb_srcs}")
if(BDB_FOUND) if(BDB_FOUND)
set(bdb_dontrun_srcs set(bdb_dontrun_srcs
backwards_10_each_le_and_msg backwards_10_each_le_and_msg
blackhole blackhole
blocking-prelock-range blocking-prelock-range
blocking-set-range-reverse-0 blocking-set-range-reverse-0
blocking-table-lock blocking-table-lock
bug1381 bug1381
bug627 bug627
cachetable-race cachetable-race
checkpoint_1 checkpoint_1
checkpoint_callback checkpoint_callback
checkpoint_stress checkpoint_stress
cursor-isolation cursor-isolation
cursor-set-del-rmw cursor-set-del-rmw
cursor-set-range-rmw cursor-set-range-rmw
db-put-simple-deadlock db-put-simple-deadlock
del-simple del-simple
del-multiple del-multiple
del-multiple-huge-primary-row del-multiple-huge-primary-row
del-multiple-srcdb del-multiple-srcdb
directory_lock directory_lock
diskfull diskfull
dump-env dump-env
env-put-multiple env-put-multiple
env_startup env_startup
execute-updates execute-updates
filesize filesize
helgrind1 helgrind1
helgrind2 helgrind2
helgrind3 helgrind3
hotindexer-bw hotindexer-bw
hotindexer-error-callback hotindexer-error-callback
hotindexer-insert-committed-optimized hotindexer-insert-committed-optimized
hotindexer-insert-committed hotindexer-insert-committed
hotindexer-insert-provisional hotindexer-insert-provisional
hotindexer-lock-test hotindexer-lock-test
hotindexer-multiclient hotindexer-multiclient
hotindexer-nested-insert-committed hotindexer-nested-insert-committed
hotindexer-put-abort hotindexer-put-abort
hotindexer-put-commit hotindexer-put-commit
hotindexer-put-multiple hotindexer-put-multiple
hotindexer-simple-abort hotindexer-simple-abort
hotindexer-simple-abort-put hotindexer-simple-abort-put
hotindexer-undo-do-test hotindexer-undo-do-test
hotindexer-with-queries hotindexer-with-queries
hot-optimize-table-tests hot-optimize-table-tests
insert-dup-prelock insert-dup-prelock
isolation isolation
isolation-read-committed isolation-read-committed
keyrange keyrange
keyrange-merge keyrange-merge
last-verify-time last-verify-time
loader-cleanup-test loader-cleanup-test
loader-create-abort loader-create-abort
loader-create-close loader-create-close
loader-dup-test loader-dup-test
loader-no-puts loader-no-puts
loader-reference-test loader-reference-test
loader-stress-del loader-stress-del
loader-stress-test loader-stress-test
loader-tpch-load loader-tpch-load
lock-pressure lock-pressure
manyfiles manyfiles
maxsize-for-loader maxsize-for-loader
multiprocess multiprocess
mvcc-create-table mvcc-create-table
mvcc-many-committed mvcc-many-committed
mvcc-read-committed mvcc-read-committed
perf_checkpoint_var perf_checkpoint_var
perf_child_txn perf_child_txn
perf_cursor_nop perf_cursor_nop
perf_iibench perf_iibench
perf_insert perf_insert
perf_insert_multiple perf_insert_multiple
perf_malloc_free perf_malloc_free
perf_nop perf_nop
perf_ptquery perf_ptquery
perf_ptquery2 perf_ptquery2
perf_rangequery perf_rangequery
perf_read_write perf_read_txn
perf_txn_single_thread perf_read_txn_single_thread
perf_xmalloc_free perf_read_write
prelock-read-read perf_txn_single_thread
prelock-read-write perf_xmalloc_free
prelock-write-read prelock-read-read
prelock-write-write prelock-read-write
print_engine_status prelock-write-read
powerfail prelock-write-write
preload-db print_engine_status
preload-db-nested powerfail
progress preload-db
put-multiple preload-db-nested
queries_with_deletes progress
recover-2483 put-multiple
recover-3113 queries_with_deletes
recover-5146 recover-2483
recover-compare-db recover-3113
recover-compare-db-descriptor recover-5146
recover-del-multiple recover-compare-db
recover-del-multiple-abort recover-compare-db-descriptor
recover-del-multiple-srcdb-fdelete-all recover-del-multiple
recover-delboth-after-checkpoint recover-del-multiple-abort
recover-delboth-checkpoint recover-del-multiple-srcdb-fdelete-all
recover-descriptor recover-delboth-after-checkpoint
recover-descriptor2 recover-delboth-checkpoint
recover-descriptor3 recover-descriptor
recover-descriptor4 recover-descriptor2
recover-descriptor5 recover-descriptor3
recover-descriptor6 recover-descriptor4
recover-descriptor7 recover-descriptor5
recover-descriptor8 recover-descriptor6
recover-descriptor9 recover-descriptor7
recover-descriptor10 recover-descriptor8
recover-descriptor11 recover-descriptor9
recover-descriptor12 recover-descriptor10
recover-fclose-in-checkpoint recover-descriptor11
recover-fcreate-basementnodesize recover-descriptor12
recover-flt1 recover-fclose-in-checkpoint
recover-flt2 recover-fcreate-basementnodesize
recover-flt3 recover-flt1
recover-flt4 recover-flt2
recover-flt5 recover-flt3
recover-flt6 recover-flt4
recover-flt7 recover-flt5
recover-flt8 recover-flt6
recover-flt9 recover-flt7
recover-flt10 recover-flt8
recover-hotindexer-simple-abort-put recover-flt9
recover-loader-test recover-flt10
recover-lsn-filter-multiple recover-hotindexer-simple-abort-put
recover-put-multiple recover-loader-test
recover-put-multiple-abort recover-lsn-filter-multiple
recover-put-multiple-fdelete-all recover-put-multiple
recover-put-multiple-fdelete-some recover-put-multiple-abort
recover-put-multiple-srcdb-fdelete-all recover-put-multiple-fdelete-all
recover-split-checkpoint recover-put-multiple-fdelete-some
recover-tablelock recover-put-multiple-srcdb-fdelete-all
recover-test-logsuppress recover-split-checkpoint
recover-test-logsuppress-put recover-tablelock
recover-test_stress1 recover-test-logsuppress
recover-test_stress2 recover-test-logsuppress-put
recover-test_stress3 recover-test_stress1
recover-test_stress_openclose recover-test_stress2
recover-upgrade-db-descriptor-multihandle recover-test_stress3
recover-upgrade-db-descriptor recover-test_stress_openclose
recover-update-multiple recover-upgrade-db-descriptor-multihandle
recover-update-multiple-abort recover-upgrade-db-descriptor
recover-update_aborts recover-update-multiple
recover-update_aborts_before_checkpoint recover-update-multiple-abort
recover-update_aborts_before_close recover-update_aborts
recover-update_changes_values recover-update_aborts_before_checkpoint
recover-update_changes_values_before_checkpoint recover-update_aborts_before_close
recover-update_changes_values_before_close recover-update_changes_values
recover-update_broadcast_aborts recover-update_changes_values_before_checkpoint
recover-update_broadcast_aborts2 recover-update_changes_values_before_close
recover-update_broadcast_aborts3 recover-update_broadcast_aborts
recover-update_broadcast_aborts_before_checkpoint recover-update_broadcast_aborts2
recover-update_broadcast_aborts_before_close recover-update_broadcast_aborts3
recover-update_broadcast_changes_values recover-update_broadcast_aborts_before_checkpoint
recover-update_broadcast_changes_values2 recover-update_broadcast_aborts_before_close
recover-update_broadcast_changes_values3 recover-update_broadcast_changes_values
recover-update_broadcast_changes_values_before_checkpoint recover-update_broadcast_changes_values2
recover-update_broadcast_changes_values_before_close recover-update_broadcast_changes_values3
recover-update_changes_values_before_close recover-update_broadcast_changes_values_before_checkpoint
recovery_fileops_stress recover-update_broadcast_changes_values_before_close
recovery_fileops_unit recover-update_changes_values_before_close
recovery_stress recovery_fileops_stress
redirect recovery_fileops_unit
replace-into-write-lock recovery_stress
root_fifo_2 redirect
root_fifo_32 replace-into-write-lock
root_fifo_41 root_fifo_2
seqinsert root_fifo_32
shutdown-3344 root_fifo_41
stat64 seqinsert
stat64-create-modify-times shutdown-3344
stat64_flatten stat64
stat64-null-txn stat64-create-modify-times
stat64-root-changes stat64_flatten
stress-gc stat64-null-txn
stress-gc2 stat64-root-changes
test-xa-prepare stress-gc
test1324 stress-gc2
test1572 test-xa-prepare
test3219 test1324
test3522 test1572
test3522b test3219
test3529 test3522
test_3645 test3522b
test_3529_insert_2 test3529
test_3529_table_lock test_3645
test_3755 test_3529_insert_2
test_4015 test_3529_table_lock
test_4368 test_3755
test_4657 test_4015
test_5015 test_4368
test_5469 test_4657
test-5138 test_5015
test938c test_5469
test_abort1 test-5138
test_abort4 test938c
test_abort5 test_abort1
test_blobs_leaf_split test_abort4
test_bulk_fetch test_abort5
test_compression_methods test_blobs_leaf_split
test_cmp_descriptor test_bulk_fetch
test_db_change_pagesize test_compression_methods
test_db_change_xxx test_cmp_descriptor
test_cursor_delete_2119 test_cursor_with_read_txn
test_db_descriptor test_db_change_pagesize
test_db_descriptor_named_db test_db_change_xxx
test_db_txn_locks_read_uncommitted test_cursor_delete_2119
test_get_max_row_size test_db_descriptor
test_large_update_broadcast_small_cachetable test_db_descriptor_named_db
test_locktree_close test_db_txn_locks_read_uncommitted
test_logflush test_get_max_row_size
test_multiple_checkpoints_block_commit test_large_update_broadcast_small_cachetable
test_query test_locktree_close
test_redirect_func test_logflush
test_row_size_supported test_multiple_checkpoints_block_commit
test_stress0 test_query
test_stress1 test_read_txn_invalid_ops
test_stress2 test_redirect_func
test_stress3 test_row_size_supported
test_stress4 test_simple_read_txn
test_stress5 test_stress0
test_stress6 test_stress1
test_stress7 test_stress2
test_stress_openclose test_stress3
test_stress_with_verify test_stress4
test_stress_hot_indexing test_stress5
test_transactional_descriptor test_stress6
test_trans_desc_during_chkpt test_stress7
test_trans_desc_during_chkpt2 test_stress_openclose
test_trans_desc_during_chkpt3 test_stress_with_verify
test_trans_desc_during_chkpt4 test_stress_hot_indexing
test_txn_abort6 test_transactional_descriptor
test_txn_abort8 test_trans_desc_during_chkpt
test_txn_abort9 test_trans_desc_during_chkpt2
test_txn_close_open_commit test_trans_desc_during_chkpt3
test_txn_commit8 test_trans_desc_during_chkpt4
test_txn_nested1 test_txn_abort6
test_txn_nested2 test_txn_abort8
test_txn_nested3 test_txn_abort9
test_txn_nested4 test_txn_close_open_commit
test_txn_nested5 test_txn_commit8
test_update_abort_works test_txn_nested1
test_update_calls_back test_txn_nested2
test_update_can_delete_elements test_txn_nested3
test_update_changes_values test_txn_nested4
test_update_nonexistent_keys test_txn_nested5
test_update_previously_deleted test_update_abort_works
test_update_stress test_update_calls_back
test_update_txn_snapshot_works_concurrently test_update_can_delete_elements
test_update_txn_snapshot_works_correctly_with_deletes test_update_changes_values
test_update_broadcast_abort_works test_update_nonexistent_keys
test_update_broadcast_calls_back test_update_previously_deleted
test_update_broadcast_can_delete_elements test_update_stress
test_update_broadcast_changes_values test_update_txn_snapshot_works_concurrently
test_update_broadcast_previously_deleted test_update_txn_snapshot_works_correctly_with_deletes
test_update_broadcast_stress test_update_broadcast_abort_works
test_update_broadcast_update_fun_has_choices test_update_broadcast_calls_back
test_update_broadcast_with_empty_table test_update_broadcast_can_delete_elements
test_update_broadcast_indexer test_update_broadcast_changes_values
test_update_broadcast_loader test_update_broadcast_previously_deleted
test_update_broadcast_nested_updates test_update_broadcast_stress
test_update_nested_updates test_update_broadcast_update_fun_has_choices
test_update_with_empty_table test_update_broadcast_with_empty_table
test_updates_single_key test_update_broadcast_indexer
txn-ignore test_update_broadcast_loader
transactional_fileops test_update_broadcast_nested_updates
update-multiple-data-diagonal test_update_nested_updates
update-multiple-key0 test_update_with_empty_table
update-multiple-nochange test_updates_single_key
update-multiple-with-indexer txn-ignore
update transactional_fileops
upgrade_simple update-multiple-data-diagonal
upgrade-test-1 update-multiple-key0
upgrade-test-2 update-multiple-nochange
upgrade-test-3 update-multiple-with-indexer
upgrade-test-4 update
upgrade-test-5 upgrade_simple
upgrade-test-6 upgrade-test-1
upgrade-test-7 upgrade-test-2
zombie_db upgrade-test-3
) upgrade-test-4
set(bdb_srcs ${srcs}) upgrade-test-5
string(REGEX REPLACE "\\.cc(;|$)" "\\1" bdb_testbases "${bdb_srcs}") upgrade-test-6
list(REMOVE_ITEM bdb_testbases ${bdb_dontrun_srcs}) upgrade-test-7
string(REGEX REPLACE "(.)(;|$)" "\\1.bdb\\2" bdb_tests "${bdb_testbases}") zombie_db
set(bdb_bins ${bdb_tests}) )
endif() set(bdb_srcs ${srcs})
string(REGEX REPLACE "\\.cc(;|$)" "\\1" bdb_testbases "${bdb_srcs}")
set(tdb_tests_that_should_fail list(REMOVE_ITEM bdb_testbases ${bdb_dontrun_srcs})
test_db_no_env.tdb string(REGEX REPLACE "(.)(;|$)" "\\1.bdb\\2" bdb_tests "${bdb_testbases}")
test_log8.recover set(bdb_bins ${bdb_tests})
test_log9.recover endif()
test_log10.recover
recover-missing-dbfile.abortrecover set(tdb_tests_that_should_fail
recover-missing-dbfile-2.abortrecover test_db_no_env.tdb
loader-tpch-load.loader test_log8.recover
) test_log9.recover
test_log10.recover
## #5138 only reproduces when using the static library. recover-missing-dbfile.abortrecover
list(REMOVE_ITEM tdb_bins test-5138.tdb) recover-missing-dbfile-2.abortrecover
add_executable(test-5138.tdb test-5138) loader-tpch-load.loader
target_link_libraries(test-5138.tdb ${LIBTOKUDB}_static z ${LIBTOKUPORTABILITY}_static ${CMAKE_THREAD_LIBS_INIT} ${EXTRA_SYSTEM_LIBS}) )
set_property(TARGET test-5138.tdb APPEND PROPERTY
COMPILE_DEFINITIONS "USE_TDB;IS_TDB=1;TOKUDB=1") ## #5138 only reproduces when using the static library.
add_space_separated_property(TARGET test-5138.tdb COMPILE_FLAGS -fvisibility=hidden) list(REMOVE_ITEM tdb_bins test-5138.tdb)
add_ydb_test(test-5138.tdb) add_executable(test-5138.tdb test-5138)
target_link_libraries(test-5138.tdb ${LIBTOKUDB}_static z ${LIBTOKUPORTABILITY}_static ${CMAKE_THREAD_LIBS_INIT} ${EXTRA_SYSTEM_LIBS})
foreach(bin ${tdb_bins}) set_property(TARGET test-5138.tdb APPEND PROPERTY
get_filename_component(base ${bin} NAME_WE) COMPILE_DEFINITIONS "USE_TDB;IS_TDB=1;TOKUDB=1")
add_space_separated_property(TARGET test-5138.tdb COMPILE_FLAGS -fvisibility=hidden)
add_executable(${base}.tdb ${base}) add_ydb_test(test-5138.tdb)
# Some of the symbols in util may not be exported properly by
# libtokudb.so. foreach(bin ${tdb_bins})
# We link the test with util directly so that the test code itself can use get_filename_component(base ${bin} NAME_WE)
# some of those things (i.e. kibbutz in the threaded tests).
target_link_libraries(${base}.tdb util ${LIBTOKUDB} ${LIBTOKUPORTABILITY}) add_executable(${base}.tdb ${base})
set_property(TARGET ${base}.tdb APPEND PROPERTY # Some of the symbols in util may not be exported properly by
COMPILE_DEFINITIONS "USE_TDB;IS_TDB=1;TOKUDB=1") # libtokudb.so.
add_space_separated_property(TARGET ${base}.tdb COMPILE_FLAGS -fvisibility=hidden) # We link the test with util directly so that the test code itself can use
endforeach(bin) # some of those things (i.e. kibbutz in the threaded tests).
target_link_libraries(${base}.tdb util ${LIBTOKUDB} ${LIBTOKUPORTABILITY})
if(BDB_FOUND) set_property(TARGET ${base}.tdb APPEND PROPERTY
foreach(bin ${bdb_bins}) COMPILE_DEFINITIONS "USE_TDB;IS_TDB=1;TOKUDB=1")
get_filename_component(base ${bin} NAME_WE) add_space_separated_property(TARGET ${base}.tdb COMPILE_FLAGS -fvisibility=hidden)
endforeach(bin)
add_executable(${base}.bdb ${base})
set_property(TARGET ${base}.bdb APPEND PROPERTY if(BDB_FOUND)
COMPILE_DEFINITIONS "USE_BDB;IS_TDB=0;TOKU_ALLOW_DEPRECATED") foreach(bin ${bdb_bins})
set_target_properties(${base}.bdb PROPERTIES get_filename_component(base ${bin} NAME_WE)
INCLUDE_DIRECTORIES "${BDB_INCLUDE_DIR};${CMAKE_CURRENT_BINARY_DIR}/../../toku_include;${CMAKE_CURRENT_SOURCE_DIR}/../../toku_include;${CMAKE_CURRENT_SOURCE_DIR}/../../portability;${CMAKE_CURRENT_SOURCE_DIR}/../..")
target_link_libraries(${base}.bdb ${LIBTOKUPORTABILITY} ${BDB_LIBRARIES}) add_executable(${base}.bdb ${base})
add_space_separated_property(TARGET ${base}.bdb COMPILE_FLAGS -fvisibility=hidden) set_property(TARGET ${base}.bdb APPEND PROPERTY
endforeach(bin) COMPILE_DEFINITIONS "USE_BDB;IS_TDB=0;TOKU_ALLOW_DEPRECATED")
endif() set_target_properties(${base}.bdb PROPERTIES
INCLUDE_DIRECTORIES "${BDB_INCLUDE_DIR};${CMAKE_CURRENT_BINARY_DIR}/../../toku_include;${CMAKE_CURRENT_SOURCE_DIR}/../../toku_include;${CMAKE_CURRENT_SOURCE_DIR}/../../portability;${CMAKE_CURRENT_SOURCE_DIR}/../..")
foreach(bin loader-cleanup-test.tdb diskfull.tdb) target_link_libraries(${base}.bdb ${LIBTOKUPORTABILITY} ${BDB_LIBRARIES})
set_property(TARGET ${bin} APPEND PROPERTY add_space_separated_property(TARGET ${base}.bdb COMPILE_FLAGS -fvisibility=hidden)
COMPILE_DEFINITIONS DONT_DEPRECATE_WRITES) endforeach(bin)
endforeach(bin) endif()
macro(declare_custom_tests) foreach(bin loader-cleanup-test.tdb diskfull.tdb)
foreach(test ${ARGN}) set_property(TARGET ${bin} APPEND PROPERTY
list(REMOVE_ITEM tdb_tests ${test}) COMPILE_DEFINITIONS DONT_DEPRECATE_WRITES)
endforeach(test) endforeach(bin)
endmacro(declare_custom_tests)
macro(declare_custom_tests)
declare_custom_tests(test1426.tdb) foreach(test ${ARGN})
if(BDB_FOUND) list(REMOVE_ITEM tdb_tests ${test})
macro(declare_custom_bdb_tests) endforeach(test)
foreach(test ${ARGN}) endmacro(declare_custom_tests)
list(REMOVE_ITEM bdb_tests ${test})
endforeach(test) declare_custom_tests(test1426.tdb)
endmacro(declare_custom_bdb_tests) if(BDB_FOUND)
macro(declare_custom_bdb_tests)
declare_custom_bdb_tests(test1426.bdb) foreach(test ${ARGN})
configure_file(run_test1426.sh . COPYONLY) list(REMOVE_ITEM bdb_tests ${test})
add_test(NAME ydb/test1426.tdb endforeach(test)
COMMAND run_test1426.sh endmacro(declare_custom_bdb_tests)
$<TARGET_FILE:test1426.tdb> $<TARGET_FILE:test1426.bdb>
"test1426.tdb.ctest-data" "test1426.bdb.ctest-data" declare_custom_bdb_tests(test1426.bdb)
$<TARGET_FILE:tokudb_dump> "${BDB_INCLUDE_DIR}/../bin/db_dump") configure_file(run_test1426.sh . COPYONLY)
add_dependencies(test1426.tdb tokudb_dump) add_test(NAME ydb/test1426.tdb
endif() COMMAND run_test1426.sh
$<TARGET_FILE:test1426.tdb> $<TARGET_FILE:test1426.bdb>
string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" recover_would_be_tdb_tests "${recover_srcs}") "test1426.tdb.ctest-data" "test1426.bdb.ctest-data"
declare_custom_tests(${recover_would_be_tdb_tests}) $<TARGET_FILE:tokudb_dump> "${BDB_INCLUDE_DIR}/../bin/db_dump")
add_dependencies(test1426.tdb tokudb_dump)
declare_custom_tests(powerfail.tdb) endif()
add_test(ydb/powerfail.tdb echo must run powerfail by hand)
string(REGEX REPLACE "\\.cc(;|$)" ".tdb\\1" recover_would_be_tdb_tests "${recover_srcs}")
declare_custom_tests(checkpoint_stress.tdb) declare_custom_tests(${recover_would_be_tdb_tests})
configure_file(run_checkpoint_stress_test.sh . COPYONLY)
add_test(NAME ydb/checkpoint_stress.tdb declare_custom_tests(powerfail.tdb)
COMMAND run_checkpoint_stress_test.sh $<TARGET_FILE:checkpoint_stress.tdb> 5 5001 137) add_test(ydb/powerfail.tdb echo must run powerfail by hand)
setup_toku_test_properties(ydb/checkpoint_stress.tdb checkpoint_stress.tdb)
declare_custom_tests(checkpoint_stress.tdb)
configure_file(run_recover_stress_test.sh . COPYONLY) configure_file(run_checkpoint_stress_test.sh . COPYONLY)
add_test(NAME ydb/recover_stress.tdb add_test(NAME ydb/checkpoint_stress.tdb
COMMAND run_recover_stress_test.sh $<TARGET_FILE:checkpoint_stress.tdb> 5 5001 137) COMMAND run_checkpoint_stress_test.sh $<TARGET_FILE:checkpoint_stress.tdb> 5 5001 137)
setup_toku_test_properties(ydb/recover_stress.tdb recover_stress.tdb) setup_toku_test_properties(ydb/checkpoint_stress.tdb checkpoint_stress.tdb)
declare_custom_tests(diskfull.tdb) configure_file(run_recover_stress_test.sh . COPYONLY)
configure_file(run_diskfull_test.sh . COPYONLY) add_test(NAME ydb/recover_stress.tdb
add_test(NAME ydb/diskfull.tdb COMMAND run_recover_stress_test.sh $<TARGET_FILE:checkpoint_stress.tdb> 5 5001 137)
COMMAND run_diskfull_test.sh $<TARGET_FILE:diskfull.tdb> 134) setup_toku_test_properties(ydb/recover_stress.tdb recover_stress.tdb)
setup_toku_test_properties(ydb/diskfull.tdb diskfull.tdb)
declare_custom_tests(diskfull.tdb)
declare_custom_tests(recovery_fileops_unit.tdb) configure_file(run_diskfull_test.sh . COPYONLY)
configure_file(run_recovery_fileops_unit.sh . COPYONLY) add_test(NAME ydb/diskfull.tdb
file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/recovery_fileops_unit_dir") COMMAND run_diskfull_test.sh $<TARGET_FILE:diskfull.tdb> 134)
foreach(ov c d r) setup_toku_test_properties(ydb/diskfull.tdb diskfull.tdb)
if (ov STREQUAL c) declare_custom_tests(recovery_fileops_unit.tdb)
set(gset 0) configure_file(run_recovery_fileops_unit.sh . COPYONLY)
set(hset 0) file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/recovery_fileops_unit_dir")
else () foreach(ov c d r)
set(gset 0 1 2 3 4 5)
set(hset 0 1) if (ov STREQUAL c)
endif () set(gset 0)
set(hset 0)
foreach(av 0 1) else ()
foreach(bv 0 1) set(gset 0 1 2 3 4 5)
set(hset 0 1)
if (bv) endif ()
set(dset 0 1)
set(eset 0 1) foreach(av 0 1)
else () foreach(bv 0 1)
set(dset 0)
set(eset 0) if (bv)
endif () set(dset 0 1)
set(eset 0 1)
foreach(cv 0 1 2) else ()
foreach(dv ${dset}) set(dset 0)
foreach(ev ${eset}) set(eset 0)
foreach(fv 0 1) endif ()
foreach(gv ${gset})
foreach(hv ${hset}) foreach(cv 0 1 2)
foreach(dv ${dset})
if ((NOT ov STREQUAL c) AND (NOT cv) AND ((NOT bv) OR (NOT ev) OR (dv))) foreach(ev ${eset})
set(iset 0 1) foreach(fv 0 1)
else () foreach(gv ${gset})
set(iset 0) foreach(hv ${hset})
endif ()
if ((NOT ov STREQUAL c) AND (NOT cv) AND ((NOT bv) OR (NOT ev) OR (dv)))
foreach(iv ${iset}) set(iset 0 1)
set(testname "ydb/recovery_fileops_unit.${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}") else ()
set(envdir "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}") set(iset 0)
set(errfile "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}.ctest-errors") endif ()
add_test(NAME ${testname}
COMMAND run_recovery_fileops_unit.sh $<TARGET_FILE:recovery_fileops_unit.tdb> ${errfile} 137 foreach(iv ${iset})
-O ${ov} -A ${av} -B ${bv} -C ${cv} -D ${dv} -E ${ev} -F ${fv} -G ${gv} -H ${hv} -I ${iv} set(testname "ydb/recovery_fileops_unit.${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}")
) set(envdir "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}")
setup_toku_test_properties(${testname} ${envdir}) set(errfile "recovery_fileops_unit_dir/${ov}${av}${bv}${cv}${dv}${ev}${fv}${gv}${hv}${iv}.ctest-errors")
set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES "${errfile}") add_test(NAME ${testname}
endforeach(iv) COMMAND run_recovery_fileops_unit.sh $<TARGET_FILE:recovery_fileops_unit.tdb> ${errfile} 137
endforeach(hv) -O ${ov} -A ${av} -B ${bv} -C ${cv} -D ${dv} -E ${ev} -F ${fv} -G ${gv} -H ${hv} -I ${iv}
endforeach(gv) )
endforeach(fv) setup_toku_test_properties(${testname} ${envdir})
endforeach(ev) set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES "${errfile}")
endforeach(dv) endforeach(iv)
endforeach(cv) endforeach(hv)
endforeach(bv) endforeach(gv)
endforeach(av) endforeach(fv)
endforeach(ov) endforeach(ev)
endforeach(dv)
if (NOT (CMAKE_SYSTEM_NAME MATCHES Darwin OR endforeach(cv)
(CMAKE_CXX_COMPILER_ID STREQUAL Intel AND endforeach(bv)
CMAKE_BUILD_TYPE STREQUAL Release) endforeach(av)
OR USE_GCOV)) endforeach(ov)
declare_custom_tests(helgrind1.tdb)
add_test(NAME ydb/helgrind_helgrind1.tdb if (NOT (CMAKE_SYSTEM_NAME MATCHES Darwin OR
COMMAND valgrind --quiet --tool=helgrind --error-exitcode=1 --log-file=helgrind1.tdb.deleteme $<TARGET_FILE:helgrind1.tdb>) (CMAKE_CXX_COMPILER_ID STREQUAL Intel AND
setup_toku_test_properties(ydb/helgrind_helgrind1.tdb helgrind_helgrind1.tdb) CMAKE_BUILD_TYPE STREQUAL Release)
set_tests_properties(ydb/helgrind_helgrind1.tdb PROPERTIES WILL_FAIL TRUE) OR USE_GCOV))
endif() declare_custom_tests(helgrind1.tdb)
declare_custom_tests(helgrind2.tdb) add_test(NAME ydb/helgrind_helgrind1.tdb
declare_custom_tests(helgrind3.tdb) COMMAND valgrind --quiet --tool=helgrind --error-exitcode=1 --log-file=helgrind1.tdb.deleteme $<TARGET_FILE:helgrind1.tdb>)
add_ydb_helgrind_test(helgrind2.tdb) setup_toku_test_properties(ydb/helgrind_helgrind1.tdb helgrind_helgrind1.tdb)
add_ydb_helgrind_test(helgrind3.tdb) set_tests_properties(ydb/helgrind_helgrind1.tdb PROPERTIES WILL_FAIL TRUE)
endif()
declare_custom_tests(test_groupcommit_count.tdb) declare_custom_tests(helgrind2.tdb)
add_ydb_test(test_groupcommit_count.tdb -n 1) declare_custom_tests(helgrind3.tdb)
add_ydb_helgrind_test(test_groupcommit_count.tdb -n 1) add_ydb_helgrind_test(helgrind2.tdb)
add_ydb_drd_test(test_groupcommit_count.tdb -n 1) add_ydb_helgrind_test(helgrind3.tdb)
add_ydb_drd_test(test_4015.tdb) declare_custom_tests(test_groupcommit_count.tdb)
add_ydb_test(test_groupcommit_count.tdb -n 1)
# We link the locktree so that stress test 0 can call some add_ydb_helgrind_test(test_groupcommit_count.tdb -n 1)
# functions (ie: lock escalation) directly. add_ydb_drd_test(test_groupcommit_count.tdb -n 1)
target_link_libraries(test_stress0.tdb locktree)
add_ydb_drd_test(test_4015.tdb)
# Set up default stress tests and drd tests. Exclude hot_index.
foreach(src ${stress_test_srcs}) # We link the locktree so that stress test 0 can call some
if(NOT ${src} MATCHES hot_index) # functions (ie: lock escalation) directly.
get_filename_component(base ${src} NAME_WE) target_link_libraries(test_stress0.tdb locktree)
set(test ${base}.tdb)
# Set up default stress tests and drd tests. Exclude hot_index.
if (${src} MATCHES test_stress0) foreach(src ${stress_test_srcs})
add_ydb_test(${test} --num_elements 512 --num_seconds 1000 --join_timeout 600) if(NOT ${src} MATCHES hot_index)
else () get_filename_component(base ${src} NAME_WE)
add_ydb_test(${test} --num_elements 150000 --num_seconds 1000 --join_timeout 600) set(test ${base}.tdb)
endif ()
if (${src} MATCHES test_stress0)
add_ydb_drd_test_aux(drd_tiny_${test} ${test} --num_seconds 5 --num_elements 150 --join_timeout 3000) add_ydb_test(${test} --num_elements 512 --num_seconds 1000 --join_timeout 600)
set_tests_properties(ydb/drd_tiny_${test} PROPERTIES TIMEOUT 3600) else ()
add_ydb_test(${test} --num_elements 150000 --num_seconds 1000 --join_timeout 600)
add_test(ydb/drd_mid_${test}/prepare ${test} --only_create --num_elements 10000) endif ()
setup_toku_test_properties(ydb/drd_mid_${test}/prepare drd_mid_${test})
add_ydb_drd_test_aux(drd_mid_${test} ${test} --only_stress --num_elements 10000 --num_seconds 100 --join_timeout 14400) add_ydb_drd_test_aux(drd_tiny_${test} ${test} --num_seconds 5 --num_elements 150 --join_timeout 3000)
set_tests_properties(ydb/drd_mid_${test} PROPERTIES set_tests_properties(ydb/drd_tiny_${test} PROPERTIES TIMEOUT 3600)
DEPENDS ydb/drd_mid_${test}/prepare
REQUIRED_FILES "drd_mid_${test}.ctest-data" add_test(ydb/drd_mid_${test}/prepare ${test} --only_create --num_elements 10000)
TIMEOUT 15000 setup_toku_test_properties(ydb/drd_mid_${test}/prepare drd_mid_${test})
) add_ydb_drd_test_aux(drd_mid_${test} ${test} --only_stress --num_elements 10000 --num_seconds 100 --join_timeout 14400)
set_tests_properties(ydb/drd_mid_${test} PROPERTIES
add_test(ydb/drd_large_${test}/prepare ${test} --only_create --num_elements 150000) DEPENDS ydb/drd_mid_${test}/prepare
setup_toku_test_properties(ydb/drd_large_${test}/prepare drd_large_${test}) REQUIRED_FILES "drd_mid_${test}.ctest-data"
add_ydb_drd_test_aux(drd_large_${test} ${test} --only_stress --num_elements 150000 --num_seconds 1000 --join_timeout 28800) TIMEOUT 15000
set_tests_properties(ydb/drd_large_${test} PROPERTIES )
DEPENDS ydb/drd_large_${test}/prepare
REQUIRED_FILES "drd_large_${test}.ctest-data" add_test(ydb/drd_large_${test}/prepare ${test} --only_create --num_elements 150000)
TIMEOUT 30000 setup_toku_test_properties(ydb/drd_large_${test}/prepare drd_large_${test})
) add_ydb_drd_test_aux(drd_large_${test} ${test} --only_stress --num_elements 150000 --num_seconds 1000 --join_timeout 28800)
endif() set_tests_properties(ydb/drd_large_${test} PROPERTIES
endforeach(src) DEPENDS ydb/drd_large_${test}/prepare
REQUIRED_FILES "drd_large_${test}.ctest-data"
# Set up upgrade tests. Exclude test_stress_openclose TIMEOUT 30000
foreach(src ${stress_test_srcs}) )
if (NOT ${src} MATCHES test_stress_openclose) endif()
get_filename_component(base ${src} NAME_WE) endforeach(src)
set(test ${base}.tdb)
# Set up upgrade tests. Exclude test_stress_openclose
foreach(oldver 4.2.0 5.0.8 5.2.7 6.0.0 6.1.0 6.5.1 6.6.3) foreach(src ${stress_test_srcs})
set(versiondir ${TOKU_SVNROOT}/tokudb.data/old-stress-test-envs/${oldver}) if (NOT ${src} MATCHES test_stress_openclose)
if (NOT EXISTS "${versiondir}/") get_filename_component(base ${src} NAME_WE)
message(WARNING "Test data for upgrade tests for version ${oldver} doesn't exist, check out ${versiondir}/*-2000-dir first or upgrade stress tests may fail.") set(test ${base}.tdb)
endif ()
foreach(p_or_s pristine stressed) foreach(oldver 4.2.0 5.0.8 5.2.7 6.0.0 6.1.0 6.5.1 6.6.3)
if (NOT (${base} MATCHES test_stress4 AND ${p_or_s} MATCHES stressed)) set(versiondir ${TOKU_SVNROOT}/tokudb.data/old-stress-test-envs/${oldver})
foreach(size 2000) if (NOT EXISTS "${versiondir}/")
set(oldenvdir "${versiondir}/saved${p_or_s}-${size}-dir") message(WARNING "Test data for upgrade tests for version ${oldver} doesn't exist, check out ${versiondir}/*-2000-dir first or upgrade stress tests may fail.")
set(envdirbase "${upgrade}_${oldver}_${p_or_s}_${size}_${test}") endif ()
set(envdir "${envdirbase}.ctest-data") foreach(p_or_s pristine stressed)
set(testnamebase ydb/${test}/upgrade/${oldver}/${p_or_s}/${size}) if (NOT (${base} MATCHES test_stress4 AND ${p_or_s} MATCHES stressed))
foreach(size 2000)
add_test(NAME ${testnamebase}/remove set(oldenvdir "${versiondir}/saved${p_or_s}-${size}-dir")
COMMAND ${CMAKE_COMMAND} -E remove_directory "${envdir}") set(envdirbase "${upgrade}_${oldver}_${p_or_s}_${size}_${test}")
add_test(NAME ${testnamebase}/copy set(envdir "${envdirbase}.ctest-data")
COMMAND ${CMAKE_COMMAND} -E copy_directory "${oldenvdir}" "${envdir}") set(testnamebase ydb/${test}/upgrade/${oldver}/${p_or_s}/${size})
set_tests_properties(${testnamebase}/copy PROPERTIES
DEPENDS ${testnamebase}/remove add_test(NAME ${testnamebase}/remove
REQUIRED_FILES "${oldenvdir}") COMMAND ${CMAKE_COMMAND} -E remove_directory "${envdir}")
add_test(NAME ${testnamebase}/copy
add_test(NAME ${testnamebase} COMMAND ${CMAKE_COMMAND} -E copy_directory "${oldenvdir}" "${envdir}")
COMMAND ${test} --only_stress --num_elements ${size} --num_seconds 600 --join_timeout 7200) set_tests_properties(${testnamebase}/copy PROPERTIES
setup_toku_test_properties(${testnamebase} "${envdirbase}") DEPENDS ${testnamebase}/remove
set_tests_properties(${testnamebase} PROPERTIES REQUIRED_FILES "${oldenvdir}")
DEPENDS ${testnamebase}/copy
REQUIRED_FILES "${envdir}" add_test(NAME ${testnamebase}
TIMEOUT 10800) COMMAND ${test} --only_stress --num_elements ${size} --num_seconds 600 --join_timeout 7200)
endforeach(size) setup_toku_test_properties(${testnamebase} "${envdirbase}")
endif () set_tests_properties(${testnamebase} PROPERTIES
endforeach(p_or_s) DEPENDS ${testnamebase}/copy
endforeach(oldver) REQUIRED_FILES "${envdir}"
endif () TIMEOUT 10800)
endforeach(src) endforeach(size)
endif ()
if (NOT EXISTS "${TOKU_SVNROOT}/tokudb.data/test_5902/") endforeach(p_or_s)
message(WARNING "Test data for dump-env.tdb doesn't exist, check out ${TOKU_SVNROOT}/tokudb.data/test_5902 first or dump-env.tdb may fail.") endforeach(oldver)
endif () endif ()
declare_custom_tests(dump-env.tdb) endforeach(src)
add_test(NAME ydb/dump-env.tdb/remove
COMMAND ${CMAKE_COMMAND} -E remove_directory "dump-env.tdb.ctest-data") if (NOT EXISTS "${TOKU_SVNROOT}/tokudb.data/test_5902/")
add_test(NAME ydb/dump-env.tdb/copy message(WARNING "Test data for dump-env.tdb doesn't exist, check out ${TOKU_SVNROOT}/tokudb.data/test_5902 first or dump-env.tdb may fail.")
COMMAND ${CMAKE_COMMAND} -E copy_directory "${TOKU_SVNROOT}/tokudb.data/test_5902" "dump-env.tdb.ctest-data") endif ()
set_tests_properties(ydb/dump-env.tdb/copy PROPERTIES declare_custom_tests(dump-env.tdb)
DEPENDS ydb/dump-env.tdb/remove add_test(NAME ydb/dump-env.tdb/remove
REQUIRED_FILES "${TOKU_SVNROOT}/tokudb.data/test_5902") COMMAND ${CMAKE_COMMAND} -E remove_directory "dump-env.tdb.ctest-data")
add_ydb_test(dump-env.tdb) add_test(NAME ydb/dump-env.tdb/copy
set_tests_properties(ydb/dump-env.tdb PROPERTIES COMMAND ${CMAKE_COMMAND} -E copy_directory "${TOKU_SVNROOT}/tokudb.data/test_5902" "dump-env.tdb.ctest-data")
DEPENDS ydb/dump-env.tdb/copy set_tests_properties(ydb/dump-env.tdb/copy PROPERTIES
REQUIRED_FILES "dump-env.tdb.ctest-data") DEPENDS ydb/dump-env.tdb/remove
REQUIRED_FILES "${TOKU_SVNROOT}/tokudb.data/test_5902")
## for some reason this rule doesn't run with the makefile and it crashes with this rule, so I'm disabling this special case add_ydb_test(dump-env.tdb)
#declare_custom_tests(test_thread_stack.tdb) set_tests_properties(ydb/dump-env.tdb PROPERTIES
#add_custom_command(OUTPUT run_test_thread_stack.sh DEPENDS ydb/dump-env.tdb/copy
# COMMAND install "${CMAKE_CURRENT_SOURCE_DIR}/run_test_thread_stack.sh" "${CMAKE_CFG_INTDIR}" REQUIRED_FILES "dump-env.tdb.ctest-data")
# MAIN_DEPENDENCY run_test_thread_stack.sh
# VERBATIM) ## for some reason this rule doesn't run with the makefile and it crashes with this rule, so I'm disabling this special case
#add_custom_target(install_run_test_thread_stack.sh ALL DEPENDS run_test_thread_stack.sh) #declare_custom_tests(test_thread_stack.tdb)
#add_test(ydb/test_thread_stack.tdb run_test_thread_stack.sh "${CMAKE_CFG_INTDIR}/test_thread_stack.tdb") #add_custom_command(OUTPUT run_test_thread_stack.sh
# COMMAND install "${CMAKE_CURRENT_SOURCE_DIR}/run_test_thread_stack.sh" "${CMAKE_CFG_INTDIR}"
declare_custom_tests(root_fifo_41.tdb) # MAIN_DEPENDENCY run_test_thread_stack.sh
foreach(num RANGE 1 100) # VERBATIM)
add_ydb_test_aux(root_fifo_41_${num}_populate.tdb root_fifo_41.tdb -n ${num} -populate) #add_custom_target(install_run_test_thread_stack.sh ALL DEPENDS run_test_thread_stack.sh)
add_ydb_test_aux(root_fifo_41_${num}_nopopulate.tdb root_fifo_41.tdb -n ${num}) #add_test(ydb/test_thread_stack.tdb run_test_thread_stack.sh "${CMAKE_CFG_INTDIR}/test_thread_stack.tdb")
endforeach(num)
declare_custom_tests(root_fifo_41.tdb)
add_ydb_test_aux(test3039_small.tdb test3039.tdb -n 1000) foreach(num RANGE 1 100)
add_ydb_test_aux(root_fifo_41_${num}_populate.tdb root_fifo_41.tdb -n ${num} -populate)
declare_custom_tests(test_abort4.tdb) add_ydb_test_aux(root_fifo_41_${num}_nopopulate.tdb root_fifo_41.tdb -n ${num})
foreach(num RANGE -1 19) endforeach(num)
add_ydb_test_aux(test_abort4_${num}_0.tdb test_abort4.tdb -c 0 -l ${num})
add_ydb_test_aux(test_abort4_${num}_1.tdb test_abort4.tdb -c 1 -l ${num}) add_ydb_test_aux(test3039_small.tdb test3039.tdb -n 1000)
endforeach(num)
declare_custom_tests(test_abort4.tdb)
set(old_loader_upgrade_data "${TOKU_SVNROOT}/tokudb.data/env_preload.4.2.0.emptydictionaries.cleanshutdown") foreach(num RANGE -1 19)
if (NOT EXISTS "${old_loader_upgrade_data}/") add_ydb_test_aux(test_abort4_${num}_0.tdb test_abort4.tdb -c 0 -l ${num})
message(WARNING "Test data for loader upgrade tests doesn't exist, check out ${old_loader_upgrade_data} first, or loader-stress-test3.tdb may fail.") add_ydb_test_aux(test_abort4_${num}_1.tdb test_abort4.tdb -c 1 -l ${num})
endif () endforeach(num)
function(add_loader_upgrade_test name bin)
add_test(NAME ydb/${name}/remove set(old_loader_upgrade_data "${TOKU_SVNROOT}/tokudb.data/env_preload.4.2.0.emptydictionaries.cleanshutdown")
COMMAND ${CMAKE_COMMAND} -E remove_directory "${name}.ctest-data") if (NOT EXISTS "${old_loader_upgrade_data}/")
add_test(NAME ydb/${name}/copy message(WARNING "Test data for loader upgrade tests doesn't exist, check out ${old_loader_upgrade_data} first, or loader-stress-test3.tdb may fail.")
COMMAND ${CMAKE_COMMAND} -E copy_directory "${old_loader_upgrade_data}" "${name}.ctest-data") endif ()
set_tests_properties(ydb/${name}/copy PROPERTIES function(add_loader_upgrade_test name bin)
DEPENDS ydb/${name}/remove add_test(NAME ydb/${name}/remove
REQUIRED_FILES "${old_loader_upgrade_data}") COMMAND ${CMAKE_COMMAND} -E remove_directory "${name}.ctest-data")
add_ydb_test_aux(${name} ${bin} -u ${ARGN}) add_test(NAME ydb/${name}/copy
set_tests_properties(ydb/${name} PROPERTIES COMMAND ${CMAKE_COMMAND} -E copy_directory "${old_loader_upgrade_data}" "${name}.ctest-data")
DEPENDS ydb/${name}/copy set_tests_properties(ydb/${name}/copy PROPERTIES
REQUIRED_FILES "${name}.ctest-data") DEPENDS ydb/${name}/remove
endfunction(add_loader_upgrade_test) REQUIRED_FILES "${old_loader_upgrade_data}")
add_ydb_test_aux(${name} ${bin} -u ${ARGN})
list(REMOVE_ITEM loader_tests loader-stress-test.loader) set_tests_properties(ydb/${name} PROPERTIES
add_ydb_test_aux(loader-stress-test0.tdb loader-stress-test.tdb -c) DEPENDS ydb/${name}/copy
add_ydb_test_aux(loader-stress-test1.tdb loader-stress-test.tdb -c -p) REQUIRED_FILES "${name}.ctest-data")
add_ydb_test_aux(loader-stress-test2.tdb loader-stress-test.tdb -r 5000 -s) endfunction(add_loader_upgrade_test)
add_loader_upgrade_test(loader-stress-test3.tdb loader-stress-test.tdb -c)
add_ydb_test_aux(loader-stress-test4.tdb loader-stress-test.tdb -r 10000000 -c) list(REMOVE_ITEM loader_tests loader-stress-test.loader)
add_ydb_test_aux(loader-stress-test0z.tdb loader-stress-test.tdb -c -z) add_ydb_test_aux(loader-stress-test0.tdb loader-stress-test.tdb -c)
add_ydb_test_aux(loader-stress-test1z.tdb loader-stress-test.tdb -c -p -z) add_ydb_test_aux(loader-stress-test1.tdb loader-stress-test.tdb -c -p)
add_ydb_test_aux(loader-stress-test2z.tdb loader-stress-test.tdb -r 5000 -s -z) add_ydb_test_aux(loader-stress-test2.tdb loader-stress-test.tdb -r 5000 -s)
add_loader_upgrade_test(loader-stress-test3z.tdb loader-stress-test.tdb -c -z) add_loader_upgrade_test(loader-stress-test3.tdb loader-stress-test.tdb -c)
add_ydb_test_aux(loader-stress-test4z.tdb loader-stress-test.tdb -r 500000 -c -z --valsize 28) add_ydb_test_aux(loader-stress-test4.tdb loader-stress-test.tdb -r 10000000 -c)
add_ydb_test_aux(loader-stress-test0z.tdb loader-stress-test.tdb -c -z)
list(REMOVE_ITEM loader_tests loader-dup-test.loader) add_ydb_test_aux(loader-stress-test1z.tdb loader-stress-test.tdb -c -p -z)
add_ydb_test_aux(loader-dup-test0.tdb loader-dup-test.tdb) add_ydb_test_aux(loader-stress-test2z.tdb loader-stress-test.tdb -r 5000 -s -z)
add_ydb_test_aux(loader-dup-test1.tdb loader-dup-test.tdb -d 1 -r 500000) add_loader_upgrade_test(loader-stress-test3z.tdb loader-stress-test.tdb -c -z)
add_ydb_test_aux(loader-dup-test2.tdb loader-dup-test.tdb -d 1 -r 1000000) add_ydb_test_aux(loader-stress-test4z.tdb loader-stress-test.tdb -r 500000 -c -z --valsize 28)
add_ydb_test_aux(loader-dup-test3.tdb loader-dup-test.tdb -d 1 -s -r 100)
add_ydb_test_aux(loader-dup-test4.tdb loader-dup-test.tdb -d 1 -s -r 1000) list(REMOVE_ITEM loader_tests loader-dup-test.loader)
add_ydb_test_aux(loader-dup-test5.tdb loader-dup-test.tdb -d 1 -s -r 1000 -E) add_ydb_test_aux(loader-dup-test0.tdb loader-dup-test.tdb)
add_ydb_test_aux(loader-dup-test0z.tdb loader-dup-test.tdb -z) add_ydb_test_aux(loader-dup-test1.tdb loader-dup-test.tdb -d 1 -r 500000)
add_ydb_test_aux(loader-dup-test1z.tdb loader-dup-test.tdb -d 1 -r 500000 -z) add_ydb_test_aux(loader-dup-test2.tdb loader-dup-test.tdb -d 1 -r 1000000)
add_ydb_test_aux(loader-dup-test2z.tdb loader-dup-test.tdb -d 1 -r 1000000 -z) add_ydb_test_aux(loader-dup-test3.tdb loader-dup-test.tdb -d 1 -s -r 100)
add_ydb_test_aux(loader-dup-test3z.tdb loader-dup-test.tdb -d 1 -s -r 100 -z) add_ydb_test_aux(loader-dup-test4.tdb loader-dup-test.tdb -d 1 -s -r 1000)
add_ydb_test_aux(loader-dup-test4z.tdb loader-dup-test.tdb -d 1 -s -r 1000 -z) add_ydb_test_aux(loader-dup-test5.tdb loader-dup-test.tdb -d 1 -s -r 1000 -E)
add_ydb_test_aux(loader-dup-test5z.tdb loader-dup-test.tdb -d 1 -s -r 1000 -E -z) add_ydb_test_aux(loader-dup-test0z.tdb loader-dup-test.tdb -z)
add_ydb_test_aux(loader-dup-test1z.tdb loader-dup-test.tdb -d 1 -r 500000 -z)
## as part of #4503, we took out test 1 and 3 add_ydb_test_aux(loader-dup-test2z.tdb loader-dup-test.tdb -d 1 -r 1000000 -z)
list(REMOVE_ITEM loader_tests loader-cleanup-test.loader) add_ydb_test_aux(loader-dup-test3z.tdb loader-dup-test.tdb -d 1 -s -r 100 -z)
add_ydb_test_aux(loader-cleanup-test0.tdb loader-cleanup-test.tdb -s -r 800) add_ydb_test_aux(loader-dup-test4z.tdb loader-dup-test.tdb -d 1 -s -r 1000 -z)
#add_ydb_test_aux(loader-cleanup-test1.tdb loader-cleanup-test.tdb -s -r 800 -p) add_ydb_test_aux(loader-dup-test5z.tdb loader-dup-test.tdb -d 1 -s -r 1000 -E -z)
add_ydb_test_aux(loader-cleanup-test2.tdb loader-cleanup-test.tdb -s -r 8000)
#add_ydb_test_aux(loader-cleanup-test3.tdb loader-cleanup-test.tdb -s -r 8000 -p) ## as part of #4503, we took out test 1 and 3
add_ydb_test_aux(loader-cleanup-test0z.tdb loader-cleanup-test.tdb -s -r 800 -z) list(REMOVE_ITEM loader_tests loader-cleanup-test.loader)
add_ydb_test_aux(loader-cleanup-test2z.tdb loader-cleanup-test.tdb -s -r 8000 -z) add_ydb_test_aux(loader-cleanup-test0.tdb loader-cleanup-test.tdb -s -r 800)
#add_ydb_test_aux(loader-cleanup-test1.tdb loader-cleanup-test.tdb -s -r 800 -p)
declare_custom_tests(keyrange.tdb) add_ydb_test_aux(loader-cleanup-test2.tdb loader-cleanup-test.tdb -s -r 8000)
add_ydb_test_aux(keyrange-get0.tdb keyrange.tdb --get 0) #add_ydb_test_aux(loader-cleanup-test3.tdb loader-cleanup-test.tdb -s -r 8000 -p)
add_ydb_test_aux(keyrange-get1.tdb keyrange.tdb --get 1) add_ydb_test_aux(loader-cleanup-test0z.tdb loader-cleanup-test.tdb -s -r 800 -z)
if (0) add_ydb_test_aux(loader-cleanup-test2z.tdb loader-cleanup-test.tdb -s -r 8000 -z)
add_ydb_test_aux(keyrange-random-get0.tdb keyrange.tdb --get 0 --random_keys 1)
add_ydb_test_aux(keyrange-random-get1.tdb keyrange.tdb --get 1 --random_keys 1) declare_custom_tests(keyrange.tdb)
else () add_ydb_test_aux(keyrange-get0.tdb keyrange.tdb --get 0)
message(WARNING "TODO(leif): re-enable keyrange tests, see #5666") add_ydb_test_aux(keyrange-get1.tdb keyrange.tdb --get 1)
endif () if (0)
add_ydb_test_aux(keyrange-loader-get0.tdb keyrange.tdb --get 0 --loader 1) add_ydb_test_aux(keyrange-random-get0.tdb keyrange.tdb --get 0 --random_keys 1)
add_ydb_test_aux(keyrange-loader-get1.tdb keyrange.tdb --get 1 --loader 1) add_ydb_test_aux(keyrange-random-get1.tdb keyrange.tdb --get 1 --random_keys 1)
else ()
declare_custom_tests(maxsize-for-loader.tdb) message(WARNING "TODO(leif): re-enable keyrange tests, see #5666")
add_ydb_test_aux(maxsize-for-loader-A.tdb maxsize-for-loader.tdb -f -c) endif ()
add_ydb_test_aux(maxsize-for-loader-B.tdb maxsize-for-loader.tdb -c) add_ydb_test_aux(keyrange-loader-get0.tdb keyrange.tdb --get 0 --loader 1)
add_ydb_test_aux(maxsize-for-loader-Az.tdb maxsize-for-loader.tdb -f -z -c) add_ydb_test_aux(keyrange-loader-get1.tdb keyrange.tdb --get 1 --loader 1)
add_ydb_test_aux(maxsize-for-loader-Bz.tdb maxsize-for-loader.tdb -z -c)
declare_custom_tests(maxsize-for-loader.tdb)
declare_custom_tests(hotindexer-undo-do-test.tdb) add_ydb_test_aux(maxsize-for-loader-A.tdb maxsize-for-loader.tdb -f -c)
file(GLOB hotindexer_tests RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "hotindexer-undo-do-tests/*.test") add_ydb_test_aux(maxsize-for-loader-B.tdb maxsize-for-loader.tdb -c)
file(GLOB hotindexer_results RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "hotindexer-undo-do-tests/*.result") add_ydb_test_aux(maxsize-for-loader-Az.tdb maxsize-for-loader.tdb -f -z -c)
configure_file(run-hotindexer-undo-do-tests.bash . COPYONLY) add_ydb_test_aux(maxsize-for-loader-Bz.tdb maxsize-for-loader.tdb -z -c)
foreach(result ${hotindexer_results})
configure_file(${result} ${result} COPYONLY) declare_custom_tests(hotindexer-undo-do-test.tdb)
endforeach(result) file(GLOB hotindexer_tests RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "hotindexer-undo-do-tests/*.test")
foreach(test ${hotindexer_tests}) file(GLOB hotindexer_results RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "hotindexer-undo-do-tests/*.result")
configure_file(${test} ${test} COPYONLY) configure_file(run-hotindexer-undo-do-tests.bash . COPYONLY)
add_test(NAME ydb/${test} COMMAND run-hotindexer-undo-do-tests.bash ${test}) foreach(result ${hotindexer_results})
setup_toku_test_properties(ydb/${test} ${test}) configure_file(${result} ${result} COPYONLY)
endforeach() endforeach(result)
foreach(test ${hotindexer_tests})
foreach(test ${tdb_tests} ${bdb_tests}) configure_file(${test} ${test} COPYONLY)
add_ydb_test(${test}) add_test(NAME ydb/${test} COMMAND run-hotindexer-undo-do-tests.bash ${test})
endforeach(test) setup_toku_test_properties(ydb/${test} ${test})
endforeach()
configure_file(run_recover_test.sh . COPYONLY)
foreach(recover_test ${recover_tests}) foreach(test ${tdb_tests} ${bdb_tests})
get_filename_component(base ${recover_test} NAME_WE) add_ydb_test(${test})
add_test(NAME ydb/${recover_test} endforeach(test)
COMMAND run_recover_test.sh $<TARGET_FILE:${base}.tdb> "${recover_test}.ctest-data" $<TARGET_FILE:tdb-recover> $<TARGET_FILE:tokudb_dump>)
setup_toku_test_properties(ydb/${recover_test} ${recover_test}) configure_file(run_recover_test.sh . COPYONLY)
endforeach(recover_test) foreach(recover_test ${recover_tests})
get_filename_component(base ${recover_test} NAME_WE)
configure_file(run_abortrecover_test.sh . COPYONLY) add_test(NAME ydb/${recover_test}
foreach(abortrecover_test ${abortrecover_tests}) COMMAND run_recover_test.sh $<TARGET_FILE:${base}.tdb> "${recover_test}.ctest-data" $<TARGET_FILE:tdb-recover> $<TARGET_FILE:tokudb_dump>)
get_filename_component(base ${abortrecover_test} NAME_WE) setup_toku_test_properties(ydb/${recover_test} ${recover_test})
add_test(NAME ydb/${abortrecover_test} endforeach(recover_test)
COMMAND run_abortrecover_test.sh $<TARGET_FILE:${base}.tdb>)
setup_toku_test_properties(ydb/${abortrecover_test} ${abortrecover_test}) configure_file(run_abortrecover_test.sh . COPYONLY)
endforeach(abortrecover_test) foreach(abortrecover_test ${abortrecover_tests})
## alternate implementation, doesn't work because the abort phase crashes and we can't tell cmake that's expected get_filename_component(base ${abortrecover_test} NAME_WE)
# foreach(abortrecover_test ${abortrecover_tests}) add_test(NAME ydb/${abortrecover_test}
# get_filename_component(base ${abortrecover_test} NAME_WE) COMMAND run_abortrecover_test.sh $<TARGET_FILE:${base}.tdb>)
# set(test ${base}.tdb) setup_toku_test_properties(ydb/${abortrecover_test} ${abortrecover_test})
# add_test(NAME ydb/${test}/abort endforeach(abortrecover_test)
# COMMAND ${test} --test) ## alternate implementation, doesn't work because the abort phase crashes and we can't tell cmake that's expected
# setup_toku_test_properties(ydb/${test}/abort ${abortrecover_test}) # foreach(abortrecover_test ${abortrecover_tests})
# set_tests_properties(ydb/${test}/abort PROPERTIES WILL_FAIL TRUE) # get_filename_component(base ${abortrecover_test} NAME_WE)
# set(test ${base}.tdb)
# add_test(NAME ydb/${test}/recover # add_test(NAME ydb/${test}/abort
# COMMAND ${test} --recover) # COMMAND ${test} --test)
# setup_toku_test_properties(ydb/${test}/recover ${abortrecover_test}) # setup_toku_test_properties(ydb/${test}/abort ${abortrecover_test})
# set_tests_properties(ydb/${test}/recover PROPERTIES # set_tests_properties(ydb/${test}/abort PROPERTIES WILL_FAIL TRUE)
# DEPENDS ydb/${test}/abort
# REQUIRED_FILES "${abortrecover_test}.ctest-data") # add_test(NAME ydb/${test}/recover
# endforeach(abortrecover_test) # COMMAND ${test} --recover)
# setup_toku_test_properties(ydb/${test}/recover ${abortrecover_test})
foreach(loader_test ${loader_tests}) # set_tests_properties(ydb/${test}/recover PROPERTIES
get_filename_component(base ${loader_test} NAME_WE) # DEPENDS ydb/${test}/abort
add_ydb_test_aux(${base}.nop.loader ${base}.tdb) # REQUIRED_FILES "${abortrecover_test}.ctest-data")
add_ydb_test_aux(${base}.p.loader ${base}.tdb -p) # endforeach(abortrecover_test)
add_ydb_test_aux(${base}.comp.loader ${base}.tdb -z)
if("${tdb_tests_that_should_fail}" MATCHES "${base}.loader") foreach(loader_test ${loader_tests})
list(REMOVE_ITEM tdb_tests_that_should_fail ${base}.loader) get_filename_component(base ${loader_test} NAME_WE)
list(APPEND tdb_tests_that_should_fail ${base}.nop.loader ${base}.p.loader ${base}.comp.loader) add_ydb_test_aux(${base}.nop.loader ${base}.tdb)
endif() add_ydb_test_aux(${base}.p.loader ${base}.tdb -p)
endforeach(loader_test) add_ydb_test_aux(${base}.comp.loader ${base}.tdb -z)
if("${tdb_tests_that_should_fail}" MATCHES "${base}.loader")
set(tdb_tests_that_should_fail "ydb/${tdb_tests_that_should_fail}") list(REMOVE_ITEM tdb_tests_that_should_fail ${base}.loader)
string(REGEX REPLACE ";" ";ydb/" tdb_tests_that_should_fail "${tdb_tests_that_should_fail}") list(APPEND tdb_tests_that_should_fail ${base}.nop.loader ${base}.p.loader ${base}.comp.loader)
set_tests_properties(${tdb_tests_that_should_fail} PROPERTIES WILL_FAIL TRUE) endif()
endforeach(loader_test)
## give some tests, that time out normally, 1 hour to complete
set(long_tests set(tdb_tests_that_should_fail "ydb/${tdb_tests_that_should_fail}")
ydb/checkpoint_1.tdb string(REGEX REPLACE ";" ";ydb/" tdb_tests_that_should_fail "${tdb_tests_that_should_fail}")
ydb/drd_test_groupcommit_count.tdb set_tests_properties(${tdb_tests_that_should_fail} PROPERTIES WILL_FAIL TRUE)
ydb/env-put-multiple.tdb
ydb/filesize.tdb ## give some tests, that time out normally, 1 hour to complete
ydb/loader-cleanup-test0.tdb set(long_tests
ydb/loader-cleanup-test0z.tdb ydb/checkpoint_1.tdb
ydb/manyfiles.tdb ydb/drd_test_groupcommit_count.tdb
ydb/recover-loader-test.abortrecover ydb/env-put-multiple.tdb
ydb/recovery_fileops_stress.tdb ydb/filesize.tdb
ydb/root_fifo_1.tdb ydb/loader-cleanup-test0.tdb
ydb/root_fifo_2.tdb ydb/loader-cleanup-test0z.tdb
ydb/root_fifo_31.tdb ydb/manyfiles.tdb
ydb/root_fifo_32.tdb ydb/recover-loader-test.abortrecover
ydb/shutdown-3344.tdb ydb/recovery_fileops_stress.tdb
ydb/stat64-create-modify-times.tdb ydb/root_fifo_1.tdb
ydb/test1572.tdb ydb/root_fifo_2.tdb
ydb/test_abort4_19_0.tdb ydb/root_fifo_31.tdb
ydb/test_abort4_19_1.tdb ydb/root_fifo_32.tdb
ydb/test_abort5.tdb ydb/shutdown-3344.tdb
ydb/test_archive1.tdb ydb/stat64-create-modify-times.tdb
ydb/test_logmax.tdb ydb/test1572.tdb
ydb/test_query.tdb ydb/test_abort4_19_0.tdb
ydb/test_txn_abort5.tdb ydb/test_abort4_19_1.tdb
ydb/test_txn_abort5a.tdb ydb/test_abort5.tdb
ydb/test_txn_abort6.tdb ydb/test_archive1.tdb
ydb/test_txn_nested2.tdb ydb/test_logmax.tdb
ydb/test_txn_nested4.tdb ydb/test_query.tdb
ydb/test_txn_nested5.tdb ydb/test_txn_abort5.tdb
ydb/test_update_broadcast_stress.tdb ydb/test_txn_abort5a.tdb
) ydb/test_txn_abort6.tdb
if (BDB_FOUND) ydb/test_txn_nested2.tdb
list(APPEND long_tests ydb/test_txn_nested4.tdb
ydb/root_fifo_1.bdb ydb/test_txn_nested5.tdb
ydb/root_fifo_31.bdb ydb/test_update_broadcast_stress.tdb
ydb/rowsize.bdb )
ydb/test_log10.bdb if (BDB_FOUND)
ydb/test_log7.bdb list(APPEND long_tests
ydb/test_logmax.bdb ydb/root_fifo_1.bdb
) ydb/root_fifo_31.bdb
endif (BDB_FOUND) ydb/rowsize.bdb
set_tests_properties(${long_tests} PROPERTIES TIMEOUT 3600) ydb/test_log10.bdb
## some take even longer, with valgrind ydb/test_log7.bdb
set(extra_long_tests ydb/test_logmax.bdb
ydb/drd_test_4015.tdb )
ydb/hotindexer-with-queries.tdb endif (BDB_FOUND)
ydb/hot-optimize-table-tests.tdb set_tests_properties(${long_tests} PROPERTIES TIMEOUT 3600)
ydb/loader-cleanup-test2.tdb ## some take even longer, with valgrind
ydb/loader-cleanup-test2z.tdb set(extra_long_tests
ydb/loader-dup-test0.tdb ydb/drd_test_4015.tdb
ydb/loader-stress-del.nop.loader ydb/hotindexer-with-queries.tdb
ydb/loader-stress-del.p.loader ydb/hot-optimize-table-tests.tdb
ydb/loader-stress-del.comp.loader ydb/loader-cleanup-test2.tdb
ydb/test3039.tdb ydb/loader-cleanup-test2z.tdb
ydb/test_update_stress.tdb ydb/loader-dup-test0.tdb
) ydb/loader-stress-del.nop.loader
if (BDB_FOUND) ydb/loader-stress-del.p.loader
list(APPEND extra_long_tests ydb/loader-stress-del.comp.loader
ydb/test_groupcommit_count.bdb ydb/test3039.tdb
) ydb/test_update_stress.tdb
endif (BDB_FOUND) )
set_tests_properties(${extra_long_tests} PROPERTIES TIMEOUT 7200) if (BDB_FOUND)
## these really take a long time with valgrind list(APPEND extra_long_tests
set(phenomenally_long_tests ydb/test_groupcommit_count.bdb
ydb/checkpoint_stress.tdb )
ydb/loader-stress-test4.tdb endif (BDB_FOUND)
ydb/loader-stress-test4z.tdb set_tests_properties(${extra_long_tests} PROPERTIES TIMEOUT 7200)
ydb/recover_stress.tdb ## these really take a long time with valgrind
ydb/test3529.tdb set(phenomenally_long_tests
) ydb/checkpoint_stress.tdb
if (BDB_FOUND) ydb/loader-stress-test4.tdb
list(APPEND phenomenally_long_tests ydb/loader-stress-test4z.tdb
ydb/test1426.tdb ydb/recover_stress.tdb
) ydb/test3529.tdb
endif (BDB_FOUND) )
set_tests_properties(${phenomenally_long_tests} PROPERTIES TIMEOUT 14400) if (BDB_FOUND)
endif(BUILD_TESTING OR BUILD_SRC_TESTS) list(APPEND phenomenally_long_tests
ydb/test1426.tdb
)
endif (BDB_FOUND)
set_tests_properties(${phenomenally_long_tests} PROPERTIES TIMEOUT 14400)
endif(BUILD_TESTING OR BUILD_SRC_TESTS)
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
static int create_child_txn(DB_TXN* txn, ARG arg, void* UU(operation_extra), void *UU(stats_extra)) { static int create_child_txn(DB_TXN* txn, ARG arg, void* UU(operation_extra), void *UU(stats_extra)) {
DB_TXN* child_txn = NULL; DB_TXN* child_txn = NULL;
DB_ENV* env = arg->env; DB_ENV* env = arg->env;
int r = env->txn_begin(env, txn, &child_txn, arg->txn_type); int r = env->txn_begin(env, txn, &child_txn, arg->txn_flags);
CKERR(r); CKERR(r);
r = child_txn->commit(child_txn, 0); r = child_txn->commit(child_txn, 0);
CKERR(r); CKERR(r);
......
...@@ -376,6 +376,7 @@ stress_table(DB_ENV* env, DB **dbs, struct cli_args *cli_args) { ...@@ -376,6 +376,7 @@ stress_table(DB_ENV* env, DB **dbs, struct cli_args *cli_args) {
} else { } else {
myargs[i].operation = iibench_rangequery_op; myargs[i].operation = iibench_rangequery_op;
myargs[i].operation_extra = &put_extra; myargs[i].operation_extra = &put_extra;
myargs[i].txn_flags |= DB_TXN_READ_ONLY;
myargs[i].sleep_ms = 1000; // 1 second between range queries myargs[i].sleep_ms = 1000; // 1 second between range queries
} }
} }
......
...@@ -54,6 +54,7 @@ stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) { ...@@ -54,6 +54,7 @@ stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
for (int i = 0; i < num_threads; i++) { for (int i = 0; i < num_threads; i++) {
arg_init(&myargs[i], dbp, env, cli_args); arg_init(&myargs[i], dbp, env, cli_args);
myargs[i].operation = ptquery_op; myargs[i].operation = ptquery_op;
myargs[i].txn_flags |= DB_TXN_READ_ONLY;
} }
run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args); run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
} }
......
...@@ -67,6 +67,7 @@ stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) { ...@@ -67,6 +67,7 @@ stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
thread_ids[i] = i % cli_args->num_DBs; thread_ids[i] = i % cli_args->num_DBs;
myargs[i].operation = ptquery_op2; myargs[i].operation = ptquery_op2;
myargs[i].operation_extra = &thread_ids[i]; myargs[i].operation_extra = &thread_ids[i];
myargs[i].txn_flags |= DB_TXN_READ_ONLY;
} }
run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args); run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
} }
......
...@@ -23,6 +23,7 @@ stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) { ...@@ -23,6 +23,7 @@ stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
for (int i = 0; i < num_threads; i++) { for (int i = 0; i < num_threads; i++) {
arg_init(&myargs[i], dbp, env, cli_args); arg_init(&myargs[i], dbp, env, cli_args);
myargs[i].operation = rangequery_op; myargs[i].operation = rangequery_op;
myargs[i].txn_flags |= DB_TXN_READ_ONLY;
} }
run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args); run_workers(myargs, num_threads, cli_args->num_seconds, false, cli_args);
} }
......
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved."
#ident "$Id: perf_nop.cc 45903 2012-07-19 13:06:39Z leifwalsh $"
#include "test.h"
#include <stdio.h>
#include <stdlib.h>
#include <toku_pthread.h>
#include <unistd.h>
#include <memory.h>
#include <sys/stat.h>
#include <db.h>
#include "threaded_stress_test_helpers.h"
// The intent of this test is to measure the throughput of creating and destroying
// root read-only transactions that create snapshots
// Stress operation that does no work at all: with this as the worker
// operation, the harness measures purely the cost of creating and
// destroying the per-operation transactions (see file comment above).
static int UU() nop(DB_TXN* UU(txn), ARG UU(arg), void* UU(operation_extra), void *UU(stats_extra)) {
    return 0;
}
// Launch cli_args->num_ptquery_threads worker threads that each repeatedly
// run the no-op operation inside a read-only transaction, then wait for the
// run to finish.
static void
stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
    if (verbose) printf("starting creation of pthreads\n");
    const int nthreads = cli_args->num_ptquery_threads;
    struct arg worker_args[nthreads];
    for (int idx = 0; idx < nthreads; idx++) {
        arg_init(&worker_args[idx], dbp, env, cli_args);
        worker_args[idx].operation = nop;
        // the workers never write, so declare their transactions read only
        worker_args[idx].txn_flags |= DB_TXN_READ_ONLY;
    }
    run_workers(worker_args, nthreads, cli_args->num_seconds, false, cli_args);
}
// Entry point: configure a perf run with no databases, no elements and no
// writer threads, so the nop workers only create and destroy transactions.
int
test_main(int argc, char *const argv[]) {
    struct cli_args args = get_default_args_for_perf();
    parse_stress_test_args(argc, argv, &args);
    args.single_txn = false;
    // strip out everything except transaction churn
    args.num_put_threads = 0;
    args.num_update_threads = 0;
    args.num_DBs = 0;
    args.num_elements = 0;
    stress_test_main(&args);
    return 0;
}
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved."
#ident "$Id: perf_txn_single_thread.cc 51911 2013-01-10 18:21:29Z zardosht $"
#include "test.h"
#include <stdio.h>
#include <stdlib.h>
#include <toku_pthread.h>
#include <unistd.h>
#include <memory.h>
#include <sys/stat.h>
#include <db.h>
#include "threaded_stress_test_helpers.h"
// The intent of this test is to measure how fast a single thread can
// commit and create transactions when there exist N transactions.
// Pool of open transactions shared with the worker operation; one randomly
// chosen slot is committed and re-begun per operation.
DB_TXN** txns;
// Number of entries in the txns pool (set from cli_args->txn_size).
int num_txns;
// Worker operation: pick a random slot in the global transaction pool,
// commit the transaction held there, and immediately begin a fresh
// read-only transaction in its place.  This measures begin/commit cost
// while num_txns transactions remain live.
static int commit_and_create_txn(DB_TXN* UU(txn), ARG arg, void* UU(operation_extra), void* UU(stats_extra)) {
    const int slot = random() % num_txns;
    DB_TXN *victim = txns[slot];
    int rc = victim->commit(victim, 0);
    CKERR(rc);
    rc = arg->env->txn_begin(arg->env, 0, &txns[slot], arg->txn_flags | DB_TXN_READ_ONLY);
    CKERR(rc);
    return 0;
}
// Fill a pool with cli_args->txn_size live snapshot transactions, run a
// single worker that keeps committing and re-beginning pool entries, then
// commit whatever is still open and free the pool.
static void
stress_table(DB_ENV* env, DB** dbp, struct cli_args *cli_args) {
    if (verbose) printf("starting running of stress\n");
    num_txns = cli_args->txn_size;
    XCALLOC_N(num_txns, txns);
    // populate every slot with a live snapshot transaction
    for (int slot = 0; slot < num_txns; slot++) {
        int rc = env->txn_begin(env, 0, &txns[slot], DB_TXN_SNAPSHOT);
        CKERR(rc);
    }
    struct arg worker_arg;
    arg_init(&worker_arg, dbp, env, cli_args);
    worker_arg.operation = commit_and_create_txn;
    run_workers(&worker_arg, 1, cli_args->num_seconds, false, cli_args);
    // tear down: commit every transaction left in the pool
    for (int slot = 0; slot < num_txns; slot++) {
        int rc = txns[slot]->commit(txns[slot], 0);
        CKERR(rc);
    }
    toku_free(txns);
    num_txns = 0;
}
// Entry point: run the perf harness with a minimal database, since this
// test only measures transaction begin/commit throughput.
int
test_main(int argc, char *const argv[]) {
    txns = NULL;
    num_txns = 0;
    struct cli_args args = get_default_args_for_perf();
    parse_stress_test_args(argc, argv, &args);
    args.single_txn = true;
    // this test is all about transactions, make the DB small
    args.num_DBs = 1;
    args.num_elements = 1;
    perf_test_main(&args);
    return 0;
}
...@@ -31,7 +31,7 @@ static int commit_and_create_txn( ...@@ -31,7 +31,7 @@ static int commit_and_create_txn(
int rand_txn_id = random() % num_txns; int rand_txn_id = random() % num_txns;
int r = txns[rand_txn_id]->commit(txns[rand_txn_id], 0); int r = txns[rand_txn_id]->commit(txns[rand_txn_id], 0);
CKERR(r); CKERR(r);
r = arg->env->txn_begin(arg->env, 0, &txns[rand_txn_id], arg->txn_type); r = arg->env->txn_begin(arg->env, 0, &txns[rand_txn_id], arg->txn_flags);
CKERR(r); CKERR(r);
return 0; return 0;
} }
......
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id: test_get_max_row_size.cc 45903 2012-07-19 13:06:39Z leifwalsh $"
#ident "Copyright (c) 2007-2012 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include "test.h"
// Insert two rows (1->10, 2->20) under a snapshot transaction, then read
// them back through a cursor opened inside a DB_TXN_SNAPSHOT|DB_TXN_READ_ONLY
// transaction, verifying a read-only snapshot transaction sees committed data.
int test_main(int argc, char * const argv[])
{
    int r;
    DB * db;
    DB_ENV * env;
    (void) argc;
    (void) argv;
    // start from a clean environment directory
    toku_os_recursive_delete(TOKU_TEST_FILENAME);
    r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); { int chk_r = r; CKERR(chk_r); }
    // set things up
    r = db_env_create(&env, 0);
    CKERR(r);
    r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, 0755);
    CKERR(r);
    r = db_create(&db, env, 0);
    CKERR(r);
    r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_CREATE, 0644);
    CKERR(r);
    // write two rows inside a (writable) snapshot transaction
    DB_TXN* txn = NULL;
    r = env->txn_begin(env, 0, &txn, DB_TXN_SNAPSHOT);
    CKERR(r);
    int k = 1;
    int v = 10;
    DBT key, val;
    r = db->put(
        db,
        txn,
        dbt_init(&key, &k, sizeof k),
        dbt_init(&val, &v, sizeof v),
        0
        );
    CKERR(r);
    k = 2;
    v = 20;
    r = db->put(
        db,
        txn,
        dbt_init(&key, &k, sizeof k),
        dbt_init(&val, &v, sizeof v),
        0
        );
    CKERR(r);
    r = txn->commit(txn, 0);
    CKERR(r);
    // now scan the table with a read-only snapshot transaction
    r = env->txn_begin(env, 0, &txn, DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY);
    CKERR(r);
    DBC* cursor = NULL;
    r = db->cursor(db, txn, &cursor, 0);
    CKERR(r);
    DBT key1, val1;
    memset(&key1, 0, sizeof key1);
    memset(&val1, 0, sizeof val1);
    // first row must be (1, 10)
    r = cursor->c_get(cursor, &key1, &val1, DB_FIRST);
    CKERR(r);
    invariant(key1.size == sizeof(int));
    invariant(*(int *)key1.data == 1);
    invariant(val1.size == sizeof(int));
    invariant(*(int *)val1.data == 10);
    // second row must be (2, 20)
    r = cursor->c_get(cursor, &key1, &val1, DB_NEXT);
    CKERR(r);
    invariant(key1.size == sizeof(int));
    invariant(*(int *)key1.data == 2);
    invariant(val1.size == sizeof(int));
    invariant(*(int *)val1.data == 20);
    r = cursor->c_close(cursor);
    CKERR(r);
    r = txn->commit(txn, 0);
    CKERR(r);
    // clean things up
    r = db->close(db, 0);
    CKERR(r);
    r = env->close(env, 0);
    CKERR(r);
    return 0;
}
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id: test_get_max_row_size.cc 45903 2012-07-19 13:06:39Z leifwalsh $"
#ident "Copyright (c) 2007-2012 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include "test.h"
int test_main(int argc, char * const argv[])
{
int r;
DB * db;
DB_ENV * env;
(void) argc;
(void) argv;
const char *db_env_dir = TOKU_TEST_FILENAME;
char rm_cmd[strlen(db_env_dir) + strlen("rm -rf ") + 1];
snprintf(rm_cmd, sizeof(rm_cmd), "rm -rf %s", db_env_dir);
r = system(rm_cmd); { int chk_r = r; CKERR(chk_r); }
r = toku_os_mkdir(db_env_dir, 0755); { int chk_r = r; CKERR(chk_r); }
// set things up
r = db_env_create(&env, 0);
CKERR(r);
r = env->open(env, db_env_dir, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, 0755);
CKERR(r);
r = db_create(&db, env, 0);
CKERR(r);
r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_CREATE, 0644);
CKERR(r);
DB_TXN* txn1 = NULL;
DB_TXN* txn2 = NULL;
r = env->txn_begin(env, 0, &txn1, DB_TXN_READ_ONLY);
CKERR(r);
r = env->txn_begin(env, 0, &txn2, DB_TXN_READ_ONLY);
CKERR(r);
r=db->pre_acquire_table_lock(db, txn1); CKERR(r);
r=db->pre_acquire_table_lock(db, txn2); CKERR2(r, DB_LOCK_NOTGRANTED);
r = txn1->commit(txn1, 0);
CKERR(r);
r = txn2->commit(txn2, 0);
CKERR(r);
// clean things up
r = db->close(db, 0);
CKERR(r);
r = env->close(env, 0);
CKERR(r);
return 0;
}
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id: test_get_max_row_size.cc 45903 2012-07-19 13:06:39Z leifwalsh $"
#ident "Copyright (c) 2007-2012 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include "test.h"
// Update callback registered with the environment; this test only issues
// updates through read-only transactions, which must be rejected with
// EINVAL before the callback is ever invoked — so reaching it is a bug.
// Fix: the original had `assert(set_val != NULL); return 0;` AFTER abort(),
// which is unreachable dead code; removed, with the parameter silenced.
static int update_fun(DB *UU(db),
                      const DBT *UU(key),
                      const DBT *UU(old_val), const DBT *UU(extra),
                      void (*set_val)(const DBT *new_val,
                                      void *set_extra),
                      void *UU(set_extra))
{
    (void) set_val;  // never called; silence unused-parameter warnings
    abort();
    return 0;  // unreachable; keeps the signature's return contract explicit
}
// Row-generator callback for put_multiple.  All put_multiple calls in this
// test are made under read-only transactions and must fail with EINVAL
// before row generation, so this callback must never run.
static int generate_row_for_put(DB *UU(dest_db), DB *UU(src_db),
                                DBT *UU(dest_key), DBT *UU(dest_val),
                                const DBT *UU(src_key), const DBT *UU(src_val)) {
    abort();
    return 0;
}
// Row-generator callback for del_multiple.  All del_multiple calls in this
// test are made under read-only transactions and must fail with EINVAL
// before row generation, so this callback must never run.
static int generate_row_for_del(DB *UU(dest_db), DB *UU(src_db),
                                DBT *UU(dest_key),
                                const DBT *UU(src_key), const DBT *UU(src_val)) {
    abort();
    return 0;
}
// Verify that every write-path operation is rejected with EINVAL when run
// under a transaction declared DB_TXN_READ_ONLY (combined with the given
// isolation flags): db open/create, indexer/loader creation, descriptor
// changes, put/del/update (single and multiple), and dbremove/dbrename.
static void test_invalid_ops(uint32_t iso_flags) {
    int r;
    DB * db;
    DB_ENV * env;
    toku_os_recursive_delete(TOKU_TEST_FILENAME);
    r = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); { int chk_r = r; CKERR(chk_r); }
    // set things up
    r = db_env_create(&env, 0);
    CKERR(r);
    // register callbacks that abort() if ever invoked: no write operation
    // should get far enough to call them under a read-only txn
    r = env->set_generate_row_callback_for_put(env,generate_row_for_put);
    CKERR(r);
    r = env->set_generate_row_callback_for_del(env,generate_row_for_del);
    CKERR(r);
    env->set_update(env, update_fun);
    r = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, 0755);
    CKERR(r);
    r = db_create(&db, env, 0);
    CKERR(r);
    // the transaction under test: read only, plus the requested isolation
    DB_TXN* txn = NULL;
    r = env->txn_begin(env, 0, &txn, iso_flags | DB_TXN_READ_ONLY);
    CKERR(r);
    // creating a dictionary inside a read-only txn must fail ...
    r = db->open(db, txn, "foo.db", NULL, DB_BTREE, DB_CREATE, 0644);
    CKERR2(r, EINVAL);
    // ... but a non-transactional open succeeds
    r = db->open(db, NULL, "foo.db", NULL, DB_BTREE, DB_CREATE, 0644);
    CKERR(r);
    int k = 1;
    int v = 10;
    DBT key, val;
    dbt_init(&key, &k, sizeof k);
    dbt_init(&val, &v, sizeof v);
    uint32_t db_flags = 0;
    uint32_t indexer_flags = 0;
    DB_INDEXER* indexer;
    // indexers write to the destination DB, so creation must be rejected
    r = env->create_indexer(
        env,
        txn,
        &indexer,
        db,
        1,
        &db,
        &db_flags,
        indexer_flags
        );
    CKERR2(r, EINVAL);
    // test invalid operations of ydb_db.cc,
    // db->open tested above
    DB_LOADER* loader;
    uint32_t put_flags = 0;
    uint32_t dbt_flags = 0;
    r = env->create_loader(env, txn, &loader, NULL, 1, &db, &put_flags, &dbt_flags, 0);
    CKERR2(r, EINVAL);
    r = db->change_descriptor(db, txn, &key, 0);
    CKERR2(r, EINVAL);
    //
    // test invalid operations return EINVAL from ydb_write.cc
    //
    r = db->put(db, txn, &key, &val,0);
    CKERR2(r, EINVAL);
    r = db->del(db, txn, &key, DB_DELETE_ANY);
    CKERR2(r, EINVAL);
    r = db->update(db, txn, &key, &val, 0);
    CKERR2(r, EINVAL);
    r = db->update_broadcast(db, txn, &val, 0);
    CKERR2(r, EINVAL);
    r = env->put_multiple(env, NULL, txn, &key, &val, 1, &db, &key, &val, 0);
    CKERR2(r, EINVAL);
    r = env->del_multiple(env, NULL, txn, &key, &val, 1, &db, &key, 0);
    CKERR2(r, EINVAL);
    uint32_t flags;
    r = env->update_multiple(
        env, NULL, txn,
        &key, &val,
        &key, &val,
        1, &db, &flags,
        1, &key,
        1, &val
        );
    CKERR2(r, EINVAL);
    r = db->close(db, 0);
    CKERR(r);
    // test invalid operations of ydb.cc, dbrename and dbremove
    r = env->dbremove(env, txn, "foo.db", NULL, 0);
    CKERR2(r, EINVAL);
    // test invalid operations of ydb.cc, dbrename and dbremove
    r = env->dbrename(env, txn, "foo.db", NULL, "bar.db", 0);
    CKERR2(r, EINVAL);
    r = txn->commit(txn, 0);
    CKERR(r);
    // clean things up
    r = env->close(env, 0);
    CKERR(r);
}
// Entry point: run the invalid-operation checks under every supported
// isolation level, including the default (0).
int test_main(int argc, char * const argv[]) {
    (void) argc;
    (void) argv;
    const uint32_t iso_levels[] = {
        0,
        DB_TXN_SNAPSHOT,
        DB_READ_COMMITTED,
        DB_READ_UNCOMMITTED,
    };
    for (size_t i = 0; i < sizeof(iso_levels) / sizeof(iso_levels[0]); i++) {
        test_invalid_ops(iso_levels[i]);
    }
    return 0;
}
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id: test_get_max_row_size.cc 45903 2012-07-19 13:06:39Z leifwalsh $"
#ident "Copyright (c) 2007-2012 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include "test.h"
// Check the parent/child rules for DB_TXN_READ_ONLY under the given
// isolation flags:
//  - a read-only child of a read-write parent is rejected with EINVAL;
//  - under a read-only parent, children may be created with or without
//    the DB_TXN_READ_ONLY flag (they presumably inherit read-only-ness
//    from the parent — see toku_txn_begin; TODO confirm).
static void test_read_txn_creation(DB_ENV* env, uint32_t iso_flags) {
    int r;
    DB_TXN* parent_txn = NULL;
    DB_TXN* child_txn = NULL;
    // read-write parent ...
    r = env->txn_begin(env, 0, &parent_txn, iso_flags);
    CKERR(r);
    // ... must not allow a read-only child
    r = env->txn_begin(env, parent_txn, &child_txn, iso_flags | DB_TXN_READ_ONLY);
    CKERR2(r, EINVAL);
    // a read-write child is fine
    r = env->txn_begin(env, parent_txn, &child_txn, iso_flags);
    CKERR(r);
    r = child_txn->commit(child_txn, 0);
    CKERR(r);
    r = parent_txn->commit(parent_txn, 0);
    CKERR(r);
    // read-only parent: children succeed with or without the flag
    r = env->txn_begin(env, 0, &parent_txn, iso_flags | DB_TXN_READ_ONLY);
    CKERR(r);
    r = env->txn_begin(env, parent_txn, &child_txn, iso_flags | DB_TXN_READ_ONLY);
    CKERR(r);
    r = child_txn->commit(child_txn, 0);
    CKERR(r);
    r = env->txn_begin(env, parent_txn, &child_txn, iso_flags);
    CKERR(r);
    r = child_txn->commit(child_txn, 0);
    CKERR(r);
    r = parent_txn->commit(parent_txn, 0);
    CKERR(r);
}
// Entry point: create a fresh environment and exercise the read-only
// transaction creation rules under each supported isolation level.
int test_main(int argc, char * const argv[])
{
    (void) argc;
    (void) argv;
    int rc;
    DB_ENV *env;
    toku_os_recursive_delete(TOKU_TEST_FILENAME);
    rc = toku_os_mkdir(TOKU_TEST_FILENAME, 0755); { int chk_r = rc; CKERR(chk_r); }
    // set things up
    rc = db_env_create(&env, 0);
    CKERR(rc);
    rc = env->open(env, TOKU_TEST_FILENAME, DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE, 0755);
    CKERR(rc);
    // repeat the checks under every isolation level, including the default
    const uint32_t iso_levels[] = {
        0,
        DB_TXN_SNAPSHOT,
        DB_READ_COMMITTED,
        DB_READ_UNCOMMITTED,
    };
    for (size_t i = 0; i < sizeof(iso_levels) / sizeof(iso_levels[0]); i++) {
        test_read_txn_creation(env, iso_levels[i]);
    }
    rc = env->close(env, 0);
    CKERR(rc);
    return 0;
}
...@@ -69,6 +69,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) { ...@@ -69,6 +69,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
soe[1].prefetch = false; soe[1].prefetch = false;
myargs[1].operation_extra = &soe[1]; myargs[1].operation_extra = &soe[1];
myargs[1].operation = scan_op; myargs[1].operation = scan_op;
myargs[1].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY;
// make the backward fast scanner // make the backward fast scanner
soe[2].fast = true; soe[2].fast = true;
...@@ -76,6 +77,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) { ...@@ -76,6 +77,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
soe[2].prefetch = false; soe[2].prefetch = false;
myargs[2].operation_extra = &soe[2]; myargs[2].operation_extra = &soe[2];
myargs[2].operation = scan_op; myargs[2].operation = scan_op;
myargs[2].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY;
// make the backward slow scanner // make the backward slow scanner
soe[3].fast = false; soe[3].fast = false;
......
...@@ -63,6 +63,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) { ...@@ -63,6 +63,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
soe[1].prefetch = false; soe[1].prefetch = false;
myargs[1].operation_extra = &soe[1]; myargs[1].operation_extra = &soe[1];
myargs[1].operation = scan_op; myargs[1].operation = scan_op;
myargs[1].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY;
// make the backward fast scanner // make the backward fast scanner
soe[2].fast = true; soe[2].fast = true;
...@@ -70,6 +71,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) { ...@@ -70,6 +71,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
soe[2].prefetch = false; soe[2].prefetch = false;
myargs[2].operation_extra = &soe[2]; myargs[2].operation_extra = &soe[2];
myargs[2].operation = scan_op; myargs[2].operation = scan_op;
myargs[2].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY;
// make the backward slow scanner // make the backward slow scanner
soe[3].fast = false; soe[3].fast = false;
......
...@@ -62,6 +62,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) { ...@@ -62,6 +62,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
soe[1].prefetch = false; soe[1].prefetch = false;
myargs[1].operation_extra = &soe[1]; myargs[1].operation_extra = &soe[1];
myargs[1].operation = scan_op; myargs[1].operation = scan_op;
myargs[1].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY;
// make the backward fast scanner // make the backward fast scanner
soe[2].fast = true; soe[2].fast = true;
...@@ -69,6 +70,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) { ...@@ -69,6 +70,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
soe[2].prefetch = false; soe[2].prefetch = false;
myargs[2].operation_extra = &soe[2]; myargs[2].operation_extra = &soe[2];
myargs[2].operation = scan_op; myargs[2].operation = scan_op;
myargs[2].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY;
// make the backward slow scanner // make the backward slow scanner
soe[3].fast = false; soe[3].fast = false;
......
...@@ -36,6 +36,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) { ...@@ -36,6 +36,7 @@ stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
soe[0].prefetch = false; soe[0].prefetch = false;
myargs[0].operation_extra = &soe[0]; myargs[0].operation_extra = &soe[0];
myargs[0].operation = scan_op; myargs[0].operation = scan_op;
myargs[0].txn_flags = DB_TXN_SNAPSHOT | DB_TXN_READ_ONLY;
// make the forward slow scanner // make the forward slow scanner
soe[1].fast = false; soe[1].fast = false;
......
...@@ -135,7 +135,7 @@ struct arg { ...@@ -135,7 +135,7 @@ struct arg {
// DB are in [0, num_elements) // DB are in [0, num_elements)
// false otherwise // false otherwise
int sleep_ms; // number of milliseconds to sleep between operations int sleep_ms; // number of milliseconds to sleep between operations
uint32_t txn_type; // isolation level for txn running operation uint32_t txn_flags; // isolation level for txn running operation
operation_t operation; // function that is the operation to be run operation_t operation; // function that is the operation to be run
void* operation_extra; // extra parameter passed to operation void* operation_extra; // extra parameter passed to operation
enum stress_lock_type lock_type; // states if operation must be exclusive, shared, or does not require locking enum stress_lock_type lock_type; // states if operation must be exclusive, shared, or does not require locking
...@@ -155,7 +155,7 @@ static void arg_init(struct arg *arg, DB **dbp, DB_ENV *env, struct cli_args *cl ...@@ -155,7 +155,7 @@ static void arg_init(struct arg *arg, DB **dbp, DB_ENV *env, struct cli_args *cl
arg->bounded_element_range = true; arg->bounded_element_range = true;
arg->sleep_ms = 0; arg->sleep_ms = 0;
arg->lock_type = STRESS_LOCK_NONE; arg->lock_type = STRESS_LOCK_NONE;
arg->txn_type = DB_TXN_SNAPSHOT; arg->txn_flags = DB_TXN_SNAPSHOT;
arg->operation_extra = nullptr; arg->operation_extra = nullptr;
arg->do_prepare = false; arg->do_prepare = false;
arg->prelock_updates = false; arg->prelock_updates = false;
...@@ -488,12 +488,12 @@ static void *worker(void *arg_v) { ...@@ -488,12 +488,12 @@ static void *worker(void *arg_v) {
printf("%lu starting %p\n", (unsigned long) intself, arg->operation); printf("%lu starting %p\n", (unsigned long) intself, arg->operation);
} }
if (arg->cli->single_txn) { if (arg->cli->single_txn) {
r = env->txn_begin(env, 0, &txn, arg->txn_type); CKERR(r); r = env->txn_begin(env, 0, &txn, arg->txn_flags); CKERR(r);
} }
while (run_test) { while (run_test) {
lock_worker_op(we); lock_worker_op(we);
if (!arg->cli->single_txn) { if (!arg->cli->single_txn) {
r = env->txn_begin(env, 0, &txn, arg->txn_type); CKERR(r); r = env->txn_begin(env, 0, &txn, arg->txn_flags); CKERR(r);
} }
r = arg->operation(txn, arg, arg->operation_extra, we->counters); r = arg->operation(txn, arg, arg->operation_extra, we->counters);
if (r==0 && !arg->cli->single_txn && arg->do_prepare) { if (r==0 && !arg->cli->single_txn && arg->do_prepare) {
...@@ -2654,7 +2654,7 @@ UU() stress_recover(struct cli_args *args) { ...@@ -2654,7 +2654,7 @@ UU() stress_recover(struct cli_args *args) {
DB_TXN* txn = nullptr; DB_TXN* txn = nullptr;
struct arg recover_args; struct arg recover_args;
arg_init(&recover_args, dbs, env, args); arg_init(&recover_args, dbs, env, args);
int r = env->txn_begin(env, 0, &txn, recover_args.txn_type); int r = env->txn_begin(env, 0, &txn, recover_args.txn_flags);
CKERR(r); CKERR(r);
struct scan_op_extra soe = { struct scan_op_extra soe = {
.fast = true, .fast = true,
......
...@@ -209,6 +209,16 @@ env_opened(DB_ENV *env) { ...@@ -209,6 +209,16 @@ env_opened(DB_ENV *env) {
return env->i->cachetable != 0; return env->i->cachetable != 0;
} }
static inline bool
txn_is_read_only(DB_TXN* txn) {
if (txn && (db_txn_struct_i(txn)->flags & DB_TXN_READ_ONLY)) {
return true;
}
return false;
}
#define HANDLE_READ_ONLY_TXN(txn) if(txn_is_read_only(txn)) return EINVAL;
void env_panic(DB_ENV * env, int cause, const char * msg); void env_panic(DB_ENV * env, int cause, const char * msg);
void env_note_db_opened(DB_ENV *env, DB *db); void env_note_db_opened(DB_ENV *env, DB *db);
void env_note_db_closed(DB_ENV *env, DB *db); void env_note_db_closed(DB_ENV *env, DB *db);
......
...@@ -1205,6 +1205,7 @@ static int ...@@ -1205,6 +1205,7 @@ static int
locked_env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, uint32_t flags) { locked_env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, uint32_t flags) {
int ret, r; int ret, r;
HANDLE_ILLEGAL_WORKING_PARENT_TXN(env, txn); HANDLE_ILLEGAL_WORKING_PARENT_TXN(env, txn);
HANDLE_READ_ONLY_TXN(txn);
DB_TXN *child_txn = NULL; DB_TXN *child_txn = NULL;
int using_txns = env->i->open_flags & DB_INIT_TXN; int using_txns = env->i->open_flags & DB_INIT_TXN;
...@@ -1235,6 +1236,7 @@ static int env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char ...@@ -1235,6 +1236,7 @@ static int env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char
static int static int
locked_env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char *dbname, const char *newname, uint32_t flags) { locked_env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char *dbname, const char *newname, uint32_t flags) {
int ret, r; int ret, r;
HANDLE_READ_ONLY_TXN(txn);
HANDLE_ILLEGAL_WORKING_PARENT_TXN(env, txn); HANDLE_ILLEGAL_WORKING_PARENT_TXN(env, txn);
DB_TXN *child_txn = NULL; DB_TXN *child_txn = NULL;
...@@ -2413,6 +2415,7 @@ env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, u ...@@ -2413,6 +2415,7 @@ env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, u
if (!env_opened(env) || flags != 0) { if (!env_opened(env) || flags != 0) {
return EINVAL; return EINVAL;
} }
HANDLE_READ_ONLY_TXN(txn);
if (dbname != NULL) { if (dbname != NULL) {
// env_dbremove_subdb() converts (fname, dbname) to dname // env_dbremove_subdb() converts (fname, dbname) to dname
return env_dbremove_subdb(env, txn, fname, dbname, flags); return env_dbremove_subdb(env, txn, fname, dbname, flags);
...@@ -2519,6 +2522,7 @@ env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char *dbname, co ...@@ -2519,6 +2522,7 @@ env_dbrename(DB_ENV *env, DB_TXN *txn, const char *fname, const char *dbname, co
if (!env_opened(env) || flags != 0) { if (!env_opened(env) || flags != 0) {
return EINVAL; return EINVAL;
} }
HANDLE_READ_ONLY_TXN(txn);
if (dbname != NULL) { if (dbname != NULL) {
// env_dbrename_subdb() converts (fname, dbname) to dname and (fname, newname) to newdname // env_dbrename_subdb() converts (fname, dbname) to dname and (fname, newname) to newdname
return env_dbrename_subdb(env, txn, fname, dbname, newname, flags); return env_dbrename_subdb(env, txn, fname, dbname, newname, flags);
......
...@@ -210,6 +210,7 @@ static uint64_t nontransactional_open_id = 0; ...@@ -210,6 +210,7 @@ static uint64_t nontransactional_open_id = 0;
static int static int
toku_db_open(DB * db, DB_TXN * txn, const char *fname, const char *dbname, DBTYPE dbtype, uint32_t flags, int mode) { toku_db_open(DB * db, DB_TXN * txn, const char *fname, const char *dbname, DBTYPE dbtype, uint32_t flags, int mode) {
HANDLE_PANICKED_DB(db); HANDLE_PANICKED_DB(db);
HANDLE_READ_ONLY_TXN(txn);
if (dbname != NULL) { if (dbname != NULL) {
return db_open_subdb(db, txn, fname, dbname, dbtype, flags, mode); return db_open_subdb(db, txn, fname, dbname, dbtype, flags, mode);
} }
...@@ -347,6 +348,7 @@ void toku_db_lt_on_destroy_callback(toku::locktree *lt) { ...@@ -347,6 +348,7 @@ void toku_db_lt_on_destroy_callback(toku::locktree *lt) {
int int
toku_db_open_iname(DB * db, DB_TXN * txn, const char *iname_in_env, uint32_t flags, int mode) { toku_db_open_iname(DB * db, DB_TXN * txn, const char *iname_in_env, uint32_t flags, int mode) {
//Set comparison functions if not yet set. //Set comparison functions if not yet set.
HANDLE_READ_ONLY_TXN(txn);
if (!db->i->key_compare_was_set && db->dbenv->i->bt_compare) { if (!db->i->key_compare_was_set && db->dbenv->i->bt_compare) {
toku_ft_set_bt_compare(db->i->ft_handle, db->dbenv->i->bt_compare); toku_ft_set_bt_compare(db->i->ft_handle, db->dbenv->i->bt_compare);
db->i->key_compare_was_set = true; db->i->key_compare_was_set = true;
...@@ -469,6 +471,7 @@ int toku_db_pre_acquire_fileops_lock(DB *db, DB_TXN *txn) { ...@@ -469,6 +471,7 @@ int toku_db_pre_acquire_fileops_lock(DB *db, DB_TXN *txn) {
static int static int
toku_db_change_descriptor(DB *db, DB_TXN* txn, const DBT* descriptor, uint32_t flags) { toku_db_change_descriptor(DB *db, DB_TXN* txn, const DBT* descriptor, uint32_t flags) {
HANDLE_PANICKED_DB(db); HANDLE_PANICKED_DB(db);
HANDLE_READ_ONLY_TXN(txn);
HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn); HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
int r = 0; int r = 0;
TOKUTXN ttxn = txn ? db_txn_struct_i(txn)->tokutxn : NULL; TOKUTXN ttxn = txn ? db_txn_struct_i(txn)->tokutxn : NULL;
...@@ -695,6 +698,7 @@ autotxn_db_getf_set (DB *db, DB_TXN *txn, uint32_t flags, DBT *key, YDB_CALLBACK ...@@ -695,6 +698,7 @@ autotxn_db_getf_set (DB *db, DB_TXN *txn, uint32_t flags, DBT *key, YDB_CALLBACK
static int static int
locked_db_open(DB *db, DB_TXN *txn, const char *fname, const char *dbname, DBTYPE dbtype, uint32_t flags, int mode) { locked_db_open(DB *db, DB_TXN *txn, const char *fname, const char *dbname, DBTYPE dbtype, uint32_t flags, int mode) {
int ret, r; int ret, r;
HANDLE_READ_ONLY_TXN(txn);
HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn); HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
// //
...@@ -1024,6 +1028,7 @@ load_inames(DB_ENV * env, DB_TXN * txn, int N, DB * dbs[/*N*/], const char * new ...@@ -1024,6 +1028,7 @@ load_inames(DB_ENV * env, DB_TXN * txn, int N, DB * dbs[/*N*/], const char * new
int int
locked_load_inames(DB_ENV * env, DB_TXN * txn, int N, DB * dbs[/*N*/], char * new_inames_in_env[/*N*/], LSN *load_lsn, bool mark_as_loader) { locked_load_inames(DB_ENV * env, DB_TXN * txn, int N, DB * dbs[/*N*/], char * new_inames_in_env[/*N*/], LSN *load_lsn, bool mark_as_loader) {
int ret, r; int ret, r;
HANDLE_READ_ONLY_TXN(txn);
DB_TXN *child_txn = NULL; DB_TXN *child_txn = NULL;
int using_txns = env->i->open_flags & DB_INIT_TXN; int using_txns = env->i->open_flags & DB_INIT_TXN;
......
...@@ -329,6 +329,36 @@ toku_txn_begin(DB_ENV *env, DB_TXN * stxn, DB_TXN ** txn, uint32_t flags) { ...@@ -329,6 +329,36 @@ toku_txn_begin(DB_ENV *env, DB_TXN * stxn, DB_TXN ** txn, uint32_t flags) {
uint32_t txn_flags = 0; uint32_t txn_flags = 0;
txn_flags |= DB_TXN_NOWAIT; //We do not support blocking locks. RFP remove this? txn_flags |= DB_TXN_NOWAIT; //We do not support blocking locks. RFP remove this?
// handle whether txn is declared as read only
bool parent_txn_declared_read_only =
stxn &&
(db_txn_struct_i(stxn)->flags & DB_TXN_READ_ONLY);
bool txn_declared_read_only = false;
if (flags & DB_TXN_READ_ONLY) {
txn_declared_read_only = true;
txn_flags |= DB_TXN_READ_ONLY;
flags &= ~(DB_TXN_READ_ONLY);
}
if (txn_declared_read_only && stxn &&
!parent_txn_declared_read_only
)
{
return toku_ydb_do_error(
env,
EINVAL,
"Current transaction set as read only, but parent transaction is not\n"
);
}
if (parent_txn_declared_read_only)
{
// don't require child transaction to also set transaction as read only
// if parent has already done so
txn_flags |= DB_TXN_READ_ONLY;
txn_declared_read_only = true;
}
TOKU_ISOLATION child_isolation = TOKU_ISO_SERIALIZABLE; TOKU_ISOLATION child_isolation = TOKU_ISO_SERIALIZABLE;
uint32_t iso_flags = flags & DB_ISOLATION_FLAGS; uint32_t iso_flags = flags & DB_ISOLATION_FLAGS;
if (!(iso_flags == 0 || if (!(iso_flags == 0 ||
...@@ -434,7 +464,8 @@ toku_txn_begin(DB_ENV *env, DB_TXN * stxn, DB_TXN ** txn, uint32_t flags) { ...@@ -434,7 +464,8 @@ toku_txn_begin(DB_ENV *env, DB_TXN * stxn, DB_TXN ** txn, uint32_t flags) {
TXNID_PAIR_NONE, TXNID_PAIR_NONE,
snapshot_type, snapshot_type,
result, result,
false false, // for_recovery
txn_declared_read_only // read_only
); );
if (r != 0) { if (r != 0) {
toku_free(result); toku_free(result);
......
...@@ -132,6 +132,7 @@ int ...@@ -132,6 +132,7 @@ int
toku_db_del(DB *db, DB_TXN *txn, DBT *key, uint32_t flags, bool holds_mo_lock) { toku_db_del(DB *db, DB_TXN *txn, DBT *key, uint32_t flags, bool holds_mo_lock) {
HANDLE_PANICKED_DB(db); HANDLE_PANICKED_DB(db);
HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn); HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
HANDLE_READ_ONLY_TXN(txn);
uint32_t unchecked_flags = flags; uint32_t unchecked_flags = flags;
//DB_DELETE_ANY means delete regardless of whether it exists in the db. //DB_DELETE_ANY means delete regardless of whether it exists in the db.
...@@ -175,6 +176,7 @@ int ...@@ -175,6 +176,7 @@ int
toku_db_put(DB *db, DB_TXN *txn, DBT *key, DBT *val, uint32_t flags, bool holds_mo_lock) { toku_db_put(DB *db, DB_TXN *txn, DBT *key, DBT *val, uint32_t flags, bool holds_mo_lock) {
HANDLE_PANICKED_DB(db); HANDLE_PANICKED_DB(db);
HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn); HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
HANDLE_READ_ONLY_TXN(txn);
int r = 0; int r = 0;
uint32_t lock_flags = get_prelocked_flags(flags); uint32_t lock_flags = get_prelocked_flags(flags);
...@@ -222,6 +224,7 @@ toku_db_update(DB *db, DB_TXN *txn, ...@@ -222,6 +224,7 @@ toku_db_update(DB *db, DB_TXN *txn,
uint32_t flags) { uint32_t flags) {
HANDLE_PANICKED_DB(db); HANDLE_PANICKED_DB(db);
HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn); HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
HANDLE_READ_ONLY_TXN(txn);
int r = 0; int r = 0;
uint32_t lock_flags = get_prelocked_flags(flags); uint32_t lock_flags = get_prelocked_flags(flags);
...@@ -263,6 +266,7 @@ toku_db_update_broadcast(DB *db, DB_TXN *txn, ...@@ -263,6 +266,7 @@ toku_db_update_broadcast(DB *db, DB_TXN *txn,
uint32_t flags) { uint32_t flags) {
HANDLE_PANICKED_DB(db); HANDLE_PANICKED_DB(db);
HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn); HANDLE_DB_ILLEGAL_WORKING_PARENT_TXN(db, txn);
HANDLE_READ_ONLY_TXN(txn);
int r = 0; int r = 0;
uint32_t lock_flags = get_prelocked_flags(flags); uint32_t lock_flags = get_prelocked_flags(flags);
...@@ -428,6 +432,7 @@ env_del_multiple( ...@@ -428,6 +432,7 @@ env_del_multiple(
DB_INDEXER* indexer = NULL; DB_INDEXER* indexer = NULL;
HANDLE_PANICKED_ENV(env); HANDLE_PANICKED_ENV(env);
HANDLE_READ_ONLY_TXN(txn);
uint32_t lock_flags[num_dbs]; uint32_t lock_flags[num_dbs];
uint32_t remaining_flags[num_dbs]; uint32_t remaining_flags[num_dbs];
...@@ -574,6 +579,7 @@ env_put_multiple_internal( ...@@ -574,6 +579,7 @@ env_put_multiple_internal(
DB_INDEXER* indexer = NULL; DB_INDEXER* indexer = NULL;
HANDLE_PANICKED_ENV(env); HANDLE_PANICKED_ENV(env);
HANDLE_READ_ONLY_TXN(txn);
uint32_t lock_flags[num_dbs]; uint32_t lock_flags[num_dbs];
uint32_t remaining_flags[num_dbs]; uint32_t remaining_flags[num_dbs];
...@@ -674,6 +680,7 @@ env_update_multiple(DB_ENV *env, DB *src_db, DB_TXN *txn, ...@@ -674,6 +680,7 @@ env_update_multiple(DB_ENV *env, DB *src_db, DB_TXN *txn,
HANDLE_PANICKED_ENV(env); HANDLE_PANICKED_ENV(env);
DB_INDEXER* indexer = NULL; DB_INDEXER* indexer = NULL;
HANDLE_READ_ONLY_TXN(txn);
if (!txn) { if (!txn) {
r = EINVAL; r = EINVAL;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment