Commit eb5ecd41 authored by Bradley C. Kuszmaul's avatar Bradley C. Kuszmaul Committed by Yoni Fogel

Merge changes from 2216a to main (except possibly for the windows subdirectory, which Yoni will do separately).

Merge changes from 2216a to main (except possibly for the windows subdirectory, which Yoni will do separately).
2216a is no longer valid.
Refs #2216. [t:2216].
{{{
svn merge -r17301:18179 https://svn.tokutek.com/tokudb/toku/tokudb.2216a/src/tests
}}}



git-svn-id: file:///svn/toku/tokudb@18181 c7de825b-a66e-492c-adef-691d508d4ae1
parent ce33ebcc
......@@ -50,7 +50,7 @@ typedef struct __toku_loader DB_LOADER;
struct __toku_loader_internal;
struct __toku_loader {
struct __toku_loader_internal *i;
int (*set_duplicate_callback)(DB_LOADER *loader, void (*duplicate)(DB *db, int i, DBT *key, DBT *val)); /* set the duplicate callback */
int (*set_error_callback)(DB_LOADER *loader, void (*error_cb)(DB *db, int i, int err, DBT *key, DBT *val, void *extra)); /* set the error callback */
int (*set_poll_function)(DB_LOADER *loader, int (*poll_func)(void *extra, float progress)); /* set the polling function */
int (*put)(DB_LOADER *loader, DBT *key, DBT* val); /* give a row to the loader */
int (*close)(DB_LOADER *loader); /* finish loading, free memory */
......@@ -188,6 +188,8 @@ typedef enum {
#define TOKUDB_DICTIONARY_NO_HEADER -100006
#define TOKUDB_FOUND_BUT_REJECTED -100002
#define TOKUDB_USER_CALLBACK_ERROR -100003
/* LOADER flags */
#define LOADER_USE_PUTS 1
/* in wrap mode, top-level function txn_begin is renamed, but the field isn't renamed, so we have to hack it here.*/
#ifdef _TOKUDB_WRAP_H
#undef txn_begin
......@@ -206,8 +208,8 @@ struct __toku_db_env {
void *app_private; /* 32-bit offset=36 size=4, 64=bit offset=72 size=8 */
int (*get_engine_status) (DB_ENV*, ENGINE_STATUS*) /* Fill in status struct */;
int (*get_engine_status_text) (DB_ENV*, char*, int) /* Fill in status text */;
int (*get_iname) (DB_ENV* env, DBT* dname_dbt, DBT* iname_dbt) /* lookup existing iname */;
int (*create_loader) (DB_ENV *env, DB_TXN *txn, DB_LOADER **blp, DB *src_db, int N, DB *dbs[/*N*/], uint32_t flags[/*N*/], uint32_t dbt_flags[/*N*/], void *extra);
int (*get_iname) (DB_ENV* env, DBT* dname_dbt, DBT* iname_dbt) /* FOR TEST ONLY: lookup existing iname */;
int (*create_loader) (DB_ENV *env, DB_TXN *txn, DB_LOADER **blp, DB *src_db, int N, DB *dbs[/*N*/], uint32_t db_flags[/*N*/], uint32_t dbt_flags[/*N*/], uint32_t loader_flags, void *extra);
int (*put_multiple) (DB_ENV *env, DB *src_db, DB_TXN *txn,
const DBT *key, const DBT *val,
uint32_t num_dbs, DB **db_array, DBT *keys, DBT *vals, uint32_t *flags_array,
......
......@@ -50,7 +50,7 @@ typedef struct __toku_loader DB_LOADER;
struct __toku_loader_internal;
struct __toku_loader {
struct __toku_loader_internal *i;
int (*set_duplicate_callback)(DB_LOADER *loader, void (*duplicate)(DB *db, int i, DBT *key, DBT *val)); /* set the duplicate callback */
int (*set_error_callback)(DB_LOADER *loader, void (*error_cb)(DB *db, int i, int err, DBT *key, DBT *val, void *extra)); /* set the error callback */
int (*set_poll_function)(DB_LOADER *loader, int (*poll_func)(void *extra, float progress)); /* set the polling function */
int (*put)(DB_LOADER *loader, DBT *key, DBT* val); /* give a row to the loader */
int (*close)(DB_LOADER *loader); /* finish loading, free memory */
......@@ -190,6 +190,8 @@ typedef enum {
#define TOKUDB_DICTIONARY_NO_HEADER -100006
#define TOKUDB_FOUND_BUT_REJECTED -100002
#define TOKUDB_USER_CALLBACK_ERROR -100003
/* LOADER flags */
#define LOADER_USE_PUTS 1
/* in wrap mode, top-level function txn_begin is renamed, but the field isn't renamed, so we have to hack it here.*/
#ifdef _TOKUDB_WRAP_H
#undef txn_begin
......@@ -208,8 +210,8 @@ struct __toku_db_env {
int (*get_engine_status) (DB_ENV*, ENGINE_STATUS*) /* Fill in status struct */;
int (*get_engine_status_text) (DB_ENV*, char*, int) /* Fill in status text */;
void *app_private; /* 32-bit offset=44 size=4, 64=bit offset=88 size=8 */
int (*get_iname) (DB_ENV* env, DBT* dname_dbt, DBT* iname_dbt) /* lookup existing iname */;
int (*create_loader) (DB_ENV *env, DB_TXN *txn, DB_LOADER **blp, DB *src_db, int N, DB *dbs[/*N*/], uint32_t flags[/*N*/], uint32_t dbt_flags[/*N*/], void *extra);
int (*get_iname) (DB_ENV* env, DBT* dname_dbt, DBT* iname_dbt) /* FOR TEST ONLY: lookup existing iname */;
int (*create_loader) (DB_ENV *env, DB_TXN *txn, DB_LOADER **blp, DB *src_db, int N, DB *dbs[/*N*/], uint32_t db_flags[/*N*/], uint32_t dbt_flags[/*N*/], uint32_t loader_flags, void *extra);
int (*put_multiple) (DB_ENV *env, DB *src_db, DB_TXN *txn,
const DBT *key, const DBT *val,
uint32_t num_dbs, DB **db_array, DBT *keys, DBT *vals, uint32_t *flags_array,
......
......@@ -50,7 +50,7 @@ typedef struct __toku_loader DB_LOADER;
struct __toku_loader_internal;
struct __toku_loader {
struct __toku_loader_internal *i;
int (*set_duplicate_callback)(DB_LOADER *loader, void (*duplicate)(DB *db, int i, DBT *key, DBT *val)); /* set the duplicate callback */
int (*set_error_callback)(DB_LOADER *loader, void (*error_cb)(DB *db, int i, int err, DBT *key, DBT *val, void *extra)); /* set the error callback */
int (*set_poll_function)(DB_LOADER *loader, int (*poll_func)(void *extra, float progress)); /* set the polling function */
int (*put)(DB_LOADER *loader, DBT *key, DBT* val); /* give a row to the loader */
int (*close)(DB_LOADER *loader); /* finish loading, free memory */
......@@ -191,6 +191,8 @@ typedef enum {
#define TOKUDB_DICTIONARY_NO_HEADER -100006
#define TOKUDB_FOUND_BUT_REJECTED -100002
#define TOKUDB_USER_CALLBACK_ERROR -100003
/* LOADER flags */
#define LOADER_USE_PUTS 1
/* in wrap mode, top-level function txn_begin is renamed, but the field isn't renamed, so we have to hack it here.*/
#ifdef _TOKUDB_WRAP_H
#undef txn_begin
......@@ -209,8 +211,8 @@ struct __toku_db_env {
int (*get_engine_status) (DB_ENV*, ENGINE_STATUS*) /* Fill in status struct */;
int (*get_engine_status_text) (DB_ENV*, char*, int) /* Fill in status text */;
void *app_private; /* 32-bit offset=44 size=4, 64=bit offset=88 size=8 */
int (*get_iname) (DB_ENV* env, DBT* dname_dbt, DBT* iname_dbt) /* lookup existing iname */;
int (*create_loader) (DB_ENV *env, DB_TXN *txn, DB_LOADER **blp, DB *src_db, int N, DB *dbs[/*N*/], uint32_t flags[/*N*/], uint32_t dbt_flags[/*N*/], void *extra);
int (*get_iname) (DB_ENV* env, DBT* dname_dbt, DBT* iname_dbt) /* FOR TEST ONLY: lookup existing iname */;
int (*create_loader) (DB_ENV *env, DB_TXN *txn, DB_LOADER **blp, DB *src_db, int N, DB *dbs[/*N*/], uint32_t db_flags[/*N*/], uint32_t dbt_flags[/*N*/], uint32_t loader_flags, void *extra);
int (*put_multiple) (DB_ENV *env, DB *src_db, DB_TXN *txn,
const DBT *key, const DBT *val,
uint32_t num_dbs, DB **db_array, DBT *keys, DBT *vals, uint32_t *flags_array,
......
......@@ -50,7 +50,7 @@ typedef struct __toku_loader DB_LOADER;
struct __toku_loader_internal;
struct __toku_loader {
struct __toku_loader_internal *i;
int (*set_duplicate_callback)(DB_LOADER *loader, void (*duplicate)(DB *db, int i, DBT *key, DBT *val)); /* set the duplicate callback */
int (*set_error_callback)(DB_LOADER *loader, void (*error_cb)(DB *db, int i, int err, DBT *key, DBT *val, void *extra)); /* set the error callback */
int (*set_poll_function)(DB_LOADER *loader, int (*poll_func)(void *extra, float progress)); /* set the polling function */
int (*put)(DB_LOADER *loader, DBT *key, DBT* val); /* give a row to the loader */
int (*close)(DB_LOADER *loader); /* finish loading, free memory */
......@@ -191,6 +191,8 @@ typedef enum {
#define TOKUDB_DICTIONARY_NO_HEADER -100006
#define TOKUDB_FOUND_BUT_REJECTED -100002
#define TOKUDB_USER_CALLBACK_ERROR -100003
/* LOADER flags */
#define LOADER_USE_PUTS 1
/* in wrap mode, top-level function txn_begin is renamed, but the field isn't renamed, so we have to hack it here.*/
#ifdef _TOKUDB_WRAP_H
#undef txn_begin
......@@ -208,8 +210,8 @@ struct __toku_db_env {
int (*set_default_dup_compare) (DB_ENV*,int (*bt_compare) (DB *, const DBT *, const DBT *)) /* Set default (val) comparison function for all DBs in this environment. Required for RECOVERY since you cannot open the DBs manually. */;
int (*get_engine_status) (DB_ENV*, ENGINE_STATUS*) /* Fill in status struct */;
int (*get_engine_status_text) (DB_ENV*, char*, int) /* Fill in status text */;
int (*get_iname) (DB_ENV* env, DBT* dname_dbt, DBT* iname_dbt) /* lookup existing iname */;
int (*create_loader) (DB_ENV *env, DB_TXN *txn, DB_LOADER **blp, DB *src_db, int N, DB *dbs[/*N*/], uint32_t flags[/*N*/], uint32_t dbt_flags[/*N*/], void *extra);
int (*get_iname) (DB_ENV* env, DBT* dname_dbt, DBT* iname_dbt) /* FOR TEST ONLY: lookup existing iname */;
int (*create_loader) (DB_ENV *env, DB_TXN *txn, DB_LOADER **blp, DB *src_db, int N, DB *dbs[/*N*/], uint32_t db_flags[/*N*/], uint32_t dbt_flags[/*N*/], uint32_t loader_flags, void *extra);
void *app_private; /* 32-bit offset=52 size=4, 64=bit offset=104 size=8 */
int (*put_multiple) (DB_ENV *env, DB *src_db, DB_TXN *txn,
const DBT *key, const DBT *val,
......
......@@ -50,7 +50,7 @@ typedef struct __toku_loader DB_LOADER;
struct __toku_loader_internal;
struct __toku_loader {
struct __toku_loader_internal *i;
int (*set_duplicate_callback)(DB_LOADER *loader, void (*duplicate)(DB *db, int i, DBT *key, DBT *val)); /* set the duplicate callback */
int (*set_error_callback)(DB_LOADER *loader, void (*error_cb)(DB *db, int i, int err, DBT *key, DBT *val, void *extra)); /* set the error callback */
int (*set_poll_function)(DB_LOADER *loader, int (*poll_func)(void *extra, float progress)); /* set the polling function */
int (*put)(DB_LOADER *loader, DBT *key, DBT* val); /* give a row to the loader */
int (*close)(DB_LOADER *loader); /* finish loading, free memory */
......@@ -193,6 +193,8 @@ typedef enum {
#define TOKUDB_DICTIONARY_NO_HEADER -100006
#define TOKUDB_FOUND_BUT_REJECTED -100002
#define TOKUDB_USER_CALLBACK_ERROR -100003
/* LOADER flags */
#define LOADER_USE_PUTS 1
/* in wrap mode, top-level function txn_begin is renamed, but the field isn't renamed, so we have to hack it here.*/
#ifdef _TOKUDB_WRAP_H
#undef txn_begin
......@@ -210,8 +212,8 @@ struct __toku_db_env {
int (*set_default_dup_compare) (DB_ENV*,int (*bt_compare) (DB *, const DBT *, const DBT *)) /* Set default (val) comparison function for all DBs in this environment. Required for RECOVERY since you cannot open the DBs manually. */;
int (*get_engine_status) (DB_ENV*, ENGINE_STATUS*) /* Fill in status struct */;
int (*get_engine_status_text) (DB_ENV*, char*, int) /* Fill in status text */;
int (*get_iname) (DB_ENV* env, DBT* dname_dbt, DBT* iname_dbt) /* lookup existing iname */;
int (*create_loader) (DB_ENV *env, DB_TXN *txn, DB_LOADER **blp, DB *src_db, int N, DB *dbs[/*N*/], uint32_t flags[/*N*/], uint32_t dbt_flags[/*N*/], void *extra);
int (*get_iname) (DB_ENV* env, DBT* dname_dbt, DBT* iname_dbt) /* FOR TEST ONLY: lookup existing iname */;
int (*create_loader) (DB_ENV *env, DB_TXN *txn, DB_LOADER **blp, DB *src_db, int N, DB *dbs[/*N*/], uint32_t db_flags[/*N*/], uint32_t dbt_flags[/*N*/], uint32_t loader_flags, void *extra);
void *app_private; /* 32-bit offset=52 size=4, 64=bit offset=104 size=8 */
int (*put_multiple) (DB_ENV *env, DB *src_db, DB_TXN *txn,
const DBT *key, const DBT *val,
......
......@@ -13,7 +13,7 @@
#define VISIBLE "__attribute__((__visibility__(\"default\")))"
void print_dbtype(void) {
static void print_dbtype(void) {
/* DBTYPE is mentioned by db_open.html */
printf("typedef enum {\n");
printf(" DB_BTREE=%d,\n", DB_BTREE);
......@@ -69,7 +69,7 @@ enum {
TOKUDB_DICTIONARY_NO_HEADER = -100006
};
void print_defines (void) {
static void print_defines (void) {
printf("#ifndef _TOKUDB_WRAP_H\n");
dodefine(DB_VERB_DEADLOCK);
dodefine(DB_VERB_RECOVERY);
......@@ -192,6 +192,10 @@ void print_defines (void) {
dodefine(TOKUDB_DICTIONARY_NO_HEADER);
dodefine(TOKUDB_FOUND_BUT_REJECTED);
dodefine(TOKUDB_USER_CALLBACK_ERROR);
/* LOADER flags */
printf("/* LOADER flags */\n");
printf("#define LOADER_USE_PUTS 1\n"); // minimize space usage
}
//#define DECL_LIMIT 100
......@@ -222,7 +226,7 @@ struct fieldinfo {
enum need_internal_type { NO_INTERNAL=0, INTERNAL_NAMED=1, INTERNAL_AT_END=2};
void print_struct (const char *structname, enum need_internal_type need_internal, struct fieldinfo *fields32, struct fieldinfo *fields64, unsigned int N, const char *extra_decls[]) {
static void print_struct (const char *structname, enum need_internal_type need_internal, struct fieldinfo *fields32, struct fieldinfo *fields64, unsigned int N, const char *extra_decls[]) {
unsigned int i;
unsigned int current_32 = 0;
unsigned int current_64 = 0;
......@@ -400,7 +404,7 @@ int main (int argc __attribute__((__unused__)), char *argv[] __attribute__((__un
printf("struct __toku_loader_internal;\n");
printf("struct __toku_loader {\n");
printf(" struct __toku_loader_internal *i;\n");
printf(" int (*set_duplicate_callback)(DB_LOADER *loader, void (*duplicate)(DB *db, int i, DBT *key, DBT *val)); /* set the duplicate callback */\n");
printf(" int (*set_error_callback)(DB_LOADER *loader, void (*error_cb)(DB *db, int i, int err, DBT *key, DBT *val, void *extra)); /* set the error callback */\n");
printf(" int (*set_poll_function)(DB_LOADER *loader, int (*poll_func)(void *extra, float progress)); /* set the polling function */\n");
printf(" int (*put)(DB_LOADER *loader, DBT *key, DBT* val); /* give a row to the loader */\n");
printf(" int (*close)(DB_LOADER *loader); /* finish loading, free memory */\n");
......@@ -480,8 +484,8 @@ int main (int argc __attribute__((__unused__)), char *argv[] __attribute__((__un
"int (*set_default_dup_compare) (DB_ENV*,int (*bt_compare) (DB *, const DBT *, const DBT *)) /* Set default (val) comparison function for all DBs in this environment. Required for RECOVERY since you cannot open the DBs manually. */",
"int (*get_engine_status) (DB_ENV*, ENGINE_STATUS*) /* Fill in status struct */",
"int (*get_engine_status_text) (DB_ENV*, char*, int) /* Fill in status text */",
"int (*get_iname) (DB_ENV* env, DBT* dname_dbt, DBT* iname_dbt) /* lookup existing iname */",
"int (*create_loader) (DB_ENV *env, DB_TXN *txn, DB_LOADER **blp, DB *src_db, int N, DB *dbs[/*N*/], uint32_t flags[/*N*/], uint32_t dbt_flags[/*N*/], void *extra)",
"int (*get_iname) (DB_ENV* env, DBT* dname_dbt, DBT* iname_dbt) /* FOR TEST ONLY: lookup existing iname */",
"int (*create_loader) (DB_ENV *env, DB_TXN *txn, DB_LOADER **blp, DB *src_db, int N, DB *dbs[/*N*/], uint32_t db_flags[/*N*/], uint32_t dbt_flags[/*N*/], uint32_t loader_flags, void *extra)",
"int (*put_multiple) (DB_ENV *env, DB *src_db, DB_TXN *txn,\n"
" const DBT *key, const DBT *val,\n"
" uint32_t num_dbs, DB **db_array, DBT *keys, DBT *vals, uint32_t *flags_array,\n"
......
......@@ -50,7 +50,7 @@ typedef struct __toku_loader DB_LOADER;
struct __toku_loader_internal;
struct __toku_loader {
struct __toku_loader_internal *i;
int (*set_duplicate_callback)(DB_LOADER *loader, void (*duplicate)(DB *db, int i, DBT *key, DBT *val)); /* set the duplicate callback */
int (*set_error_callback)(DB_LOADER *loader, void (*error_cb)(DB *db, int i, int err, DBT *key, DBT *val, void *extra)); /* set the error callback */
int (*set_poll_function)(DB_LOADER *loader, int (*poll_func)(void *extra, float progress)); /* set the polling function */
int (*put)(DB_LOADER *loader, DBT *key, DBT* val); /* give a row to the loader */
int (*close)(DB_LOADER *loader); /* finish loading, free memory */
......@@ -193,6 +193,8 @@ typedef enum {
#define TOKUDB_DICTIONARY_NO_HEADER -100006
#define TOKUDB_FOUND_BUT_REJECTED -100002
#define TOKUDB_USER_CALLBACK_ERROR -100003
/* LOADER flags */
#define LOADER_USE_PUTS 1
/* in wrap mode, top-level function txn_begin is renamed, but the field isn't renamed, so we have to hack it here.*/
#ifdef _TOKUDB_WRAP_H
#undef txn_begin
......@@ -210,8 +212,8 @@ struct __toku_db_env {
int (*set_default_dup_compare) (DB_ENV*,int (*bt_compare) (DB *, const DBT *, const DBT *)) /* Set default (val) comparison function for all DBs in this environment. Required for RECOVERY since you cannot open the DBs manually. */;
int (*get_engine_status) (DB_ENV*, ENGINE_STATUS*) /* Fill in status struct */;
int (*get_engine_status_text) (DB_ENV*, char*, int) /* Fill in status text */;
int (*get_iname) (DB_ENV* env, DBT* dname_dbt, DBT* iname_dbt) /* lookup existing iname */;
int (*create_loader) (DB_ENV *env, DB_TXN *txn, DB_LOADER **blp, DB *src_db, int N, DB *dbs[/*N*/], uint32_t flags[/*N*/], uint32_t dbt_flags[/*N*/], void *extra);
int (*get_iname) (DB_ENV* env, DBT* dname_dbt, DBT* iname_dbt) /* FOR TEST ONLY: lookup existing iname */;
int (*create_loader) (DB_ENV *env, DB_TXN *txn, DB_LOADER **blp, DB *src_db, int N, DB *dbs[/*N*/], uint32_t db_flags[/*N*/], uint32_t dbt_flags[/*N*/], uint32_t loader_flags, void *extra);
void *app_private;
int (*put_multiple) (DB_ENV *env, DB *src_db, DB_TXN *txn,
const DBT *key, const DBT *val,
......
......@@ -50,7 +50,7 @@ typedef struct __toku_loader DB_LOADER;
struct __toku_loader_internal;
struct __toku_loader {
struct __toku_loader_internal *i;
int (*set_duplicate_callback)(DB_LOADER *loader, void (*duplicate)(DB *db, int i, DBT *key, DBT *val)); /* set the duplicate callback */
int (*set_error_callback)(DB_LOADER *loader, void (*error_cb)(DB *db, int i, int err, DBT *key, DBT *val, void *extra)); /* set the error callback */
int (*set_poll_function)(DB_LOADER *loader, int (*poll_func)(void *extra, float progress)); /* set the polling function */
int (*put)(DB_LOADER *loader, DBT *key, DBT* val); /* give a row to the loader */
int (*close)(DB_LOADER *loader); /* finish loading, free memory */
......@@ -193,6 +193,8 @@ typedef enum {
#define TOKUDB_DICTIONARY_NO_HEADER -100006
#define TOKUDB_FOUND_BUT_REJECTED -100002
#define TOKUDB_USER_CALLBACK_ERROR -100003
/* LOADER flags */
#define LOADER_USE_PUTS 1
/* in wrap mode, top-level function txn_begin is renamed, but the field isn't renamed, so we have to hack it here.*/
#ifdef _TOKUDB_WRAP_H
#undef txn_begin
......@@ -210,8 +212,8 @@ struct __toku_db_env {
int (*set_default_dup_compare) (DB_ENV*,int (*bt_compare) (DB *, const DBT *, const DBT *)) /* Set default (val) comparison function for all DBs in this environment. Required for RECOVERY since you cannot open the DBs manually. */;
int (*get_engine_status) (DB_ENV*, ENGINE_STATUS*) /* Fill in status struct */;
int (*get_engine_status_text) (DB_ENV*, char*, int) /* Fill in status text */;
int (*get_iname) (DB_ENV* env, DBT* dname_dbt, DBT* iname_dbt) /* lookup existing iname */;
int (*create_loader) (DB_ENV *env, DB_TXN *txn, DB_LOADER **blp, DB *src_db, int N, DB *dbs[/*N*/], uint32_t flags[/*N*/], uint32_t dbt_flags[/*N*/], void *extra);
int (*get_iname) (DB_ENV* env, DBT* dname_dbt, DBT* iname_dbt) /* FOR TEST ONLY: lookup existing iname */;
int (*create_loader) (DB_ENV *env, DB_TXN *txn, DB_LOADER **blp, DB *src_db, int N, DB *dbs[/*N*/], uint32_t db_flags[/*N*/], uint32_t dbt_flags[/*N*/], uint32_t loader_flags, void *extra);
void *app_private;
int (*put_multiple) (DB_ENV *env, DB *src_db, DB_TXN *txn,
const DBT *key, const DBT *val,
......
......@@ -96,7 +96,7 @@ toku_os_full_write (int fd, const void *buf, size_t len) {
assert(len == 0);
}
ssize_t
int
toku_os_write (int fd, const void *buf, size_t len) {
while (len > 0) {
ssize_t r;
......@@ -105,8 +105,8 @@ toku_os_write (int fd, const void *buf, size_t len) {
} else {
r = write(fd, buf, len);
}
if (r <= 0)
return r;
if (r < 0)
return errno;
len -= r;
buf += r;
}
......
......@@ -86,7 +86,6 @@ toku_os_get_unique_file_id(int fildes, struct fileid *id) {
memset(id, 0, sizeof(*id));
int r=fstat(fildes, &statbuf);
if (r==0) {
memset(id, 0, sizeof(*id));
id->st_dev = statbuf.st_dev;
id->st_ino = statbuf.st_ino;
}
......
LIBPORTABILITY = lib/libtokuportability.$(SOEXT)
include linux/tests/make.include
LIBPORTABILITY = lib/libtokuportability.$(AEXT)
linux/build: $(LIBPORTABILITY)
linux/build: $(LIBPORTABILITY) linux/tests/build
LINUX_SRCS = $(wildcard linux/*.c)
LINUX_OBJS = $(patsubst %.c,%.$(OEXT),$(LINUX_SRCS))
$(LINUX_OBJS): CFLAGS += -DTOKU_ALLOW_DEPRECATED -D_GNU_SOURCE
$(LINUX_OBJS): CPPFLAGS_DIRECTORY = -Itoku_include -Ilinux
$(LINUX_OBJS): CFLAGS_DIRECTORY = -DTOKU_ALLOW_DEPRECATED
$(LINUX_OBJS): LDFLAGS_DIRECTORY =
$(LINUX_OBJS): LOADLIBES_DIRECTORY =
$(LINUX_OBJS): VISIBILITY=-fvisibility=default
$(LIBPORTABILITY): $(LINUX_OBJS)
$(LIBPORTABILITY): VISIBILITY=-fvisibility=default
$(LIBPORTABILITY): LOADLIBES=
$(LIBPORTABILITY): LDFLAGS=
linux/check: linux/tests/check
......@@ -3,7 +3,7 @@ CPPFLAGS = -D_GNU_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE
CPPFLAGS += -I../../toku_include -I.. -I.
CFLAGS = -Wall -Werror -g -O0 -std=c99
ifeq ($(GCCVERSION),4.4.2)
CFLAGS += -Wno-deprecated
CFLAGS += -Wno-deprecated
endif
ifneq ($(GCOV),)
CFLAGS += -fprofile-arcs -ftest-coverage -DGCOV
......@@ -39,11 +39,13 @@ test-gettime: test-gettime.c
build: $(TARGETS)
check: $(TARGETS) $(RUNTARGETS);
# pwrite4g needs an argument to tell it which directory to write temporary files
test-pwrite4g.tdbrun: TEST_EXTRA_ARGS=.
%.tdbrun: %
ifeq ($(VGRIND),)
./$< $(SUMMARIZE_CMD)
./$< $(TEST_EXTRA_ARGS) $(SUMMARIZE_CMD)
else
$(VGRIND) --error-exitcode=1 --quiet --leak-check=full --log-file=$<.check.valgrind ./$< >$<.check.output 2>&1; \
$(VGRIND) --error-exitcode=1 --quiet --leak-check=full --log-file=$<.check.valgrind ./$< $(TEST_EXTRA_ARGS) >$<.check.output 2>&1; \
if [ $$? = 0 ] ; then \
lines=`cat $<.check.valgrind | wc -l`; \
if [ $$lines -ne 0 ] ; then cat $<.check.valgrind; test 0 = 1; fi \
......
......@@ -3,12 +3,16 @@ LINUX_TESTS_TARGETS = $(patsubst %.c,%,$(LINUX_TESTS_SRCS))
LINUX_TESTS_RUNTARGETS = $(patsubst %,%.tdbrun,$(LINUX_TESTS_TARGETS))
linux/tests/build: $(LINUX_TESTS_TARGETS)
linux/tests/check: $(LINUX_TESTS_RUNTARGETS)
$(LINUX_TESTS_TARGETS): | $(LIBPORTABILITY)
linux/tests/%: CPPFLAGS = -D_GNU_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE -Itoku_include -Ilinux -Ilinux/tests
linux/tests/%: CFLAGS = -Wall -Werror -g -O0 -std=c99 $(CC_VERSION_SPECIFIC_FLAGS)
linux/tests/%: LOADLIBES = lib/libtokuportability.a -lpthread
# Every directory must define these rules for its make rules. And not with +=
linux/tests/%: CFLAGS_DIRECTORY =
linux/tests/%: CPPFLAGS_DIRECTORY = -Itoku_include -Ilinux -Ilinux/tests
linux/tests/%: LOADLIBES_DIRECTORY = -Llib -ltokuportability -lpthread
linux/tests/test-pwrite4g.tdbrun: TEST_EXTRA_ARGS=linux/tests
linux/tests/%.tdbrun: linux/tests/%
$(VGRIND) ./$< $(SUMMARIZE_CMD)
$(VGRIND) ./$< $(TEST_EXTRA_ARGS) $(SUMMARIZE_CMD)
linux/tests/test-gettime: LOADLIBES += -lrt
linux/tests/foo:
echo $(VGRIND)
......@@ -23,6 +23,8 @@ AEXT_linux_gcc =a
AEXT_windows_icc=lib
AEXT=$(AEXT_$(OS)_$(COMPILER))
SOEXT=so
COMBINE_C_windows_icc = -Qipo-c
COMBINE_C_linux_icc = -ipo-c
COMBINE_C_linux_gcc = -combine -c
......@@ -30,7 +32,7 @@ COMBINE_C = $(COMBINE_C_$(OS)_$(COMPILER))
# Need XOPEN_SOURCE=600 to get strtoll()
ifeq ($(SYSTEM),linux)
CPPFLAGS+=-D_SVID_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE -D_XOPEN_SOURCE=600
CPPFLAGS+=-D_GNU_SOURCE -D_FILE_OFFSET_BITS=64
endif
ifeq ($(SYSTEM),sunos)
CPPFLAGS+=-D_SVID_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE -D_XOPEN_SOURCE=600
......@@ -188,7 +190,9 @@ endif
# TODO: 1398 Get rid of this hack.
CPPFLAGS+=-DBRT_LEVEL_STRADDLE_CALLBACK_LOGIC_NOT_READY=1
CPPFLAGS+=-Itoku_include -I$(OS)
CPPFLAGS+=$(CPP_DIRECTORIES)
LDFLAGS = -Wl,-rpath,libo
ifeq ($(TOKU_SKIP_CXX),1)
SRCDIRS_CXX =
......@@ -200,7 +204,7 @@ endif
SRCDIRS = $(OS) buildheader newbrt
%.$(OEXT):%.c
$(CC) $< -c $(CPPFLAGS) $(CFLAGS) $(OOUTPUT)$@
$(CC) $< -c $(CPPFLAGS) $(CPPFLAGS_DIRECTORY) $(CFLAGS) $(OOUTPUT)$@
%.$(AEXT):
$(AR) $(ARFLAGS) $(AROUTPUT)$@ $(filter %.$(OEXT),$^) $(patsubst %.bundle, %.bundle/*.$(OEXT), $(filter-out %.$(OEXT),$^))
......@@ -221,7 +225,27 @@ else
SUMMARIZE_CMD =
endif
ifeq ($(VERBOSE),2)
VERBVERBOSE=-v
MAYBEATSIGN=
else ifeq ($(VERBOSE),1)
VERBVERBOSE=
MAYBEATSIGN=
else
VERBVERBOSE=-q
MAYBEATSIGN=@
endif
CPPFLAGS += $(CPPFLAGS_DIRECTORY)
CFLAGS += $(CFLAGS_DIRECTORY)
LDFLAGS += $(LDFLAGS_DIRECTORY)
LOADLIBES += $(LOADLIBES_DIRECTORY)
%.$(SOEXT):
$(CC) -shared $(CFLAGS) $(LDFLAGS) $^ $(LOADLIBES) -o $@
include $(patsubst %,%/make.include,$(SRCDIRS))
build: buildheader/build $(OS)/build
check: $(OS)/check
build: buildheader/build $(OS)/build newbrt/build
check: $(OS)/check newbrt/check
......@@ -95,6 +95,8 @@ else
NEWBRT_O_FILES = newbrt.o
endif
NEWBRT_O_FILES += brtloader.$(OEXT)
$(NEWBRT): $(NEWBRT_O_FILES)
$(NEWBRT_BUNDLE): log_code.c log_header.h
......@@ -153,4 +155,3 @@ clean-local:
# After doing (cd ../src/tests;make test_log5.recover), run these. The files should have no differences.
testdump: brtdump$(BINSUF)
./brtdump ../src/tests/dir.test_log5.c.tdb.recover/foo.db > dump.r && ./brtdump ../src/tests/dir.test_log5.c.tdb/foo.db > dump.$(OEXT) && diff dump.$(OEXT) dump.r
......@@ -35,13 +35,6 @@ struct translation { //This is the BTT (block translation table)
// location_on is stored in block_translation[RESERVED_BLOCKNUM_TRANSLATION].u.diskoff
};
//Unmovable reserved first, then reallocable.
// We reserve one blocknum for the translation table itself.
enum {RESERVED_BLOCKNUM_NULL =0,
RESERVED_BLOCKNUM_TRANSLATION=1,
RESERVED_BLOCKNUM_DESCRIPTOR =2,
RESERVED_BLOCKNUMS};
static const BLOCKNUM freelist_null = {-1}; // in a freelist, this indicates end of list
static const DISKOFF size_is_free = (DISKOFF)-1; // value of block_translation_pair.size if blocknum is unused
static const DISKOFF diskoff_unused = (DISKOFF)-2; // value of block_translation_pair.u.diskoff if blocknum is used but does not yet have a diskblock
......@@ -116,7 +109,6 @@ toku_maybe_truncate_cachefile_on_open(BLOCK_TABLE bt, struct brt_header *h) {
}
static void
copy_translation(struct translation * dst, struct translation * src, enum translation_type newtype) {
assert(src->length_of_array >= src->smallest_never_used_blocknum.b); //verify invariant
......@@ -876,4 +868,3 @@ toku_block_table_get_fragmentation_unlocked(BLOCK_TABLE bt, TOKU_DB_FRAGMENTATIO
block_allocator_get_unused_statistics(bt->block_allocator, report);
}
......@@ -46,6 +46,9 @@ void toku_translate_blocknum_to_offset_size(BLOCK_TABLE bt, BLOCKNUM b, DISKOFF
//Serialization
void toku_serialize_translation_to_wbuf_unlocked(BLOCK_TABLE bt, struct wbuf *w, int64_t *address, int64_t *size);
void toku_block_table_swap_for_redirect(BLOCK_TABLE old_bt, BLOCK_TABLE new_bt);
//DEBUG ONLY (brtdump included), tests included
void toku_blocknum_dump_translation(BLOCK_TABLE bt, BLOCKNUM b);
void toku_dump_translation_table(FILE *f, BLOCK_TABLE bt);
......@@ -65,5 +68,12 @@ void toku_block_table_get_fragmentation_unlocked(BLOCK_TABLE bt, TOKU_DB_FRAGMEN
//Requires: blocktable lock is held.
//Requires: report->file_size_bytes is already filled in.
//Unmovable reserved first, then reallocable.
// We reserve one blocknum for the translation table itself.
enum {RESERVED_BLOCKNUM_NULL =0,
RESERVED_BLOCKNUM_TRANSLATION=1,
RESERVED_BLOCKNUM_DESCRIPTOR =2,
RESERVED_BLOCKNUMS};
#endif
......@@ -49,6 +49,13 @@ struct subtree_estimates {
static struct subtree_estimates const zero_estimates __attribute__((__unused__)) = {0,0,0,TRUE};
#if 0
static inline struct subtree_estimates __attribute__((__unused__))
make_subtree_estimates (u_int64_t nkeys, u_int64_t ndata, u_int64_t dsize, BOOL exact) {
return (struct subtree_estimates){.nkeys=nkeys, .ndata=ndata, .dsize=dsize, .exact=exact};
}
#endif
static inline void __attribute__((__unused__))
subtract_estimates (struct subtree_estimates *a, struct subtree_estimates *b) {
if (a->nkeys >= b->nkeys) a->nkeys -= b->nkeys; else a->nkeys=0;
......@@ -111,7 +118,7 @@ struct brtnode {
struct kv_pair **childkeys; /* Pivot keys. Child 0's keys are <= childkeys[0]. Child 1's keys are <= childkeys[1].
Note: It is possible that Child 1's keys are == to child 0's key's, so it is
not necessarily true that child 1's keys are > childkeys[0].
However, in the absense of duplicate keys, child 1's keys *are* > childkeys[0]. */
However, in the absence of duplicate keys, child 1's keys *are* > childkeys[0]. */
} n;
struct leaf {
struct subtree_estimates leaf_stats; // actually it is exact.
......@@ -147,12 +154,13 @@ struct brt_header {
enum brtheader_type type;
struct brt_header * checkpoint_header;
CACHEFILE cf;
char *fname; // the filename
u_int64_t checkpoint_count; // Free-running counter incremented once per checkpoint (toggling LSB).
// LSB indicates which header location is used on disk so this
// counter is effectively a boolean which alternates with each checkpoint.
LSN checkpoint_lsn; // LSN of creation of "checkpoint-begin" record in log.
int dirty;
BOOL dictionary_opened; // True once this header has been associated with a dictionary (a brt fully opened)
DICTIONARY_ID dict_id; // unique id for dictionary
int panic; // If nonzero there was a write error. Don't write any more, because it probably only gets worse. This is the error code.
char *panic_string; // A malloced string that can indicate what went wrong.
int layout_version;
......@@ -187,8 +195,6 @@ struct brt {
unsigned int flags;
BOOL did_set_flags;
BOOL did_set_descriptor;
BOOL did_set_filenum;
FILENUM filenum;
struct descriptor temp_descriptor;
toku_dbt_upgradef dbt_userformat_upgrade;
int (*compare_fun)(DB*,const DBT*,const DBT*);
......@@ -207,6 +213,9 @@ struct brt {
};
/* serialization code */
int toku_serialize_brtnode_to_memory (BRTNODE node, int n_workitems, int n_threads,
/*out*/ size_t *n_bytes_to_write,
/*out*/ char **bytes_to_write);
int toku_serialize_brtnode_to(int fd, BLOCKNUM, BRTNODE node, struct brt_header *h, int n_workitems, int n_threads, BOOL for_checkpoint);
int toku_deserialize_brtnode_from (int fd, BLOCKNUM off, u_int32_t /*fullhash*/, BRTNODE *brtnode, struct brt_header *h);
unsigned int toku_serialize_brtnode_size(BRTNODE node); /* How much space will it take? */
......@@ -218,7 +227,8 @@ int toku_serialize_brt_header_size (struct brt_header *h);
int toku_serialize_brt_header_to (int fd, struct brt_header *h);
int toku_serialize_brt_header_to_wbuf (struct wbuf *, struct brt_header *h, int64_t address_translation, int64_t size_translation);
int toku_deserialize_brtheader_from (int fd, struct brt_header **brth);
int toku_serialize_descriptor_contents_to_fd(int fd, struct descriptor *desc, DISKOFF offset);
int toku_serialize_descriptor_contents_to_fd(int fd, const struct descriptor *desc, DISKOFF offset);
void toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb, const struct descriptor *desc);
void toku_brtnode_free (BRTNODE *node);
......
......@@ -353,8 +353,6 @@ static size_t get_sum_uncompressed_size(int n, struct sub_block_sizes sizes[]) {
return uncompressed_size;
}
static void serialize_descriptor_contents_to_wbuf(struct wbuf *wb, struct descriptor *desc);
#include "workset.h"
struct compress_work {
......@@ -400,7 +398,9 @@ compress_worker(void *arg) {
return arg;
}
int toku_serialize_brtnode_to (int fd, BLOCKNUM blocknum, BRTNODE node, struct brt_header *h, int UU(n_workitems), int UU(n_threads), BOOL for_checkpoint) {
int toku_serialize_brtnode_to_memory (BRTNODE node, int n_workitems __attribute__((__unused__)), int n_threads __attribute__((__unused__)),
/*out*/ size_t *n_bytes_to_write,
/*out*/ char **bytes_to_write) {
struct wbuf w;
int i;
......@@ -420,8 +420,7 @@ int toku_serialize_brtnode_to (int fd, BLOCKNUM blocknum, BRTNODE node, struct b
assert(node->layout_version == BRT_LAYOUT_VERSION);
wbuf_int(&w, node->layout_version);
wbuf_int(&w, node->layout_version_original);
assert(node->desc == &h->descriptor);
serialize_descriptor_contents_to_wbuf(&w, node->desc);
toku_serialize_descriptor_contents_to_wbuf(&w, node->desc);
//printf("%s:%d %lld.calculated_size=%d\n", __FILE__, __LINE__, off, calculated_size);
wbuf_uint(&w, node->nodesize);
wbuf_uint(&w, node->flags);
......@@ -580,7 +579,7 @@ int toku_serialize_brtnode_to (int fd, BLOCKNUM blocknum, BRTNODE node, struct b
compressed_len = compressed_ptr - compressed_base_ptr;
if (0) printf("Block %" PRId64 " Size before compressing %u, after compression %"PRIu64"\n", blocknum.b, calculated_size-uncompressed_magic_len, (uint64_t) compressed_len);
//if (0) printf("Block %" PRId64 " Size before compressing %u, after compression %"PRIu64"\n", blocknum.b, calculated_size-uncompressed_magic_len, (uint64_t) compressed_len);
// write out the compression header
uint32_t *compressed_header_ptr = (uint32_t *)(compressed_buf + uncompressed_magic_len);
......@@ -592,6 +591,27 @@ int toku_serialize_brtnode_to (int fd, BLOCKNUM blocknum, BRTNODE node, struct b
compressed_header_ptr += 2;
}
*n_bytes_to_write = uncompressed_magic_len + compression_header_len + compressed_len;
*bytes_to_write = compressed_buf;
assert(w.ndone==calculated_size);
toku_free(buf);
return 0;
}
int toku_serialize_brtnode_to (int fd, BLOCKNUM blocknum, BRTNODE node, struct brt_header *h, int n_workitems, int n_threads, BOOL for_checkpoint) {
assert(node->desc == &h->descriptor);
size_t n_to_write;
char *compressed_buf;
{
int r = toku_serialize_brtnode_to_memory (node, n_workitems, n_threads,
&n_to_write, &compressed_buf);
if (r!=0) return r;
}
//write_now: printf("%s:%d Writing %d bytes\n", __FILE__, __LINE__, w.ndone);
{
// If the node has never been written, then write the whole buffer, including the zeros
......@@ -600,7 +620,6 @@ int toku_serialize_brtnode_to (int fd, BLOCKNUM blocknum, BRTNODE node, struct b
//printf("%s:%d translated_blocknum_limit=%lu blocknum.b=%lu\n", __FILE__, __LINE__, h->translated_blocknum_limit, blocknum.b);
//printf("%s:%d allocator=%p\n", __FILE__, __LINE__, h->block_allocator);
//printf("%s:%d bt=%p\n", __FILE__, __LINE__, h->block_translation);
size_t n_to_write = uncompressed_magic_len + compression_header_len + compressed_len;
DISKOFF offset;
//h will be dirtied
......@@ -612,8 +631,6 @@ int toku_serialize_brtnode_to (int fd, BLOCKNUM blocknum, BRTNODE node, struct b
}
//printf("%s:%d wrote %d bytes for %lld size=%lld\n", __FILE__, __LINE__, w.ndone, off, size);
assert(w.ndone==calculated_size);
toku_free(buf);
toku_free(compressed_buf);
node->dirty = 0; // See #1957. Must set the node to be clean after serializing it so that it doesn't get written again on the next checkpoint or eviction.
return 0;
......@@ -1033,7 +1050,7 @@ decompress_brtnode_from_raw_block_into_rbuf_versioned(u_int32_t version, u_int8_
static int
deserialize_brtnode_from_rbuf_versioned (u_int32_t version, BLOCKNUM blocknum, u_int32_t fullhash, BRTNODE *brtnode, struct brt_header *h, struct rbuf *rb) {
int r;
int r = 0;
BRTNODE brtnode_10 = NULL;
BRTNODE brtnode_11 = NULL;
......@@ -1301,7 +1318,7 @@ int toku_serialize_brt_header_to (int fd, struct brt_header *h) {
}
u_int32_t
toku_serialize_descriptor_size(struct descriptor *desc) {
toku_serialize_descriptor_size(const struct descriptor *desc) {
//Checksum NOT included in this. Checksum only exists in header's version.
u_int32_t size = 4+ //version
4; //size
......@@ -1309,8 +1326,8 @@ toku_serialize_descriptor_size(struct descriptor *desc) {
return size;
}
static void
serialize_descriptor_contents_to_wbuf(struct wbuf *wb, struct descriptor *desc) {
void
toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb, const struct descriptor *desc) {
if (desc->version==0) assert(desc->dbt.size==0);
wbuf_int(wb, desc->version);
wbuf_bytes(wb, desc->dbt.data, desc->dbt.size);
......@@ -1320,13 +1337,13 @@ serialize_descriptor_contents_to_wbuf(struct wbuf *wb, struct descriptor *desc)
//descriptor.
//Descriptors are NOT written during the header checkpoint process.
int
toku_serialize_descriptor_contents_to_fd(int fd, struct descriptor *desc, DISKOFF offset) {
toku_serialize_descriptor_contents_to_fd(int fd, const struct descriptor *desc, DISKOFF offset) {
int r = 0;
// make the checksum
int64_t size = toku_serialize_descriptor_size(desc)+4; //4 for checksum
struct wbuf w;
wbuf_init(&w, toku_xmalloc(size), size);
serialize_descriptor_contents_to_wbuf(&w, desc);
toku_serialize_descriptor_contents_to_wbuf(&w, desc);
{
//Add checksum
u_int32_t checksum = x1764_finish(&w.checksum);
......
This diff is collapsed.
......@@ -30,10 +30,12 @@ typedef int(*BRT_GET_STRADDLE_CALLBACK_FUNCTION)(ITEMLEN, bytevec, ITEMLEN, byte
int toku_open_brt (const char *fname, int is_create, BRT *, int nodesize, CACHETABLE, TOKUTXN, int(*)(DB*,const DBT*,const DBT*), DB*);
int toku_redirect_brt (const char *fname_in_env, BRT brt, TOKUTXN txn);
int toku_dictionary_redirect (const char *dst_fname_in_env, const char *dst_fname_in_cwd, BRT old_brt, TOKUTXN txn);
// See the brt.c file for what this toku_redirect_brt does
u_int32_t toku_serialize_descriptor_size(struct descriptor *desc);
int toku_dictionary_redirect_abort(struct brt_header *old_h, struct brt_header *new_h, TOKUTXN txn);
u_int32_t toku_serialize_descriptor_size(const struct descriptor *desc);
int toku_brt_create(BRT *);
int toku_brt_set_flags(BRT, unsigned int flags);
int toku_brt_set_descriptor (BRT t, u_int32_t version, const DBT* descriptor, toku_dbt_upgradef dbt_userformat_upgrade);
......@@ -44,11 +46,13 @@ int toku_brt_get_nodesize(BRT, unsigned int *nodesize);
int toku_brt_set_bt_compare(BRT, brt_compare_func);
int toku_brt_set_dup_compare(BRT, brt_compare_func);
int toku_brt_set_filenum(BRT brt, FILENUM filenum);
brt_compare_func toku_brt_get_bt_compare (BRT brt);
int brt_set_cachetable(BRT, CACHETABLE);
int toku_brt_open(BRT, const char *fname, const char *fname_in_env, int is_create, int only_create, CACHETABLE ct, TOKUTXN txn, DB *db);
int toku_brt_open_recovery(BRT, const char *fname, const char *fname_in_env, int is_create, int only_create, CACHETABLE ct, TOKUTXN txn, DB *db, int recovery_force_fcreate);
int toku_brt_open(BRT, const char *fname_in_env, const char *fname_in_cwd,
int is_create, int only_create, CACHETABLE ct, TOKUTXN txn, DB *db);
int toku_brt_open_recovery(BRT, const char *fname_in_env, const char *fname_in_cwd,
int is_create, int only_create, CACHETABLE ct, TOKUTXN txn, DB *db, int recovery_force_fcreate, FILENUM use_filenum);
int toku_brt_remove_subdb(BRT brt, const char *dbname, u_int32_t flags);
......@@ -162,7 +166,7 @@ extern int toku_brt_do_push_cmd; // control whether push occurs eagerly.
// TODO: Get rid of this
int toku_brt_dbt_set(DBT* key, DBT* key_source);
int toku_brt_get_fd(BRT, int *);
DICTIONARY_ID toku_brt_get_dictionary_id(BRT);
int toku_brt_height_of_root(BRT, int *height); // for an open brt, return the current height.
......@@ -185,7 +189,9 @@ int toku_brt_stat64 (BRT, TOKUTXN,
struct brtstat64_s *stat
);
int toku_brt_init(void (*ydb_lock_callback)(void), void (*ydb_unlock_callback)(void));
int toku_brt_init(void (*ydb_lock_callback)(void),
void (*ydb_unlock_callback)(void),
void (*db_set_brt)(DB*,BRT));
int toku_brt_destroy(void);
int toku_pwrite_lock_init(void);
int toku_pwrite_lock_destroy(void);
......
......@@ -306,7 +306,7 @@ main (int argc, const char *argv[]) {
assert(r == 0);
CACHEFILE cf;
FILENUM fn={0};
r = toku_cachetable_openfd_with_filenum (&cf, ct, f, n, FALSE, fn, FALSE);
r = toku_cachetable_openfd_with_filenum (&cf, ct, f, n, n, FALSE, fn, FALSE);
assert(r==0);
dump_header(f, &h, cf);
if (interactive) {
......
#include <db.h>
#include "brttypes.h"
#include "brtloader.h"
/* These functions are exported to allow the tests to compile. */
int brtloader_open_temp_file (BRTLOADER bl, FILE **filep, char **fnamep);
// In-memory state of one bulk load (handle type BRTLOADER; see brtloader.h).
struct brtloader_s {
    int panic;          // nonzero once the loader has hit a fatal error -- TODO confirm
    int panic_errno;    // errno captured when panic was set -- TODO confirm
    generate_row_for_put_func generate_row_for_put; // callback that derives destination rows from source rows
    brt_compare_func *bt_compare_funs; // key-comparison functions, presumably N of these -- TODO confirm
    DB *src_db;         // the source dictionary the rows come from
    int N;              // number of destination dictionaries
    DB **dbs;           // the N destination dictionaries
    const struct descriptor **descriptors; // N of these
    const char **new_fnames_in_env; // the file names that the final data will be written to (relative to env).
    const char **new_fnames_in_cwd; // the file names that the final data will be written to (relative to cwd).
    const char *temp_file_template; // template for naming temp files (used by brtloader_open_temp_file)
    FILE *fprimary_rows; char *fprimary_rows_name; // temp file of primary rows, and its name
    FILE *fprimary_idx; char *fprimary_idx_name;   // temp index file for the primary rows, and its name
    u_int64_t fprimary_offset; // current write offset into fprimary_rows -- presumably; verify against loader_write_row
};
/* These data structures are used for manipulating a collection of rows in main memory. */
/* One key/value pair held in main memory. */
struct row {
    char *data;      // the key bytes (the value presumably follows contiguously -- TODO confirm)
    int klen,vlen;   // key length and value length, in bytes
};
/* A growable collection of rows plus the buffer backing their bytes. */
struct rowset {
    size_t n_rows, n_rows_limit;     // rows in use / capacity of the rows array
    struct row *rows;                // the row descriptors
    size_t n_bytes, n_bytes_limit;   // bytes in use / capacity of the data buffer
    char *data;                      // backing storage for the rows' bytes -- presumably; verify against add_row
};
void init_rowset (struct rowset *rows);
void destroy_rowset (struct rowset *rows);
void add_row (struct rowset *rows, DBT *key, DBT *val);
int loader_write_row(DBT *key, DBT *val, FILE *data, FILE *idx, u_int64_t *dataoff, BRTLOADER bl);
int loader_read_row (FILE *f, DBT *key, DBT *val, BRTLOADER bl);
void merge (struct row dest[/*an+bn*/], struct row a[/*an*/], int an, struct row b[/*bn*/], int bn,
DB *dest_db, brt_compare_func);
void mergesort_row_array (struct row rows[/*n*/], int n, DB *dest_db, brt_compare_func);
struct fileset {
int n_temp_files, n_temp_files_limit;
char **temp_data_names;
char **temp_idx_names;
};
void init_fileset (struct fileset *fs);
int sort_and_write_rows (struct rowset *rows, struct fileset *fs, BRTLOADER bl, DB *dest_db, brt_compare_func);
int merge_files (struct fileset *fs, BRTLOADER bl, DB *dest_db, brt_compare_func);
int write_file_to_dbfile (int outfile, FILE *infile, BRTLOADER bl, const struct descriptor *descriptor);
This diff is collapsed.
/* -*- mode: C; c-basic-offset: 4 -*- */
#ifndef BRTLOADER_H
#define BRTLOADER_H
#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved."

// Opaque handle to a bulk loader (struct defined in brtloader-internal.h).
typedef struct brtloader_s *BRTLOADER;

// Create a loader that builds N destination dictionaries from rows of src_db.
// All [/*N*/] arrays have one entry per destination dictionary.
int toku_brt_loader_open (BRTLOADER *bl,
                          generate_row_for_put_func g,            // derives destination rows from source rows
                          DB *src_db,                             // source dictionary
                          int N,
                          DB *dbs[/*N*/],
                          const struct descriptor *descriptors[/*N*/],
                          const char * new_fnames_in_env[/*N*/],  // final file names (relative to env)
                          const char * new_fnames_in_cwd[/*N*/],  // final file names (relative to cwd)
                          brt_compare_func bt_compare_functions[/*N*/],
                          const char *temp_file_template);        // template for naming the loader's temp files
// Give one row to the loader.
int toku_brt_loader_put (BRTLOADER bl, DBT *key, DBT *val);
// Finish loading and free the loader's memory.
int toku_brt_loader_close (BRTLOADER bl);

#endif // BRTLOADER_H
......@@ -30,6 +30,7 @@ typedef const void *bytevec;
typedef int64_t DISKOFF; /* Offset in a disk. -1 is the NULL pointer. */
typedef u_int64_t TXNID;
#define TXNID_NONE_LIVING ((TXNID)0)
#define TXNID_NONE ((TXNID)0)
typedef struct s_blocknum { int64_t b; } BLOCKNUM; // make a struct so that we will notice type problems.
......@@ -45,10 +46,17 @@ typedef struct __toku_lsn { u_int64_t lsn; } LSN;
#define ZERO_LSN ((LSN){0})
#define MAX_LSN ((LSN){UINT64_MAX})
/* Make the FILEID a struct for the same reason. */
typedef struct __toku_fileid { u_int32_t fileid; } FILENUM;
/* At the brt layer, a FILENUM uniquely identifies an open file.
* At the ydb layer, a DICTIONARY_ID uniquely identifies an open dictionary.
* With the introduction of the loader (ticket 2216), it is possible for the file that holds
* an open dictionary to change, so these are now separate and independent unique identifiers.
*/
typedef struct {u_int32_t fileid;} FILENUM;
#define FILENUM_NONE ((FILENUM){UINT32_MAX})
typedef struct {u_int32_t dictid;} DICTIONARY_ID;
#define DICTIONARY_ID_NONE ((DICTIONARY_ID){0})
typedef struct {
u_int32_t num;
FILENUM *filenums;
......
This diff is collapsed.
......@@ -47,9 +47,9 @@ int toku_create_cachetable(CACHETABLE */*result*/, long size_limit, LSN initial_
// During a transaction, we cannot reuse a filenum.
int toku_cachefile_of_filenum (CACHETABLE t, FILENUM filenum, CACHEFILE *cf);
// What is the cachefile that goes with a particular iname?
// What is the cachefile that goes with a particular iname (relative to env)?
// During a transaction, we cannot reuse an iname.
int toku_cachefile_of_iname (CACHETABLE ct, const char *iname, CACHEFILE *cf);
int toku_cachefile_of_iname_in_env (CACHETABLE ct, const char *iname_in_env, CACHEFILE *cf);
// TODO: #1510 Add comments on how these behave
int toku_cachetable_begin_checkpoint (CACHETABLE ct, TOKULOGGER);
......@@ -68,15 +68,19 @@ int toku_cachetable_close (CACHETABLE*); /* Flushes everything to disk, and dest
// Get the number of cachetable misses (in misscount) and the accumulated time waiting for reads (in misstime, units of microseconds)
void toku_cachetable_get_miss_times(CACHETABLE ct, uint64_t *misscount, uint64_t *misstime);
// Open a file and bind the file to a new cachefile object.
int toku_cachetable_openf (CACHEFILE *,CACHETABLE, const char */*fname*/, const char */*fname_relative_to_env*/,int flags, mode_t mode);
// Open a file and bind the file to a new cachefile object. (For use by test programs only.)
int toku_cachetable_openf (CACHEFILE *,CACHETABLE, const char */*fname_in_env*/, const char */*fname_in_cwd*/,int flags, mode_t mode);
// Bind a file to a new cachefile object.
int toku_cachetable_openfd (CACHEFILE *,CACHETABLE, int /*fd*/, const char *fname_relative_to_env /*(used for logging)*/);
int toku_cachetable_openfd_with_filenum (CACHEFILE *,CACHETABLE, int /*fd*/, const char *fname_relative_to_env, BOOL with_filenum, FILENUM filenum, BOOL reserved);
int toku_cachetable_openfd (CACHEFILE *,CACHETABLE, int /*fd*/,
const char *fname_relative_to_env /*(used for logging)*/,
const char *fname_in_cwd);
int toku_cachetable_openfd_with_filenum (CACHEFILE *,CACHETABLE, int /*fd*/,
const char *fname_in_env, const char *fname_in_cwd,
BOOL with_filenum, FILENUM filenum, BOOL reserved);
// Change the binding of which file is attached to a cachefile. Close the old fd. Use the new fd.
int toku_cachetable_redirect (CACHEFILE cf, int fd, const char *fname_in_env);
int toku_cachefile_redirect (CACHEFILE cf, int fd, const char *fname_in_env, const char *fname_in_cwd);
int toku_cachetable_reserve_filenum (CACHETABLE ct, FILENUM *reserved_filenum, BOOL with_filenum, FILENUM filenum);
......@@ -124,6 +128,9 @@ void toku_cachefile_set_userdata(CACHEFILE cf, void *userdata,
void *toku_cachefile_get_userdata(CACHEFILE);
// Effect: Get the user data.
CACHETABLE toku_cachefile_get_cachetable(CACHEFILE cf);
// Effect: Get the cachetable.
// Put a memory object into the cachetable.
// Effects: Lookup the key in the cachetable. If the key is not in the cachetable,
// then insert the pair and pin it. Otherwise return an error. Some of the key
......@@ -210,6 +217,15 @@ int toku_cachefile_flush (CACHEFILE);
// Return the file descriptor
int toku_cachefile_fd (CACHEFILE);
// Get the iname (within the environment) associated with the cachefile
// Return the filename
char * toku_cachefile_fname_in_env (CACHEFILE cf);
// Get the iname (within the cwd) associated with the cachefile
// Return the filename
char * toku_cachefile_fname_in_cwd (CACHEFILE cf);
// For test programs only.
// Set the cachefile's fd and fname.
// Effect: Bind the cachefile to a new fd and fname. The old fd is closed.
// Returns: 0 if success, otherwise an error number
......
......@@ -88,7 +88,7 @@ struct tokulogger {
u_int32_t write_block_size; // How big should the blocks be written to various logs?
TXNID oldest_living_xid;
void (*remove_finalize_callback) (int, void*); // ydb-level callback to be called when a transaction that ...
void (*remove_finalize_callback) (DICTIONARY_ID, void*); // ydb-level callback to be called when a transaction that ...
void * remove_finalize_callback_extra; // ... deletes a file is committed or when one that creates a file is aborted.
};
......
......@@ -79,6 +79,9 @@ const struct logtype rollbacks[] = {
{"load", 'l', FA{{"BYTESTRING", "old_iname", 0},
{"BYTESTRING", "new_iname", 0},
NULLFIELD}},
{"dictionary_redirect", 'R', FA{{"FILENUM", "old_filenum", 0},
{"FILENUM", "new_filenum", 0},
NULLFIELD}},
// {"fclose", 'c', FA{{"FILENUM", "filenum", 0},
// {"BYTESTRING", "fname", 0},
// NULLFIELD}},
......
......@@ -1038,7 +1038,7 @@ toku_logger_get_next_lsn(TOKULOGGER logger) {
// called from toku_env_open()
void
toku_logger_set_remove_finalize_callback(TOKULOGGER logger, void (*funcp)(int, void *), void * extra) {
toku_logger_set_remove_finalize_callback(TOKULOGGER logger, void (*funcp)(DICTIONARY_ID, void *), void * extra) {
logger->remove_finalize_callback = funcp;
logger->remove_finalize_callback_extra = extra;
}
......@@ -1047,7 +1047,7 @@ toku_logger_set_remove_finalize_callback(TOKULOGGER logger, void (*funcp)(int, v
// when a transaction that created a file is aborted.
// During recovery, there is no ydb layer, so no callback exists.
void
toku_logger_call_remove_finalize_callback(TOKULOGGER logger, int fd) {
toku_logger_call_remove_finalize_callback(TOKULOGGER logger, DICTIONARY_ID dict_id) {
if (logger->remove_finalize_callback)
logger->remove_finalize_callback(fd, logger->remove_finalize_callback_extra);
logger->remove_finalize_callback(dict_id, logger->remove_finalize_callback_extra);
}
......@@ -84,8 +84,8 @@ void toku_logger_note_checkpoint(TOKULOGGER logger, LSN lsn);
TXNID toku_logger_get_oldest_living_xid(TOKULOGGER logger);
LSN toku_logger_get_oldest_living_lsn(TOKULOGGER logger);
LSN toku_logger_get_next_lsn(TOKULOGGER logger);
void toku_logger_set_remove_finalize_callback(TOKULOGGER logger, void (*funcp)(int, void *), void * extra);
void toku_logger_call_remove_finalize_callback(TOKULOGGER logger, int fd);
void toku_logger_set_remove_finalize_callback(TOKULOGGER logger, void (*funcp)(DICTIONARY_ID, void *), void * extra);
void toku_logger_call_remove_finalize_callback(TOKULOGGER logger, DICTIONARY_ID dict_id);
int toku_logger_make_space_in_inbuf (TOKULOGGER logger, int n_bytes_needed);
......
include newbrt/tests/make.include
NEWBRT_BINS = $(patsubst %,newbrt/%$(BINSUF),$(BINS_RAW))
......@@ -57,9 +57,9 @@ BRT_C_FILES = $(patsubst %,newbrt/%.c,$(BRT_SOURCES))
BRT_O_FILES = $(patsubst %,newbrt/%.$(OEXT),$(BRT_SOURCES))
$(TEST_NEWBRT): newbrt/log_header.h
$(TEST_NEWBRT) $(BRT_O_FILES) newbrt/newbrt.$(OEXT): CPPFLAGS += -Inewbrt -Iinclude
$(TEST_NEWBRT) $(BRT_O_FILES) newbrt/newbrt.$(OEXT): CPPFLAGS_DIRECTORY = -Inewbrt -Iinclude
newbrt/newbrt.$(OEXT): $(BRT_C_FILES) $(DEPEND_COMPILE)
$(CC) -c $(BRT_C_FILES) $(COMBINE_C) $(CPPFLAGS) $(CFLAGS) $(OOUTPUT)$@
$(CC) -c $(BRT_C_FILES) $(COMBINE_C) $(CPPFLAGS) $(CPPFLAGS_DIRECTORY) $(CFLAGS) $(CFLAGS_DIRECTORY) $(OOUTPUT)$@
ifneq ($(CYGWIN),)
NEWBRT_O_FILES = $(BRT_O_FILES)
......@@ -71,10 +71,21 @@ else
NEWBRT_O_FILES = newbrt/newbrt.o
endif
NEWBRT_O_FILES += newbrt/brtloader.o
$(NEWBRT_O_FILES): VISIBILITY=-fvisibility=default
$(NEWBRT_O_FILES): CPPFLAGS_DIRECTORY=-Iinclude -Itoku_include -I$(OS)
$(NEWBRT_O_FILES): SHADOW=
$(NEWBRT_A): $(NEWBRT_O_FILES)
newbrt/libnewbrt.$(SOEXT): $(NEWBRT_O_FILES)
newbrt/libnewbrt.$(SOEXT): VISIBILITY=-fvisibility=default
newbrt/libnewbrt.$(SOEXT): LOADLIBES_DIRECTORY=-lz -Llib -ltokuportability
newbrt/libnewbrt.$(SOEXT): CFLAGS_DIRECTORY=-lpthread
newbrt/brtloader.$(OEXT): CPPFLAGS_DIRECTORY=-Iinclude
newbrt/logformat$(BINSUF): CPPFLAGS_DIRECTORY=-Iinclude -Itoku_include -I$(OS)
newbrt/build: newbrt/bins newbrt/libs $(TEST_NEWBRT)
newbrt/build: newbrt/bins newbrt/libs $(TEST_NEWBRT) newbrt/tests/build
newbrt/check: newbrt/tests/check
newbrt/bins: $(NEWBRT_BINS)
newbrt/libs: $(NEWBRT_A)
.PHONY: newbrt/build newbrt/bins newbrt/libs
......@@ -149,6 +149,7 @@ static void file_map_remove(struct file_map *fmap, FILENUM fnum) {
}
}
// Look up file info: given FILENUM, return file_map_tuple (or DB_NOTFOUND)
static int file_map_find(struct file_map *fmap, FILENUM fnum, struct file_map_tuple **file_map_tuple) {
OMTVALUE v; u_int32_t idx;
int r = toku_omt_find_zero(fmap->filenums, file_map_h, &fnum, &v, &idx, NULL);
......@@ -249,9 +250,6 @@ static int internal_toku_recover_fopen_or_fcreate (RECOVER_ENV renv, BOOL must_c
if (!(treeflags & TOKU_DB_VALCMP_BUILTIN) && renv->dup_compare)
toku_brt_set_dup_compare(brt, renv->dup_compare);
// bind to filenum when opened
toku_brt_set_filenum(brt, filenum);
// TODO mode (FUTURE FEATURE)
mode = mode;
......@@ -263,7 +261,7 @@ static int internal_toku_recover_fopen_or_fcreate (RECOVER_ENV renv, BOOL must_c
r = toku_brt_set_descriptor(brt, descriptor_version, &descriptor_dbt, abort_on_upgrade);
if (r!=0) goto close_brt;
}
r = toku_brt_open_recovery(brt, iname, iname, must_create, must_create, renv->ct, txn, fake_db, recovery_force_fcreate);
r = toku_brt_open_recovery(brt, iname, iname, must_create, must_create, renv->ct, txn, fake_db, recovery_force_fcreate, filenum);
if (r != 0) {
close_brt:
;
......@@ -371,12 +369,13 @@ static int toku_recover_backward_fopen (struct logtype_fopen *l, RECOVER_ENV ren
return 0;
}
// if file referred to in l is open, close it
static int toku_recover_fclose (struct logtype_fclose *l, RECOVER_ENV renv) {
struct file_map_tuple *tuple = NULL;
int r = file_map_find(&renv->fmap, l->filenum, &tuple);
if (r == 0) {
if (r == 0) { // if file is open
char *iname = fixup_fname(&l->iname);
assert(strcmp(tuple->iname, iname) == 0);
assert(strcmp(tuple->iname, iname) == 0); // verify that file_map has same iname as log entry
toku_free(iname);
DB *fake_db = tuple->brt->db; //Need to free the fake db that was malloced
......@@ -553,8 +552,10 @@ static int toku_recover_enq_insert_multiple (struct logtype_enq_insert_multiple
assert(r==0);
r = toku_brt_maybe_insert(tuple->brt, &dest_key, &dest_val, txn, TRUE, l->lsn, FALSE, BRT_INSERT);
assert(r == 0);
//flags==0 indicates the return values are stored in temporary memory that does
//not need to be freed. We need to continue using DB_DBT_REALLOC however.
//flags==0 means generate_row_for_put callback changed it
//(and freed any memory necessary to do so) so that values are now stored
//in temporary memory that does not need to be freed. We need to continue
//using DB_DBT_REALLOC however.
if (dest_key.flags == 0) {
toku_init_dbt(&dest_key);
dest_key.flags = DB_DBT_REALLOC;
......@@ -564,8 +565,8 @@ static int toku_recover_enq_insert_multiple (struct logtype_enq_insert_multiple
dest_val.flags = DB_DBT_REALLOC;
}
}
if (dest_key.flags & DB_DBT_REALLOC && dest_key.data) toku_free(dest_key.data); //TODO: #2321 May need windows hack
if (dest_val.flags & DB_DBT_REALLOC && dest_val.data) toku_free(dest_val.data); //TODO: #2321 May need windows hack
if (dest_key.data) toku_free(dest_key.data); //TODO: #2321 May need windows hack
if (dest_val.data) toku_free(dest_val.data); //TODO: #2321 May need windows hack
return 0;
}
......
......@@ -29,9 +29,10 @@ toku_commit_fdelete (u_int8_t file_was_open,
r = toku_cachefile_of_filenum(txn->logger->ct, filenum, &cf);
assert(r == 0); // must still be open (toku_brt_remove_on_commit() incremented refcount)
{
int fd = toku_cachefile_fd(cf);
assert(!toku_cachefile_is_dev_null(cf));
toku_logger_call_remove_finalize_callback(txn->logger, fd);
struct brt_header *h = toku_cachefile_get_userdata(cf);
DICTIONARY_ID dict_id = h->dict_id;
toku_logger_call_remove_finalize_callback(txn->logger, dict_id);
}
r = toku_cachefile_redirect_nullfd(cf);
assert(r==0);
......@@ -82,9 +83,10 @@ toku_rollback_fcreate (FILENUM filenum,
int r = toku_cachefile_of_filenum(txn->logger->ct, filenum, &cf);
assert(r == 0);
{
int fd = toku_cachefile_fd(cf);
assert(!toku_cachefile_is_dev_null(cf));
toku_logger_call_remove_finalize_callback(txn->logger, fd);
struct brt_header *h = toku_cachefile_get_userdata(cf);
DICTIONARY_ID dict_id = h->dict_id;
toku_logger_call_remove_finalize_callback(txn->logger, dict_id);
}
r = toku_cachefile_redirect_nullfd(cf);
assert(r==0);
......@@ -361,10 +363,11 @@ toku_rollback_tablelock_on_empty_table (FILENUM filenum,
// If r!=0 it could be because we grabbed a log on an empty table that doesn't even exist, and we never put anything into it.
// So, just don't do anything in this case.
BRT brt = brtv;
yield(toku_checkpoint_safe_client_lock, yield_v);
r = toku_brt_truncate(brt);
assert(r==0);
}
toku_checkpoint_safe_client_unlock();
}
return r;
}
......@@ -383,7 +386,7 @@ toku_commit_load (BYTESTRING UU(old_iname),
void *UU(yield_v),
LSN UU(oplsn))
{
// need to implement
// TODO 2216: need to implement
assert(1);
return 0;
}
......@@ -396,7 +399,46 @@ toku_rollback_load (BYTESTRING UU(old_iname),
void *UU(yield_v),
LSN UU(oplsn))
{
// need to implement
// TODO 2216: need to implement
assert(1);
return 0;
}
// Commit handler for the 'dictionary_redirect' rollback-log entry.
// Committing keeps the redirect in place, so there is nothing to do here.
int
toku_commit_dictionary_redirect (FILENUM UU(old_filenum),
                                 FILENUM UU(new_filenum),
                                 TOKUTXN UU(txn),
                                 YIELDF UU(yield),
                                 void * UU(yield_v),
                                 LSN UU(oplsn)) //oplsn is the lsn of the commit
{
    //NO-OP
    return 0;
}
// Abort handler for the 'dictionary_redirect' rollback-log entry:
// undo the redirect by pointing the dictionary back from new_filenum
// to old_filenum. Both cachefiles must still be open; any failure here
// is fatal (asserted).
int
toku_rollback_dictionary_redirect (FILENUM old_filenum,
                                   FILENUM new_filenum,
                                   TOKUTXN txn,
                                   YIELDF UU(yield),
                                   void * UU(yield_v),
                                   LSN UU(oplsn)) //oplsn is the lsn of the abort
{
    CACHETABLE ct = txn->logger->ct;

    // Locate the cachefile (and its brt header) for the redirected-to file.
    CACHEFILE cf_new = NULL;
    int r = toku_cachefile_of_filenum(ct, new_filenum, &cf_new);
    assert(r == 0);
    struct brt_header *h_new = toku_cachefile_get_userdata(cf_new);

    // Locate the cachefile (and its brt header) for the original file.
    CACHEFILE cf_old = NULL;
    r = toku_cachefile_of_filenum(ct, old_filenum, &cf_old);
    assert(r == 0);
    struct brt_header *h_old = toku_cachefile_get_userdata(cf_old);

    // Redirect back from new to old.
    r = toku_dictionary_redirect_abort(h_old, h_new, txn);
    assert(r == 0);
    return r;
}
......@@ -375,7 +375,7 @@ static int swap_brt (OMTVALUE txnv, u_int32_t UU(idx), void *extra) {
int toku_txn_note_swap_brt (BRT live, BRT zombie) {
if (zombie->pinned_by_checkpoint) {
//Swap checkpoint responsibility.
assert(!live->pinned_by_checkpoint);
assert(!live->pinned_by_checkpoint); //Pin only uses one brt.
live->pinned_by_checkpoint = 1;
zombie->pinned_by_checkpoint = 0;
}
......
......@@ -14,10 +14,11 @@
static int recovery_main(int argc, const char *argv[]);
static void dummy(void) {}
static void dummy_set_brt(DB *db UU(), BRT brt UU()) {}
int
main(int argc, const char *argv[]) {
toku_brt_init(dummy, dummy);
toku_brt_init(dummy, dummy, dummy_set_brt);
int r = recovery_main(argc, argv);
toku_brt_destroy();
return r;
......
......@@ -29,101 +29,13 @@ endif
# For very verbose output do
# make VERBOSE=2
#test1305 is first, since it is the longest test. Thus reducing the makespan on parallel checks
# Put these one-per-line so that if we insert a new one the svn diff can understand it better.
# Also keep them sorted.
REGRESSION_TESTS_RAW = \
test1305 \
block_allocator_test \
bread-test \
brt-serialize-test \
brt-serialize-sub-block-test \
brt-test \
brt-test-cursor \
brt-test-cursor-2 \
brt-test0 \
brt-test1 \
brt-test2 \
brt-test3 \
brt-test4 \
brt-test5 \
cachetable-rwlock-test \
cachetable-test \
cachetable-test2 \
cachetable-put-test \
cachetable-getandpin-test \
cachetable-unpin-test \
cachetable-rename-test \
cachetable-fd-test \
cachetable-flush-test \
cachetable-count-pinned-test \
cachetable-debug-test \
cachetable-debug-test \
cachetable-checkpoint-pending \
cachetable-checkpoint-test \
cachetable-prefetch-maybegetandpin-test \
cachetable-prefetch2-test \
cachetable-prefetch-close-test \
cachetable-prefetch-close-fail-test \
cachetable-prefetch-close-leak-test \
cachetable-prefetch-getandpin-test \
cachetable-prefetch-getandpin-fail-test \
cachetable-prefetch-checkpoint-test \
cachetable-reserve-filenum \
fifo-test \
list-test \
keyrange \
keyrange-unflat \
keyrange-dupsort \
keyrange-dupsort-unflat \
keytest \
log-test \
log-test2 \
log-test3 \
log-test4 \
log-test5 \
log-test6 \
log-test7 \
logcursor-timestamp \
logcursor-empty-logdir \
logcursor-empty-logfile \
logcursor-empty-logfile-2 \
logcursor-empty-logfile-3 \
memtest \
minicron-test \
omt-cursor-test \
omt-test \
recovery-cbegin \
recovery-cbegin-cend \
recovery-cbegin-cend-hello \
recovery-empty \
recovery-fopen-missing-file \
recovery-hello \
recovery-no-datadir \
recovery-no-log \
recovery-no-logdir \
shortcut \
test1308a \
test-assert \
test-brt-delete-both \
test-brt-overflow \
test-del-inorder \
test-inc-split \
test-leafentry10 \
test-leafentry-nested \
test_logcursor \
test_oexcl \
test_toku_malloc_plain_free \
threadpool-test \
workqueue-test \
x1764-test \
ybt-test \
# This line intentially kept commented so I can have a \ on the end of the previous line
SRCS=$(sort $(filter-out dir.%.c,$(wildcard *.c)))
REGRESSION_TESTS_RAW = $(patsubst %.c,%,$(SRCS))
EXTRA_ARGS=
# Add in the binaries that must be run in various ways.
BINS_RAW = $(REGRESSION_TESTS_RAW) \
benchmark-test \
cachetable-scan \
# This line intentially kept commented so I can have a \ on the end of the previous line
# BINS will be defined by adding .exe if appropriate.
......@@ -151,10 +63,21 @@ check_test1305:
@echo SKIPPED SLOW TEST $@
else
# Don't run 1305 under valgrind. It takes way too long.
check_test1305: test1305$(BINSUF)
check_test1305$(BINSUF): test1305$(BINSUF)
./$< $(VERBVERBOSE) $(SUMMARIZE_CMD)
endif
ifeq ($(TOKU_SKIP_MINICRON),1)
check_minicron-test$(BINSUF):
@echo "SKIPPED TEST $@ (Fails in win64 vm due to timing issues)"
endif
check_benchmark-test$(BINSUF): benchmark-test$(BINSUF);
check_logcursor-fw$(BINSUF): logcursor-fw$(BINSUF);
@echo "$@ must be run manually (needs logs to iterate over)."
check_logcursor-bw$(BINSUF): logcursor-bw$(BINSUF);
@echo "$@ must be run manually (needs logs to iterate over)."
check_benchmarktest_256: benchmark-test$(BINSUF) $(PTHREAD_LOCAL)
$(VGRIND) ./$< $(VERBVERBOSE) --valsize 256 --verify 1 $(SUMMARIZE_CMD)
......@@ -178,11 +101,13 @@ check_test-assert$(BINSUF): test-assert$(BINSUF) $(PTHREAD_LOCAL)
@# one argument, "ok" should not error
$(VGRIND) ./$< ok $(SUMMARIZE_CMD)
check_brtloader-test$(BINSUF): EXTRA_ARGS=dir.brtloader-test
check_%: % $(PTHREAD_LOCAL)
ifeq ($(VGRIND),)
./$< $(VERBVERBOSE) $(SUMMARIZE_CMD)
./$< $(VERBVERBOSE) $(EXTRA_ARGS) $(SUMMARIZE_CMD)
else
$(VGRIND) --log-file=$<.check.valgrind ./$< $(VERBVERBOSE); \
$(VGRIND) --log-file=$<.check.valgrind ./$< $(VERBVERBOSE) $(EXTRA_ARGS); \
if [ $$? -eq 0 ] ; then \
lines=`cat $<.check.valgrind | wc -l`; \
if [ $$lines -ne 0 ] ; then cat $<.check.valgrind; test 0 = 1; fi \
......@@ -194,11 +119,14 @@ endif
benchmark-test.$(OEXT): ../brt.h ../brt-search.h ../../include/db.h
brtloader_test$(BINSUF): ../brtloader-internal.h ../brtloader.o
../brtloader.$(OEXT): ../brtloader.c ../brtloader-internal.h
cd $(@D) && $(MAKE) $(@F)
clean:
rm -rf log-test7.c.dir_*
rm -rf *.dir dir.*
rm -f cachetable-fd-test.ctest2.data test_oexcl.c.tmp
rm -f test_oexcl.c.tmp
rm -f *.brt *.clean *.dirty *.tdb *.dat *.data *.out *.check.valgrind
$(BINS): $(LINK_FILES)
......
#include "test.h"
#include <toku_assert.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include "brtloader-internal.h"
#include "memory.h"
/* qsort(3)-style three-way comparison of two ints passed via void pointers:
 * returns -1, 0, or +1 as *a is less than, equal to, or greater than *b. */
static int qsort_compare_ints (const void *a, const void *b) {
    const int lhs = *(const int *)a;
    const int rhs = *(const int *)b;
    return (lhs > rhs) - (lhs < rhs);
}
/* brt_compare_func-style key comparison for these tests: both keys must be
 * single ints, and dest_db is unused (must be NULL). Delegates to the
 * qsort-style int comparator. */
static int compare_ints (DB *dest_db, const DBT *akey, const DBT *bkey) {
    assert(dest_db==NULL);
    assert(akey->size==sizeof(int));
    assert(bkey->size==sizeof(int));
    return qsort_compare_ints(akey->data, bkey->data);
}
static void test_merge_internal (int a[], int na, int b[], int nb) {
struct row *MALLOC_N(na, ar);
struct row *MALLOC_N(nb, br);
for (int i=0; i<na; i++) {
ar[i].data = (void*)&a[i];
ar[i].klen = sizeof(a[i]);
ar[i].vlen = 0;
}
for (int i=0; i<nb; i++) {
br[i].data = (void*)&b[i];
br[i].klen = sizeof(b[i]);
br[i].vlen = 0;
}
struct row *MALLOC_N(na+nb, cr);
DB *dest_db = NULL;
merge(cr, ar, na, br, nb, dest_db, compare_ints);
int i=0;
int j=0;
for (int k=0; k<na+nb; k++) {
int vc = *(int*)cr[k].data;
if (i<na && j<nb) {
if (vc==a[i]) {
assert(a[i]<=b[j]);
i++;
} else if (vc==b[j]) {
assert(a[i]>b[j]);
j++;
} else {
assert(0);
}
}
}
toku_free(cr);
toku_free(ar);
toku_free(br);
}
/* Exercise the basic merger on empty, interleaved, and overlapping inputs,
 * calling each case in both argument orders. */
static void test_merge (void) {
    int five[] = {1,2,3,4,5};
    // icc won't let us use a zero-sized array explicitly or by [] = {}
    // construction, so the empty side is a null pointer.
    int *none = NULL;
    test_merge_internal(five, 5, none, 0);
    test_merge_internal(none, 0, five, 5);

    int odds[]  = {1,3,5,7};
    int evens[] = {2,4};
    test_merge_internal(odds, 4, evens, 2);
    test_merge_internal(evens, 2, odds, 4);

    int xs[] = {1,2,3,5,6,7};
    int ys[] = {2,4,5,6,8};
    test_merge_internal(xs, 6, ys, 5);
    test_merge_internal(ys, 5, xs, 6);
}
static void test_internal_mergesort_row_array (int a[], int n) {
struct row *MALLOC_N(n, ar);
for (int i=0; i<n; i++) {
ar[i].data = (void*)&a[i];
ar[i].klen = sizeof(a[i]);
ar[i].vlen = 0;
}
mergesort_row_array (ar, n, NULL, compare_ints);
int *MALLOC_N(n, tmp);
for (int i=0; i<n; i++) {
tmp[i]=a[i];
}
qsort(tmp, n, sizeof(a[0]), qsort_compare_ints);
for (int i=0; i<n; i++) {
assert(tmp[i]==*(int*)ar[i].data);
}
toku_free(ar);
toku_free(tmp);
}
/* Driver for the row-array mergesort test: every prefix length of a small
 * fixed array, followed by 100 random arrays of random length. */
static void test_mergesort_row_array (void) {
    {
        int fixed[]={5,2,1,7};
        for (int len=0; len<=4; len++)
            test_internal_mergesort_row_array(fixed, len);
    }
    for (int trial=0; trial<100; trial++) {
        int len = 1 + random()%100;
        int vals[len];
        for (int k=0; k<len; k++)
            vals[k] = random()%len;
        test_internal_mergesort_row_array(vals, len);
    }
}
/* Round-trip test for loader temp-file row I/O: write three (key,val)
 * pairs with loader_write_row, verify the total byte count, read them
 * back with loader_read_row and compare, then remove the temp files.
 * Fix: the diagnostic fprintf used %ld for size_t arguments, which is
 * wrong on platforms where long is narrower than size_t; use %zu. */
static void test_read_write_rows (char *template) {
    struct brtloader_s bl = {.panic = 0,
                             .temp_file_template = template};
    int r;
    FILE *file;
    char *fname;
    r = brtloader_open_temp_file(&bl, &file, &fname);
    CKERR(r);
    FILE *idx;
    char *idxname;
    r = brtloader_open_temp_file(&bl, &idx, &idxname);
    CKERR(r);
    size_t dataoff=0;
    char *keystrings[] = {"abc", "b", "cefgh"};
    char *valstrings[] = {"defg", "", "xyz"};
    size_t actual_size=0;
    for (int i=0; i<3; i++) {
        DBT key = {.size=strlen(keystrings[i]), .data=keystrings[i]};
        DBT val = {.size=strlen(valstrings[i]), .data=valstrings[i]};
        r = loader_write_row(&key, &val, file, idx, &dataoff, &bl);
        CKERR(r);
        // 8 = per-row framing overhead; presumably two 4-byte length
        // prefixes -- TODO confirm against loader_write_row.
        actual_size+=key.size + val.size + 8;
    }
    if (actual_size != dataoff) fprintf(stderr, "actual_size=%zu, dataoff=%zu\n", actual_size, dataoff);
    assert(actual_size == dataoff);
    r = fclose(file);
    CKERR(r);
    file = fopen(fname, "r");
    assert(file);
    {
        int n_read=0;
        DBT key={.size=0}, val={.size=0};
        while (0==loader_read_row(file, &key, &val, &bl)) {
            assert(strlen(keystrings[n_read])==key.size);
            assert(strlen(valstrings[n_read])==val.size);
            assert(0==memcmp(keystrings[n_read], key.data, key.size));
            assert(0==memcmp(valstrings[n_read], val.data, val.size));
            assert(key.size<=key.ulen);
            assert(val.size<=val.ulen);
            n_read++;
        }
        assert(n_read==3);
        // loader_read_row reuses/reallocates these buffers; free once at the end.
        toku_free(key.data);
        toku_free(val.data);
    }
    r = fclose(file);
    CKERR(r);
    r = fclose(idx);
    CKERR(r);
    r = unlink(fname);
    CKERR(r);
    r = unlink(idxname);
    CKERR(r);
    toku_free(fname);
    toku_free(idxname);
}
/* Populate *rows with n (key,val) pairs: keys are the ints from keys[],
 * values are the strings from vals[] (stored without the terminating NUL).
 * Bug fix: the value DBT previously used .data=&vals[i], which points at
 * the pointer SLOT in the vals array rather than the string bytes, so the
 * stored bytes did not correspond to the declared strlen(vals[i]) size.
 * It must point at the string itself, as loader_write_row's callers do. */
static void fill_rowset (struct rowset *rows,
                         int keys[],
                         const char *vals[],
                         int n) {
    init_rowset(rows);
    for (int i=0; i<n; i++) {
        DBT key = {.size=sizeof(keys[i]),
                   .data=&keys[i]};
        DBT val = {.size=strlen(vals[i]),
                   .data=(void*)vals[i]}; // the string bytes, not &vals[i]
        add_row(rows, &key, &val);
    }
}
/* End-to-end merger test: sort-and-write two rowsets to temp files, merge
 * them down to a single file, then emit that file as a dbfile.
 * Fixes: (1) the assert after mkstemp tested the stale variable r instead
 * of mkstemp's return value fd; (2) the fopen result was never checked. */
static void test_merge_files (char *template) {
    DB *dest_db = NULL;
    struct brtloader_s bl = {.panic = 0,
                             .temp_file_template = template};
    int r;
    struct fileset fs;
    init_fileset(&fs);
    int a_keys[] = { 1, 3, 5, 7, 8, 9};
    int b_keys[] = { 2, 4, 6 };
    const char *a_vals[] = {"a", "c", "e", "g", "h", "i"};
    const char *b_vals[] = {"b", "d", "f"};
    struct rowset aset, bset;
    fill_rowset(&aset, a_keys, a_vals, 6);
    fill_rowset(&bset, b_keys, b_vals, 3);
    r = sort_and_write_rows(&aset, &fs, &bl, dest_db, compare_ints); CKERR(r);
    r = sort_and_write_rows(&bset, &fs, &bl, dest_db, compare_ints); CKERR(r);
    assert(fs.n_temp_files==2 && fs.n_temp_files_limit >= fs.n_temp_files);
    destroy_rowset(&aset);
    destroy_rowset(&bset);
    for (int i=0; i<2; i++) assert(fs.temp_data_names[i] != NULL && fs.temp_idx_names[i] != NULL);
    r = merge_files(&fs, &bl, dest_db, compare_ints); CKERR(r);
    assert(fs.n_temp_files==1);
    FILE *inf = fopen(fs.temp_data_names[0], "r");
    assert(inf); // previously unchecked
    char *name = toku_strdup(template);
    int fd = mkstemp(name);
    assert(fd>=0); // bug fix: was assert(r>=0), testing a stale variable
    fprintf(stderr, "Final data in %s\n", name);
    struct descriptor desc = {.version = 1, .dbt = (DBT){.size = 4, .data="abcd"}};
    r = write_file_to_dbfile(fd, inf, &bl, &desc);
    CKERR(r);
    // NOTE(review): fd is never closed here; presumably write_file_to_dbfile
    // takes ownership of it -- confirm, otherwise a descriptor leaks per run.
    r = fclose(inf);
    CKERR(r);
    r = unlink(fs.temp_data_names[0]);
    CKERR(r);
    r = unlink(fs.temp_idx_names[0]);
    CKERR(r);
    toku_free(fs.temp_data_names[0]);
    toku_free(fs.temp_idx_names[0]);
    toku_free(fs.temp_data_names);
    toku_free(fs.temp_idx_names);
    toku_free(name);
}
/* Test driver: parses [-v] [-q] <directory>, creates the directory if it
 * does not already exist, builds a mkstemp template inside it, and runs
 * all the loader unit tests. */
int test_main (int argc, const char *argv[]) {
    const char *progname = argv[0];
    argc--; argv++;
    while (argc > 0) {
        const char *arg = argv[0];
        if (strcmp(arg, "-v") == 0) {
            verbose = 1;
        } else if (strcmp(arg, "-q") == 0) {
            verbose = 0;
        } else if (argc == 1) {
            break; // the lone remaining argument is the directory
        } else {
            fprintf(stderr, "Usage:\n %s [-v] [-q] directory\n", progname);
            exit(1);
        }
        argc--; argv++;
    }
    assert(argc==1); // argv[0] is the directory in which to do the test.
    const char *directory = argv[0];
    int r = toku_os_mkdir(directory, 0755);
    if (r != 0) CKERR2(errno, EEXIST); // pre-existing directory is fine
    int templen = strlen(directory)+15; // room for "/tempXXXXXX" plus NUL
    char template[templen];
    int n = snprintf(template, templen, "%s/tempXXXXXX", directory);
    assert (n>0 && n<templen);
    test_read_write_rows(template);
    test_merge();
    test_mergesort_row_array();
    test_merge_files(template);
    return 0;
}
......@@ -12,14 +12,13 @@ static int evicted_keys = 0;
static void
flush (CACHEFILE f __attribute__((__unused__)),
CACHEKEY k __attribute__((__unused__)),
CACHEKEY k,
void *v __attribute__((__unused__)),
void *e __attribute__((__unused__)),
long s __attribute__((__unused__)),
BOOL w __attribute__((__unused__)),
BOOL keep __attribute__((__unused__)),
LSN m __attribute__((__unused__)),
BOOL r __attribute__((__unused__))
BOOL w,
BOOL keep,
BOOL f_ckpt __attribute__((__unused__))
) {
assert(w == FALSE);
flush_calls++;
......@@ -34,12 +33,11 @@ static int fetch_calls = 0;
static int
fetch (CACHEFILE f __attribute__((__unused__)),
CACHEKEY k __attribute__((__unused__)),
CACHEKEY k,
u_int32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)),
long *sizep __attribute__((__unused__)),
void *extraargs __attribute__((__unused__)),
LSN *written_lsn __attribute__((__unused__))
void **value,
long *sizep,
void *extraargs __attribute__((__unused__))
) {
fetch_calls++;
......@@ -47,7 +45,6 @@ fetch (CACHEFILE f __attribute__((__unused__)),
*value = 0;
*sizep = 1;
*written_lsn = ZERO_LSN;
return 0;
}
......@@ -60,7 +57,7 @@ static void cachetable_prefetch_flowcontrol_test (int cachetable_size_limit) {
char fname1[] = __FILE__ "test1.dat";
unlink(fname1);
CACHEFILE f1;
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
r = toku_cachetable_openf(&f1, ct, fname1, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
int i;
......@@ -86,10 +83,16 @@ static void cachetable_prefetch_flowcontrol_test (int cachetable_size_limit) {
// wait for everything to finish
sleep(10);
#if 0 //If we flush using reader thread.
assert(flush_evict_calls == cachetable_size_limit);
assert(evicted_keys == (1 << cachetable_size_limit)-1);
#else
assert(flush_evict_calls == 0);
assert(evicted_keys == 0);
#endif
r = toku_cachefile_close(&f1, NULL_LOGGER, 0); assert(r == 0 && f1 == 0);
char *error_string;
r = toku_cachefile_close(&f1, &error_string, FALSE, ZERO_LSN); assert(r == 0 && f1 == 0);
if (verbose) printf("%s:%d 0x%x 0x%x\n", __FUNCTION__, __LINE__,
evicted_keys, (1 << (2*cachetable_size_limit))-1);
assert(evicted_keys == (1 << (2*cachetable_size_limit))-1);
......
......@@ -9,8 +9,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
long s __attribute__((__unused__)),
BOOL w __attribute__((__unused__)),
BOOL keep __attribute__((__unused__)),
LSN m __attribute__((__unused__)),
BOOL r __attribute__((__unused__))
BOOL f_ckpt __attribute__((__unused__))
) {
/* Do nothing */
}
......@@ -21,8 +20,7 @@ fetch (CACHEFILE f __attribute__((__unused__)),
u_int32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)),
long *sizep __attribute__((__unused__)),
void *extraargs __attribute__((__unused__)),
LSN *written_lsn __attribute__((__unused__))
void *extraargs __attribute__((__unused__))
) {
return 0;
}
......@@ -40,7 +38,7 @@ cachetable_unpin_and_remove_test (int n) {
char fname1[] = __FILE__ "test1.dat";
unlink(fname1);
CACHEFILE f1;
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, 0777); assert(r == 0);
r = toku_cachetable_openf(&f1, ct, fname1, fname1, O_RDWR|O_CREAT, 0777); assert(r == 0);
// generate some random keys
CACHEKEY keys[n]; int nkeys = n;
......@@ -86,7 +84,8 @@ cachetable_unpin_and_remove_test (int n) {
toku_cachetable_get_state(ct, &nentries, NULL, NULL, NULL);
assert(nentries == 0);
r = toku_cachefile_close(&f1, NULL_LOGGER); assert(r == 0 && f1 == 0);
char *error_string;
r = toku_cachefile_close(&f1, &error_string, FALSE, ZERO_LSN); assert(r == 0 && f1 == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
......@@ -103,7 +102,7 @@ cachetable_put_evict_remove_test (int n) {
char fname1[] = __FILE__ "test1.dat";
unlink(fname1);
CACHEFILE f1;
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, 0777); assert(r == 0);
r = toku_cachetable_openf(&f1, ct, fname1, fname1, O_RDWR|O_CREAT, 0777); assert(r == 0);
u_int32_t hi[n];
for (i=0; i<n; i++)
......@@ -126,7 +125,8 @@ cachetable_put_evict_remove_test (int n) {
r = toku_cachetable_unpin_and_remove(f1, make_blocknum(0));
assert(r == 0);
r = toku_cachefile_close(&f1, NULL_LOGGER); assert(r == 0 && f1 == 0);
char *error_string;
r = toku_cachefile_close(&f1, &error_string, FALSE, ZERO_LSN); assert(r == 0 && f1 == 0);
r = toku_cachetable_close(&ct); assert(r == 0 && ct == 0);
}
......
......@@ -25,7 +25,7 @@ static void test_delete_all (void) {
r = toku_brt_create(&t); assert(r==0);
r = toku_brt_set_flags(t, TOKU_DB_DUP + TOKU_DB_DUPSORT); assert(r == 0);
r = toku_brt_set_nodesize(t, 4096); assert(r == 0);
r = toku_brt_open(t, fname, fname, 0, 1, 1, ct, null_txn, (DB*)0); assert(r==0);
r = toku_brt_open(t, fname, fname, 1, 1, ct, null_txn, (DB*)0); assert(r==0);
u_int32_t i;
for (i=0; i<limit; i++) {
char key[100];
......@@ -45,7 +45,7 @@ static void test_delete_all (void) {
r = toku_brt_create_cachetable(&ct, 0, ZERO_LSN, NULL_LOGGER); assert(r==0);
r = toku_brt_create(&t); assert(r==0);
r = toku_brt_open(t, fname, fname, 0, 0, 0, ct, null_txn, (DB*)0); assert(r==0);
r = toku_brt_open(t, fname, fname, 0, 0, ct, null_txn, (DB*)0); assert(r==0);
// Don't do a dump here, because that will warm the cachetable. We want subsequent inserts to be buffered at the root.
......@@ -80,10 +80,8 @@ static void test_delete_all (void) {
{
BRT_CURSOR cursor = 0;
r = toku_brt_cursor(t, &cursor, 0); assert(r==0);
DBT kbt, vbt;
toku_init_dbt(&kbt); kbt.flags = DB_DBT_MALLOC;
toku_init_dbt(&vbt); vbt.flags = DB_DBT_MALLOC;
r = toku_brt_cursor_get(cursor, &kbt, &vbt, DB_FIRST, null_txn);
struct check_pair pair = {len_ignore, NULL, len_ignore, NULL, 0};
r = toku_brt_cursor_get(cursor, NULL, NULL, lookup_checkf, &pair, DB_FIRST);
assert(r == DB_NOTFOUND);
r = toku_brt_cursor_close(cursor);
......
#test1305 is first, since it is the longest test. Thus reducing the makespan on parallel checks
# Put these one-per-line so that if we insert a new one the svn diff can understand it better.
# Also keep them sorted.
# Fix: cachetable-debug-test was listed twice, producing a duplicate
# check target; the duplicate entry has been removed.
NEWBRT_TESTS_RAW = \
	test1305 \
	block_allocator_test \
	bread-test \
	brtloader_open_temp_file \
	brt-serialize-test \
	brt-serialize-sub-block-test \
	brt-test \
	brt-test-cursor \
	brt-test-cursor-2 \
	brt-test0 \
	brt-test1 \
	brt-test2 \
	brt-test3 \
	brt-test4 \
	brt-test5 \
	cachetable-rwlock-test \
	cachetable-test \
	cachetable-test2 \
	cachetable-put-test \
	cachetable-getandpin-test \
	cachetable-unpin-test \
	cachetable-rename-test \
	cachetable-fd-test \
	cachetable-flush-test \
	cachetable-count-pinned-test \
	cachetable-debug-test \
	cachetable-checkpoint-pending \
	cachetable-checkpoint-test \
	cachetable-prefetch-maybegetandpin-test \
	cachetable-prefetch2-test \
	cachetable-prefetch-close-test \
	cachetable-prefetch-close-fail-test \
	cachetable-prefetch-close-leak-test \
	cachetable-prefetch-getandpin-test \
	cachetable-prefetch-getandpin-fail-test \
	cachetable-prefetch-checkpoint-test \
	cachetable-reserve-filenum \
	fifo-test \
	list-test \
	keyrange \
	keyrange-unflat \
	keyrange-dupsort \
	keyrange-dupsort-unflat \
	keytest \
	log-test \
	log-test2 \
	log-test3 \
	log-test4 \
	log-test5 \
	log-test6 \
	log-test7 \
	logcursor-timestamp \
	logcursor-empty-logdir \
	logcursor-empty-logfile \
	logcursor-empty-logfile-2 \
	memtest \
	minicron-test \
	omt-cursor-test \
	omt-test \
	recovery-cbegin \
	recovery-cbegin-cend \
	recovery-cbegin-cend-hello \
	recovery-empty \
	recovery-fopen-missing-file \
	recovery-hello \
	recovery-no-datadir \
	recovery-no-log \
	recovery-no-logdir \
	shortcut \
	test1308a \
	test-assert \
	test-brt-delete-both \
	test-brt-overflow \
	test-del-inorder \
	test-inc-split \
	test-leafentry10 \
	test-leafentry-nested \
	test_logcursor \
	test_oexcl \
	test_toku_malloc_plain_free \
	threadpool-test \
	workqueue-test \
	x1764-test \
	ybt-test \
# This line intentionally kept commented so I can have a \ on the end of the previous line
# Add in the binaries that must be run in various ways.
# All binaries to build: the runnable checks plus extras that are driven
# in special ways (see NEWBRT_TESTS_CHECKS below).
NEWBRT_TESTS_BINS_RAW = $(NEWBRT_TESTS_RAW) \
benchmark-test \
cachetable-scan \
# This line intentially kept commented so I can have a \ on the end of the previous line
# BINS will be defined by adding .exe if appropriate.
NEWBRT_TESTS_BINS = $(patsubst %,newbrt/tests/%$(BINSUF),$(NEWBRT_TESTS_BINS_RAW))
# Use the "|" rule to say that these libraries must be built before the executables, but don't rebuild these just because the library gets rebuilt.
$(NEWBRT_TESTS_BINS): | lib/libtokuportability.$(SOEXT) newbrt/libnewbrt.$(SOEXT)
$(NEWBRT_TESTS_BINS): CPPFLAGS+=-Iinclude -Inewbrt
$(NEWBRT_TESTS_BINS): LOADLIBES+=-Lnewbrt -lnewbrt -Llib -ltokuportability -lz -lpthread
$(NEWBRT_TESTS_BINS): LDFLAGS+= -Wl,-rpath,newbrt -Wl,-rpath,lib
# Check targets: the raw test list plus specially-invoked variants
# (benchmarktest_256 and the two test-assert failure modes).
NEWBRT_TESTS_CHECKS = \
benchmarktest_256 \
test-assertA \
test-assertB \
$(NEWBRT_TESTS_RAW) \
#
NEWBRT_TESTS_RUN_CHECKS = $(patsubst %,newbrt/tests/check_%,$(NEWBRT_TESTS_CHECKS))
# Aggregate targets to build and run every newbrt test.
newbrt/tests/build: $(NEWBRT_TESTS_BINS)
newbrt/tests/check: $(NEWBRT_TESTS_RUN_CHECKS)
newbrt/tests/check_benchmarktest_256: newbrt/tests/benchmark-test$(BINSUF) $(PTHREAD_LOCAL)
$(VGRIND) ./$< $(VERBVERBOSE) --valsize 256 --verify 1 $(SUMMARIZE_CMD)
newbrt/tests/check_test-assertA: newbrt/tests/test-assert$(BINSUF) $(PTHREAD_LOCAL)
@# no arguments, should err
$(VGRIND) ./$< > /dev/null 2>&1 ; test $$? = 1 $(SUMMARIZE_CMD)
ifeq ($(OS_CHOICE),windows)
TEST_ASSERT_FAILCODE=134 #Does not use raise(SIGABRT) to fail assert, so will not overwrite 134 with 1.
else
TEST_ASSERT_FAILCODE=1
endif
newbrt/tests/check_test-assertB: newbrt/tests/test-assert$(BINSUF) $(PTHREAD_LOCAL)
@# one argument, not "ok" should err
@rm -f test-assert.out
($(VGRIND) ./$< notok) > test-assert.out 2>&1 ; test $$? = $(TEST_ASSERT_FAILCODE) && fgrep failed test-assert.out > /dev/null $(SUMMARIZE_CMD)
newbrt/tests/check_test-assert: newbrt/tests/test-assert$(BINSUF) $(PTHREAD_LOCAL)
@# one argument, "ok" should not error
$(VGRIND) ./$< ok $(SUMMARIZE_CMD)
newbrt/tests/check_test1305: VGRIND=
newbrt/tests/check_%: newbrt/tests/%$(BINSUF)
$(VGRIND) ./$< $(VERBVERBOSE) $(SUMMARIZE_CMD)
newbrt/tests/brtloader_open_temp_file.$(OEXT): CPPFLAGS+=-Inewbrt -Iinclude
newbrt/tests/brtloader_open_temp_file.$(OEXT): newbrt/brtloader-internal.h
......@@ -18,11 +18,14 @@ static double elapsed (void) {
return tdiff(&now, &starttime);
}
static int __attribute__((__noreturn__))
static int
#ifndef GCOV
__attribute__((__noreturn__))
#endif
never_run (void *a) {
assert(a==0);
assert(0);
#if TOKU_WINDOWS
#if TOKU_WINDOWS || defined(GCOV)
return 0; //ICC ignores the noreturn attribute.
#endif
}
......
......@@ -17,12 +17,18 @@ run_test(void) {
TOKULOGGER logger;
r = toku_logger_create(&logger); assert(r == 0);
r = toku_logger_open(TESTDIR, logger); assert(r == 0);
r = toku_log_end_checkpoint(logger, NULL, FALSE, 42, 0); assert(r == 0);
LSN firstbegin = ZERO_LSN;
r = toku_log_begin_checkpoint(logger, &firstbegin, TRUE, 0); assert(r == 0);
assert(firstbegin.lsn != ZERO_LSN.lsn);
r = toku_log_end_checkpoint(logger, NULL, FALSE, firstbegin.lsn, 0); assert(r == 0);
r = toku_log_begin_checkpoint(logger, NULL, TRUE, 0); assert(r == 0);
r = toku_logger_close(&logger); assert(r == 0);
// run recovery
r = tokudb_recover(TESTDIR, TESTDIR, 0, 0, 0);
r = tokudb_recover(TESTDIR, TESTDIR,
toku_builtin_compare_fun, toku_builtin_compare_fun,
NULL, NULL,
0);
assert(r == 0);
return 0;
}
......
......@@ -7,7 +7,8 @@ static TOKUTXN const null_txn = 0;
static DB * const null_db = 0;
int
test_main (int argc __attribute__((__unused__)), char *argv[] __attribute__((__unused__))) {
test_main(int argc, const char *argv[]) {
default_parse_args (argc, argv);
const char *n = __FILE__ "dump.brt";
int r;
BRT t;
......@@ -16,7 +17,7 @@ test_main (int argc __attribute__((__unused__)), char *argv[] __attribute__((__u
unlink(n);
assert(f);
r = toku_brt_create_cachetable(&ct, 0, ZERO_LSN, NULL_LOGGER); assert(r==0);
r = toku_open_brt(n, 0, 1, &t, 1<<12, ct, null_txn, toku_builtin_compare_fun, null_db); assert(r==0);
r = toku_open_brt(n, 1, &t, 1<<12, ct, null_txn, toku_builtin_compare_fun, null_db); assert(r==0);
int i;
for (i=0; i<10000; i++) {
char key[100],val[100];
......
......@@ -12,6 +12,11 @@
#define CKERR2(r,r2) do { if (r!=r2) fprintf(stderr, "%s:%d error %d %s, expected %d\n", __FILE__, __LINE__, r, strerror(r), r2); assert(r==r2); } while (0)
#define CKERR2s(r,r2,r3) do { if (r!=r2 && r!=r3) fprintf(stderr, "%s:%d error %d %s, expected %d or %d\n", __FILE__, __LINE__, r, strerror(r), r2,r3); assert(r==r2||r==r3); } while (0)
#define DEBUG_LINE() do { \
fprintf(stderr, "%s() %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
fflush(stderr); \
} while (0)
const ITEMLEN len_ignore = 0xFFFFFFFF;
struct check_pair {
......@@ -87,10 +92,11 @@ default_parse_args (int argc, const char *argv[]) {
int test_main(int argc, const char *argv[]);
static void dummy(void) {}
static void dummy_set_brt(DB *db UU(), BRT brt UU()) {}
int
main(int argc, const char *argv[]) {
int rinit = toku_brt_init(dummy, dummy);
int rinit = toku_brt_init(dummy, dummy, dummy_set_brt);
CKERR(rinit);
int r = test_main(argc, argv);
int rdestroy = toku_brt_destroy();
......
......@@ -5,12 +5,12 @@
static void
test0 (void) {
u_int32_t c = x1764_memory("", 0);
assert(c==0);
assert(c==~(0U));
struct x1764 cs;
x1764_init(&cs);
x1764_add(&cs, "", 0);
c = x1764_finish(&cs);
assert(c==0);
assert(c==~(0U));
}
static void
......@@ -23,7 +23,7 @@ test1 (void) {
u_int32_t expect = expect64 ^ (expect64>>32);
c = x1764_memory(&v, i);
//printf("i=%d c=%08x expect=%08x\n", i, c, expect);
assert(c==expect);
assert(c==~expect);
}
}
......
......@@ -756,7 +756,7 @@ int le_is_provdel(LEAFENTRY le) {
int
le_has_xids(LEAFENTRY le, XIDS xids) {
int rval;
int rval=0;
//Read num_uxrs
u_int8_t num_uxrs = le->num_xrs;
......
......@@ -25,7 +25,7 @@ u_int32_t x1764_memory (const void *buf, int len)
}
c = c*17 + input;
}
return (c&0xFFFFFFFF) ^ (c>>32);
return ~((c&0xFFFFFFFF) ^ (c>>32));
}
void x1764_init(struct x1764 *l) {
......@@ -172,5 +172,5 @@ u_int32_t x1764_finish (struct x1764 *l) {
if (len>0) {
l->sum = l->sum*17 + l->input;
}
return (l->sum &0xffffffff) ^ (l->sum>>32);
return ~((l->sum &0xffffffff) ^ (l->sum>>32));
}
......@@ -16,6 +16,11 @@ ifneq ($(TOKUDB_REVISION),)
CPPFLAGS += -DTOKUDB_REVISION=$(TOKUDB_REVISION)
endif
LOADER_USE_REFERENCE_MODEL=0
ifeq ($(LOADER_USE_REFERENCE_MODEL),1)
CPPFLAGS += -DLOADER_USE_REFERENCE_MODEL
endif
YDB=ydb.$(AEXT)
YDB_BUNDLE=ydb.bundle
TYDB=tydb.$(AEXT)
......@@ -37,6 +42,7 @@ OBJS_RAW = \
elocks \
#\end
#OBJS automatically defined.
loader.$(OEXT): $(LOG_HEADER)
LIBRARIES=
......
......@@ -29,6 +29,8 @@
toku_free;
toku_malloc;
toku_xmemdup;
toku_xrealloc;
toku_do_assert;
toku_os_get_file_size;
toku_os_getpid;
toku_os_gettid;
......@@ -62,6 +64,7 @@
toku_do_assert;
toku_do_assert_fail;
test_db_redirect_dictionary;
local: *;
};
This diff is collapsed.
......@@ -9,8 +9,8 @@
* Serial No. 11/760379 and to the patents and/or patent applications resulting from it.
*/
int toku_loader_create_loader(DB_ENV *env, DB_TXN *txn, DB_LOADER **blp, DB *src_db, int N, DB *dbs[N], uint32_t flags[N], uint32_t dbt_flags[N], void *extra);
int toku_loader_set_duplicate_callback(DB_LOADER *loader, void (*duplicate)(DB *db, int i, DBT *key, DBT *val));
int toku_loader_create_loader(DB_ENV *env, DB_TXN *txn, DB_LOADER **blp, DB *src_db, int N, DB *dbs[N], uint32_t db_flags[N], uint32_t dbt_flags[N], uint32_t loader_flags, void *extra);
int toku_loader_set_error_callback(DB_LOADER *loader, void (*error_cb)(DB *db, int i, int err, DBT *key, DBT *val, void *extra));
int toku_loader_set_poll_function(DB_LOADER *loader, int (*poll_func)(void *extra, float progress));
int toku_loader_put(DB_LOADER *loader, DBT *key, DBT *val);
int toku_loader_close(DB_LOADER *loader);
......
......@@ -17,7 +17,6 @@ LOCKTREE_TLOG = locktree_tlog.$(AEXT)
LOCKTREE_LOG = locktree_log.$(AEXT)
OBJS_RAW = \
db_id \
idlth \
lth \
rth \
......
/* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2007-8 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include <toku_portability.h>
#include <errno.h>
#include <toku_assert.h>
#include <string.h>
#include <limits.h>
#include <memory.h>
#include <hashfun.h>
#include "db_id.h"
/* Two db_ids are equal when they are the same object, or when both the
 * cached hash and the underlying OS file id compare equal. */
BOOL toku_db_id_equals(const toku_db_id* a, const toku_db_id* b) {
    assert(a && b);
    BOOL same = (BOOL)
        (a == b ||
         (a->saved_hash == b->saved_hash &&          // cheap hash check first
          0 == memcmp(&a->id, &b->id, sizeof(b->id))));
    return same;
}
/* Take an additional reference on db_id; it must already be live
 * (ref_count > 0). */
void toku_db_id_add_ref(toku_db_id* db_id) {
    assert(db_id);
    assert(db_id->ref_count > 0);
    ++db_id->ref_count;
}
/* Free the db_id and NULL out the caller's pointer so it cannot be
 * used after free. */
static void toku_db_id_close(toku_db_id** pdb_id) {
    toku_free(*pdb_id);
    *pdb_id = NULL;
}
/* Drop one reference; on the last one, frees the db_id and NULLs *pdb_id. */
void toku_db_id_remove_ref(toku_db_id** pdb_id) {
    toku_db_id* db_id = *pdb_id;
    assert(db_id);
    assert(db_id->ref_count > 0);
    if (--db_id->ref_count == 0) {
        toku_db_id_close(pdb_id);
    }
}
/* Allocate a db_id identifying the file underlying fd, with ref_count 1
 * and a cached hash of the OS file id.
 * Returns 0 on success and stores the new object in *pdbid; on failure
 * returns ENOMEM or the error from toku_os_get_unique_file_id, frees any
 * partial allocation, and leaves *pdbid untouched. */
int toku_db_id_create(toku_db_id** pdbid, int fd) {
    toku_db_id* db_id = (toku_db_id *) toku_malloc(sizeof(*db_id));
    if (db_id == NULL) return ENOMEM;
    memset(db_id, 0, sizeof(*db_id));
    int r = toku_os_get_unique_file_id(fd, &db_id->id);
    if (r != 0) {
        toku_free(db_id);
        return r;
    }
    db_id->saved_hash = hash_key((unsigned char*)&db_id->id, sizeof(db_id->id));
    db_id->ref_count  = 1;
    *pdbid = db_id;
    return 0;
}
/* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2007-8 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include <toku_portability.h>
#include <brttypes.h>
#if !defined(TOKU_DB_ID_H)
#define TOKU_DB_ID_H
/* Identity of an open dictionary file. Reference-counted so several
 * holders can share one instance (see toku_db_id_add_ref/remove_ref). */
typedef struct __toku_db_id {
struct fileid id; /* unique per-file id, filled by toku_os_get_unique_file_id */
u_int32_t saved_hash; /* hash of 'id', computed once at creation */
u_int32_t ref_count; /* number of owners; object is freed when it drops to 0 */
} toku_db_id;
/* db_id methods */
int toku_db_id_create(toku_db_id** pdbid, int fd); /* allocate with ref_count 1; id taken from fd */
BOOL toku_db_id_equals(const toku_db_id* a, const toku_db_id* b); /* same object or same file id */
void toku_db_id_add_ref(toku_db_id* db_id); /* db_id must already be live */
void toku_db_id_remove_ref(toku_db_id** pdb_id); /* frees and NULLs *pdb_id on last ref */
#endif /* #if !defined(TOKU_DB_ID_H) */
......@@ -19,8 +19,8 @@
/* TODO: reallocate the hash idlth if it grows too big. Perhaps, use toku_get_prime in newbrt/primes.c */
const u_int32_t __toku_idlth_init_size = 521;
static inline u_int32_t toku__idlth_hash(toku_idlth* idlth, toku_db_id* key) {
size_t tmp = key->saved_hash;
static inline u_int32_t toku__idlth_hash(toku_idlth* idlth, DICTIONARY_ID dict_id) {
uint32_t tmp = dict_id.dictid;
return tmp % idlth->num_buckets;
}
......@@ -63,14 +63,14 @@ int toku_idlth_create(toku_idlth** pidlth,
return r;
}
toku_lt_map* toku_idlth_find(toku_idlth* idlth, toku_db_id* key) {
toku_lt_map* toku_idlth_find(toku_idlth* idlth, DICTIONARY_ID dict_id) {
assert(idlth);
u_int32_t index = toku__idlth_hash(idlth, key);
u_int32_t index = toku__idlth_hash(idlth, dict_id);
toku_idlth_elt* head = &idlth->buckets[index];
toku_idlth_elt* current = head->next_in_bucket;
while (current) {
if (toku_db_id_equals(current->value.db_id, key)) { break; }
if (current->value.dict_id.dictid == dict_id.dictid) { break; }
current = current->next_in_bucket;
}
return current ? &current->value : NULL;
......@@ -98,20 +98,20 @@ toku_lt_map* toku_idlth_next(toku_idlth* idlth) {
}
/* Element MUST exist. */
void toku_idlth_delete(toku_idlth* idlth, toku_db_id* key) {
void toku_idlth_delete(toku_idlth* idlth, DICTIONARY_ID dict_id) {
assert(idlth);
toku__invalidate_scan(idlth);
/* Must have elements. */
assert(idlth->num_keys);
u_int32_t index = toku__idlth_hash(idlth, key);
u_int32_t index = toku__idlth_hash(idlth, dict_id);
toku_idlth_elt* head = &idlth->buckets[index];
toku_idlth_elt* prev = head;
toku_idlth_elt* current = prev->next_in_bucket;
while (current != NULL) {
if (toku_db_id_equals(current->value.db_id, key)) { break; }
if (current->value.dict_id.dictid == dict_id.dictid) { break; }
prev = current;
current = current->next_in_bucket;
}
......@@ -120,26 +120,24 @@ void toku_idlth_delete(toku_idlth* idlth, toku_db_id* key) {
current->prev_in_iteration->next_in_iteration = current->next_in_iteration;
current->next_in_iteration->prev_in_iteration = current->prev_in_iteration;
prev->next_in_bucket = current->next_in_bucket;
toku_db_id_remove_ref(&current->value.db_id);
idlth->free(current);
idlth->num_keys--;
return;
}
/* Will allow you to insert it over and over. You need to keep track. */
int toku_idlth_insert(toku_idlth* idlth, toku_db_id* key) {
int toku_idlth_insert(toku_idlth* idlth, DICTIONARY_ID dict_id) {
int r = ENOSYS;
assert(idlth);
toku__invalidate_scan(idlth);
u_int32_t index = toku__idlth_hash(idlth, key);
u_int32_t index = toku__idlth_hash(idlth, dict_id);
/* Allocate a new one. */
toku_idlth_elt* element = (toku_idlth_elt*)idlth->malloc(sizeof(*element));
if (!element) { r = ENOMEM; goto cleanup; }
memset(element, 0, sizeof(*element));
element->value.db_id = key;
toku_db_id_add_ref(element->value.db_id);
element->value.dict_id = dict_id;
element->next_in_iteration = idlth->iter_head.next_in_iteration;
element->prev_in_iteration = &idlth->iter_head;
......@@ -166,7 +164,6 @@ static inline void toku__idlth_clear(toku_idlth* idlth, BOOL clean) {
while (next != head) {
element = next;
next = toku__idlth_next(idlth);
toku_db_id_remove_ref(&element->value.db_id);
idlth->free(element);
}
/* If clean is true, then we want to restore it to 'just created' status.
......
......@@ -15,7 +15,6 @@
#include <db.h>
#include <brttypes.h>
#include <rangetree.h>
#include <db_id.h>
#if !defined(TOKU_LOCKTREE_DEFINE)
#define TOKU_LOCKTREE_DEFINE
......@@ -24,7 +23,7 @@ typedef struct __toku_lock_tree toku_lock_tree;
typedef struct __toku_lt_map toku_lt_map;
struct __toku_lt_map {
toku_db_id* db_id;
DICTIONARY_ID dict_id;
toku_lock_tree* tree;
};
......@@ -57,17 +56,17 @@ int toku_idlth_create(toku_idlth** ptable,
void (*user_free) (void*),
void* (*user_realloc)(void*, size_t));
toku_lt_map* toku_idlth_find (toku_idlth* table, toku_db_id* key);
toku_lt_map* toku_idlth_find (toku_idlth* table, DICTIONARY_ID dict_id);
void toku_idlth_start_scan (toku_idlth* table);
toku_lt_map* toku_idlth_next (toku_idlth* table);
void toku_idlth_delete (toku_idlth* table, toku_db_id* key);
void toku_idlth_delete (toku_idlth* table, DICTIONARY_ID dict_id);
void toku_idlth_close (toku_idlth* table);
int toku_idlth_insert (toku_idlth* table, toku_db_id* key);
int toku_idlth_insert (toku_idlth* table, DICTIONARY_ID dict_id);
void toku_idlth_clear (toku_idlth* idlth);
......
......@@ -1273,21 +1273,20 @@ int toku_lt_create(toku_lock_tree** ptree, BOOL duplicates,
return r;
}
void toku_ltm_invalidate_lt(toku_ltm* mgr, toku_db_id* db_id) {
assert(mgr && db_id);
void toku_ltm_invalidate_lt(toku_ltm* mgr, DICTIONARY_ID dict_id) {
assert(mgr && dict_id.dictid != DICTIONARY_ID_NONE.dictid);
toku_lt_map* map = NULL;
map = toku_idlth_find(mgr->idlth, db_id);
map = toku_idlth_find(mgr->idlth, dict_id);
if (map) {
toku_idlth_delete(mgr->idlth, db_id);
toku_idlth_delete(mgr->idlth, dict_id);
}
}
static inline void toku_lt_set_db_id(toku_lock_tree* lt, toku_db_id* db_id) {
assert(lt && db_id);
static inline void toku_lt_set_dict_id(toku_lock_tree* lt, DICTIONARY_ID dict_id) {
assert(lt && dict_id.dictid != DICTIONARY_ID_NONE.dictid);
assert(!lt->settings_final);
lt->db_id = db_id;
toku_db_id_add_ref(db_id);
lt->dict_id = dict_id;
}
static inline BOOL toku_lt_get_dups(toku_lock_tree* lt) {
......@@ -1296,7 +1295,7 @@ static inline BOOL toku_lt_get_dups(toku_lock_tree* lt) {
}
int toku_ltm_get_lt(toku_ltm* mgr, toku_lock_tree** ptree,
BOOL duplicates, toku_db_id* db_id) {
BOOL duplicates, DICTIONARY_ID dict_id) {
/* first look in hash table to see if lock tree exists for that db,
if so return it */
int r = ENOSYS;
......@@ -1305,7 +1304,7 @@ int toku_ltm_get_lt(toku_ltm* mgr, toku_lock_tree** ptree,
BOOL added_to_ltm = FALSE;
BOOL added_to_idlth = FALSE;
map = toku_idlth_find(mgr->idlth, db_id);
map = toku_idlth_find(mgr->idlth, dict_id);
if (map != NULL) {
/* Load already existing lock tree. */
assert (map->tree != NULL);
......@@ -1315,24 +1314,24 @@ int toku_ltm_get_lt(toku_ltm* mgr, toku_lock_tree** ptree,
r = 0;
goto cleanup;
}
/* Must create new lock tree for this db_id*/
/* Must create new lock tree for this dict_id*/
r = toku_lt_create(&tree, duplicates, mgr->panic, mgr,
mgr->get_compare_fun_from_db,
mgr->get_dup_compare_from_db,
mgr->malloc, mgr->free, mgr->realloc);
if (r != 0) { goto cleanup; }
toku_lt_set_db_id(tree, db_id);
toku_lt_set_dict_id(tree, dict_id);
/* add tree to ltm */
r = toku_ltm_add_lt(mgr, tree);
if (r!=0) { goto cleanup; }
added_to_ltm = TRUE;
/* add mapping to idlth*/
r = toku_idlth_insert(mgr->idlth, db_id);
r = toku_idlth_insert(mgr->idlth, dict_id);
if (r != 0) { goto cleanup; }
added_to_idlth = TRUE;
map = toku_idlth_find(mgr->idlth, db_id);
map = toku_idlth_find(mgr->idlth, dict_id);
assert(map);
map->tree = tree;
......@@ -1343,7 +1342,7 @@ int toku_ltm_get_lt(toku_ltm* mgr, toku_lock_tree** ptree,
if (r != 0) {
if (tree != NULL) {
if (added_to_ltm) { toku_ltm_remove_lt(mgr, tree); }
if (added_to_idlth) { toku_idlth_delete(mgr->idlth, db_id); }
if (added_to_idlth) { toku_idlth_delete(mgr->idlth, dict_id); }
toku_lt_close(tree);
}
}
......@@ -1375,7 +1374,6 @@ int toku_lt_close(toku_lock_tree* tree) {
toku_rth_close(tree->txns_still_locked);
tree->free(tree->buf);
if (tree->db_id) { toku_db_id_remove_ref(&tree->db_id); }
tree->free(tree);
r = first_error;
cleanup:
......@@ -2216,9 +2214,9 @@ void toku_lt_add_ref(toku_lock_tree* tree) {
static void toku_ltm_stop_managing_lt(toku_ltm* mgr, toku_lock_tree* tree) {
toku_ltm_remove_lt(mgr, tree);
toku_lt_map* map = toku_idlth_find(mgr->idlth, tree->db_id);
toku_lt_map* map = toku_idlth_find(mgr->idlth, tree->dict_id);
if (map && map->tree == tree) {
toku_idlth_delete(mgr->idlth, tree->db_id);
toku_idlth_delete(mgr->idlth, tree->dict_id);
}
}
......@@ -2228,7 +2226,7 @@ int toku_lt_remove_ref(toku_lock_tree* tree) {
assert(tree->ref_count > 0);
tree->ref_count--;
if (tree->ref_count > 0) { r = 0; goto cleanup; }
assert(tree->db_id);
assert(tree->dict_id.dictid != DICTIONARY_ID_NONE.dictid);
toku_ltm_stop_managing_lt(tree->mgr, tree);
r = toku_lt_close(tree);
if (r!=0) { goto cleanup; }
......
......@@ -120,8 +120,8 @@ struct __toku_lock_tree {
u_int32_t curr_locks;
/** The number of references held by DB instances and transactions to this lock tree*/
u_int32_t ref_count;
/** db_id associated with the lock tree */
toku_db_id* db_id;
/** DICTIONARY_ID associated with the lock tree */
DICTIONARY_ID dict_id;
TXNID table_lock_owner;
BOOL table_is_locked;
};
......@@ -220,12 +220,12 @@ int toku_lt_create(toku_lock_tree** ptree, BOOL duplicates,
void* (*user_realloc)(void*, size_t));
/**
Gets a lock tree for a given DB with id db_id
Gets a lock tree for a given DB with id dict_id
*/
int toku_ltm_get_lt(toku_ltm* mgr, toku_lock_tree** ptree,
BOOL duplicates, toku_db_id* db_id);
BOOL duplicates, DICTIONARY_ID dict_id);
void toku_ltm_invalidate_lt(toku_ltm* mgr, toku_db_id* db_id);
void toku_ltm_invalidate_lt(toku_ltm* mgr, DICTIONARY_ID dict_id);
/**
Closes and frees a lock tree.
......
......@@ -9,7 +9,6 @@
#include <toku_assert.h>
#include <errno.h>
int verbose=0;
#include <db_id.h>
#include <lth.h>
#include <sys/types.h>
#include <sys/stat.h>
......
This diff is collapsed.
......@@ -12,10 +12,9 @@ static int r;
static u_int32_t lt_refs[100];
static toku_lock_tree* lts [100];
static toku_ltm* ltm = NULL;
static toku_db_id* db_ids[100];
static DICTIONARY_ID dict_ids[100];
static u_int32_t max_locks = 10;
int nums[10000];
int fd[100];
static void setup_ltm(void) {
assert(!ltm);
......@@ -31,7 +30,7 @@ static void db_open_tree(BOOL dups, size_t index, size_t db_id_index) {
(lt_refs[index] > 0 && lts[index]));
assert(ltm);
lt_refs[index]++;
r = toku_ltm_get_lt(ltm, &lts[index], dups, db_ids[db_id_index]);
r = toku_ltm_get_lt(ltm, &lts[index], dups, dict_ids[db_id_index]);
CKERR(r);
assert(lts[index]);
}
......@@ -116,14 +115,13 @@ static void initial_setup(void) {
u_int32_t i;
ltm = NULL;
assert(sizeof(db_ids) / sizeof(db_ids[0]) == sizeof(lts) / sizeof(lts[0]));
assert(sizeof(dict_ids) / sizeof(dict_ids[0]) == sizeof(lts) / sizeof(lts[0]));
for (i = 0; i < sizeof(lts) / sizeof(lts[0]); i++) {
lts[i] = NULL;
char name[sizeof(TESTDIR) + 256];
sprintf(name, TESTDIR "/file%05x.db", i);
fd[i] = open(name, O_CREAT|O_RDWR, S_IRWXU);
if (!db_ids[i]) toku_db_id_create(&db_ids[i], fd[i]);
assert(db_ids[i]);
dict_ids[i].dictid = i;
assert(dict_ids[i].dictid != DICTIONARY_ID_NONE.dictid);
lt_refs[i] = 0;
}
}
......@@ -132,10 +130,7 @@ static void close_test(void) {
u_int32_t i;
for (i = 0; i < sizeof(lts) / sizeof(lts[0]); i++) {
assert(lt_refs[i]==0); //The internal reference isn't counted.
assert(db_ids[i]);
toku_db_id_remove_ref(&db_ids[i]);
assert(!db_ids[i]);
close(fd[i]);
assert(dict_ids[i].dictid != DICTIONARY_ID_NONE.dictid);
}
}
......
#include <toku_portability.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
......
......@@ -133,6 +133,7 @@ BDB_DONTRUN_TESTS = \
recover-put-multiple-fdelete-some \
recover-split-checkpoint \
progress \
redirect \
#\ ends prev line
# checkpoint tests depend on this header file,
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment