Commit f165ee02 authored by Leif Walsh, committed by Yoni Fogel

[t:4002] Committing HOT to main.

git-svn-id: file:///svn/toku/tokudb@38549 c7de825b-a66e-492c-adef-691d508d4ae1
parent e44c7d7a
@@ -197,7 +197,7 @@ typedef struct __toku_engine_status {
 uint64_t cleaner_max_buffer_workdone; /* max workdone value of any message buffer flushed by cleaner thread */
 uint64_t cleaner_min_buffer_workdone; /* min workdone value of any message buffer flushed by cleaner thread */
 uint64_t cleaner_total_buffer_workdone; /* total workdone value of message buffers flushed by cleaner thread */
- uint64_t cleaner_num_leaves_unmerged; /* number of leaves left unmerged by the cleaner thread */
+ uint64_t cleaner_num_dirtied_for_leaf_merge; /* nodes dirtied by the "flush from root" process to merge a leaf node */
 uint64_t flush_total; /* total number of flushes done by flusher threads or cleaner threads */
 uint64_t flush_in_memory; /* number of in memory flushes */
 uint64_t flush_needed_io; /* number of flushes that had to read a child (or part) off disk */
@@ -223,6 +223,10 @@ typedef struct __toku_engine_status {
 uint64_t dirty_leaf; /* number of times leaf nodes are dirtied when previously clean */
 uint64_t dirty_nonleaf; /* number of times nonleaf nodes are dirtied when previously clean */
 uint64_t balance_leaf; /* number of times a leaf node is balanced inside brt */
+ uint64_t hot_num_started; /* number of HOT operations that have begun */
+ uint64_t hot_num_completed; /* number of HOT operations that have successfully completed */
+ uint64_t hot_num_aborted; /* number of HOT operations that have been aborted */
+ uint64_t hot_max_root_flush_count; /* max number of flushes from root ever required to optimize a tree */
 uint64_t msg_bytes_in; /* how many bytes of messages injected at root (for all trees)*/
 uint64_t msg_bytes_out; /* how many bytes of messages flushed from h1 nodes to leaves*/
 uint64_t msg_bytes_curr; /* how many bytes of messages currently in trees (estimate)*/
@@ -541,6 +545,7 @@ struct __toku_db {
 int (*getf_set)(DB*, DB_TXN*, u_int32_t, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_set without a persistent cursor) */;
 int (*flatten)(DB*, DB_TXN*) /* Flatten a dictionary, similar to (but faster than) a table scan */;
 int (*optimize)(DB*) /* Run garbage collecion and promote all transactions older than oldest. Amortized (happens during flattening) */;
+ int (*hot_optimize)(DB*, int (*progress_callback)(void *progress_extra, float progress), void *progress_extra);
 int (*get_fragmentation)(DB*,TOKU_DB_FRAGMENTATION);
 int (*get_readpagesize)(DB*,u_int32_t*);
 int (*set_readpagesize)(DB*,u_int32_t);
@@ -549,7 +554,7 @@ struct __toku_db {
 int (*verify_with_progress)(DB *, int (*progress_callback)(void *progress_extra, float progress), void *progress_extra, int verbose, int keep_going);
 int (*update)(DB *, DB_TXN*, const DBT *key, const DBT *extra, u_int32_t flags);
 int (*update_broadcast)(DB *, DB_TXN*, const DBT *extra, u_int32_t flags);
- void* __toku_dummy0[11];
+ void* __toku_dummy0[10];
 char __toku_dummy1[96];
 void *api_internal; /* 32-bit offset=236 size=4, 64=bit offset=376 size=8 */
 void* __toku_dummy2[5];
...
@@ -197,7 +197,7 @@ typedef struct __toku_engine_status {
 uint64_t cleaner_max_buffer_workdone; /* max workdone value of any message buffer flushed by cleaner thread */
 uint64_t cleaner_min_buffer_workdone; /* min workdone value of any message buffer flushed by cleaner thread */
 uint64_t cleaner_total_buffer_workdone; /* total workdone value of message buffers flushed by cleaner thread */
- uint64_t cleaner_num_leaves_unmerged; /* number of leaves left unmerged by the cleaner thread */
+ uint64_t cleaner_num_dirtied_for_leaf_merge; /* nodes dirtied by the "flush from root" process to merge a leaf node */
 uint64_t flush_total; /* total number of flushes done by flusher threads or cleaner threads */
 uint64_t flush_in_memory; /* number of in memory flushes */
 uint64_t flush_needed_io; /* number of flushes that had to read a child (or part) off disk */
@@ -223,6 +223,10 @@ typedef struct __toku_engine_status {
 uint64_t dirty_leaf; /* number of times leaf nodes are dirtied when previously clean */
 uint64_t dirty_nonleaf; /* number of times nonleaf nodes are dirtied when previously clean */
 uint64_t balance_leaf; /* number of times a leaf node is balanced inside brt */
+ uint64_t hot_num_started; /* number of HOT operations that have begun */
+ uint64_t hot_num_completed; /* number of HOT operations that have successfully completed */
+ uint64_t hot_num_aborted; /* number of HOT operations that have been aborted */
+ uint64_t hot_max_root_flush_count; /* max number of flushes from root ever required to optimize a tree */
 uint64_t msg_bytes_in; /* how many bytes of messages injected at root (for all trees)*/
 uint64_t msg_bytes_out; /* how many bytes of messages flushed from h1 nodes to leaves*/
 uint64_t msg_bytes_curr; /* how many bytes of messages currently in trees (estimate)*/
@@ -551,6 +555,7 @@ struct __toku_db {
 int (*getf_set)(DB*, DB_TXN*, u_int32_t, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_set without a persistent cursor) */;
 int (*flatten)(DB*, DB_TXN*) /* Flatten a dictionary, similar to (but faster than) a table scan */;
 int (*optimize)(DB*) /* Run garbage collecion and promote all transactions older than oldest. Amortized (happens during flattening) */;
+ int (*hot_optimize)(DB*, int (*progress_callback)(void *progress_extra, float progress), void *progress_extra);
 int (*get_fragmentation)(DB*,TOKU_DB_FRAGMENTATION);
 int (*get_readpagesize)(DB*,u_int32_t*);
 int (*set_readpagesize)(DB*,u_int32_t);
@@ -559,7 +564,7 @@ struct __toku_db {
 int (*verify_with_progress)(DB *, int (*progress_callback)(void *progress_extra, float progress), void *progress_extra, int verbose, int keep_going);
 int (*update)(DB *, DB_TXN*, const DBT *key, const DBT *extra, u_int32_t flags);
 int (*update_broadcast)(DB *, DB_TXN*, const DBT *extra, u_int32_t flags);
- void* __toku_dummy0[14];
+ void* __toku_dummy0[13];
 char __toku_dummy1[96];
 void *api_internal; /* 32-bit offset=248 size=4, 64=bit offset=400 size=8 */
 void* __toku_dummy2[5];
...
@@ -197,7 +197,7 @@ typedef struct __toku_engine_status {
 uint64_t cleaner_max_buffer_workdone; /* max workdone value of any message buffer flushed by cleaner thread */
 uint64_t cleaner_min_buffer_workdone; /* min workdone value of any message buffer flushed by cleaner thread */
 uint64_t cleaner_total_buffer_workdone; /* total workdone value of message buffers flushed by cleaner thread */
- uint64_t cleaner_num_leaves_unmerged; /* number of leaves left unmerged by the cleaner thread */
+ uint64_t cleaner_num_dirtied_for_leaf_merge; /* nodes dirtied by the "flush from root" process to merge a leaf node */
 uint64_t flush_total; /* total number of flushes done by flusher threads or cleaner threads */
 uint64_t flush_in_memory; /* number of in memory flushes */
 uint64_t flush_needed_io; /* number of flushes that had to read a child (or part) off disk */
@@ -223,6 +223,10 @@ typedef struct __toku_engine_status {
 uint64_t dirty_leaf; /* number of times leaf nodes are dirtied when previously clean */
 uint64_t dirty_nonleaf; /* number of times nonleaf nodes are dirtied when previously clean */
 uint64_t balance_leaf; /* number of times a leaf node is balanced inside brt */
+ uint64_t hot_num_started; /* number of HOT operations that have begun */
+ uint64_t hot_num_completed; /* number of HOT operations that have successfully completed */
+ uint64_t hot_num_aborted; /* number of HOT operations that have been aborted */
+ uint64_t hot_max_root_flush_count; /* max number of flushes from root ever required to optimize a tree */
 uint64_t msg_bytes_in; /* how many bytes of messages injected at root (for all trees)*/
 uint64_t msg_bytes_out; /* how many bytes of messages flushed from h1 nodes to leaves*/
 uint64_t msg_bytes_curr; /* how many bytes of messages currently in trees (estimate)*/
@@ -553,6 +557,7 @@ struct __toku_db {
 int (*getf_set)(DB*, DB_TXN*, u_int32_t, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_set without a persistent cursor) */;
 int (*flatten)(DB*, DB_TXN*) /* Flatten a dictionary, similar to (but faster than) a table scan */;
 int (*optimize)(DB*) /* Run garbage collecion and promote all transactions older than oldest. Amortized (happens during flattening) */;
+ int (*hot_optimize)(DB*, int (*progress_callback)(void *progress_extra, float progress), void *progress_extra);
 int (*get_fragmentation)(DB*,TOKU_DB_FRAGMENTATION);
 int (*get_readpagesize)(DB*,u_int32_t*);
 int (*set_readpagesize)(DB*,u_int32_t);
@@ -561,7 +566,7 @@ struct __toku_db {
 int (*verify_with_progress)(DB *, int (*progress_callback)(void *progress_extra, float progress), void *progress_extra, int verbose, int keep_going);
 int (*update)(DB *, DB_TXN*, const DBT *key, const DBT *extra, u_int32_t flags);
 int (*update_broadcast)(DB *, DB_TXN*, const DBT *extra, u_int32_t flags);
- void* __toku_dummy0[16];
+ void* __toku_dummy0[15];
 char __toku_dummy1[96];
 void *api_internal; /* 32-bit offset=256 size=4, 64=bit offset=416 size=8 */
 void* __toku_dummy2[5];
...
@@ -197,7 +197,7 @@ typedef struct __toku_engine_status {
 uint64_t cleaner_max_buffer_workdone; /* max workdone value of any message buffer flushed by cleaner thread */
 uint64_t cleaner_min_buffer_workdone; /* min workdone value of any message buffer flushed by cleaner thread */
 uint64_t cleaner_total_buffer_workdone; /* total workdone value of message buffers flushed by cleaner thread */
- uint64_t cleaner_num_leaves_unmerged; /* number of leaves left unmerged by the cleaner thread */
+ uint64_t cleaner_num_dirtied_for_leaf_merge; /* nodes dirtied by the "flush from root" process to merge a leaf node */
 uint64_t flush_total; /* total number of flushes done by flusher threads or cleaner threads */
 uint64_t flush_in_memory; /* number of in memory flushes */
 uint64_t flush_needed_io; /* number of flushes that had to read a child (or part) off disk */
@@ -223,6 +223,10 @@ typedef struct __toku_engine_status {
 uint64_t dirty_leaf; /* number of times leaf nodes are dirtied when previously clean */
 uint64_t dirty_nonleaf; /* number of times nonleaf nodes are dirtied when previously clean */
 uint64_t balance_leaf; /* number of times a leaf node is balanced inside brt */
+ uint64_t hot_num_started; /* number of HOT operations that have begun */
+ uint64_t hot_num_completed; /* number of HOT operations that have successfully completed */
+ uint64_t hot_num_aborted; /* number of HOT operations that have been aborted */
+ uint64_t hot_max_root_flush_count; /* max number of flushes from root ever required to optimize a tree */
 uint64_t msg_bytes_in; /* how many bytes of messages injected at root (for all trees)*/
 uint64_t msg_bytes_out; /* how many bytes of messages flushed from h1 nodes to leaves*/
 uint64_t msg_bytes_curr; /* how many bytes of messages currently in trees (estimate)*/
@@ -553,6 +557,7 @@ struct __toku_db {
 int (*getf_set)(DB*, DB_TXN*, u_int32_t, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_set without a persistent cursor) */;
 int (*flatten)(DB*, DB_TXN*) /* Flatten a dictionary, similar to (but faster than) a table scan */;
 int (*optimize)(DB*) /* Run garbage collecion and promote all transactions older than oldest. Amortized (happens during flattening) */;
+ int (*hot_optimize)(DB*, int (*progress_callback)(void *progress_extra, float progress), void *progress_extra);
 int (*get_fragmentation)(DB*,TOKU_DB_FRAGMENTATION);
 int (*get_readpagesize)(DB*,u_int32_t*);
 int (*set_readpagesize)(DB*,u_int32_t);
@@ -561,7 +566,7 @@ struct __toku_db {
 int (*verify_with_progress)(DB *, int (*progress_callback)(void *progress_extra, float progress), void *progress_extra, int verbose, int keep_going);
 int (*update)(DB *, DB_TXN*, const DBT *key, const DBT *extra, u_int32_t flags);
 int (*update_broadcast)(DB *, DB_TXN*, const DBT *extra, u_int32_t flags);
- void* __toku_dummy0[19];
+ void* __toku_dummy0[18];
 char __toku_dummy1[96];
 void *api_internal; /* 32-bit offset=268 size=4, 64=bit offset=440 size=8 */
 void* __toku_dummy2[5];
...
@@ -197,7 +197,7 @@ typedef struct __toku_engine_status {
 uint64_t cleaner_max_buffer_workdone; /* max workdone value of any message buffer flushed by cleaner thread */
 uint64_t cleaner_min_buffer_workdone; /* min workdone value of any message buffer flushed by cleaner thread */
 uint64_t cleaner_total_buffer_workdone; /* total workdone value of message buffers flushed by cleaner thread */
- uint64_t cleaner_num_leaves_unmerged; /* number of leaves left unmerged by the cleaner thread */
+ uint64_t cleaner_num_dirtied_for_leaf_merge; /* nodes dirtied by the "flush from root" process to merge a leaf node */
 uint64_t flush_total; /* total number of flushes done by flusher threads or cleaner threads */
 uint64_t flush_in_memory; /* number of in memory flushes */
 uint64_t flush_needed_io; /* number of flushes that had to read a child (or part) off disk */
@@ -223,6 +223,10 @@ typedef struct __toku_engine_status {
 uint64_t dirty_leaf; /* number of times leaf nodes are dirtied when previously clean */
 uint64_t dirty_nonleaf; /* number of times nonleaf nodes are dirtied when previously clean */
 uint64_t balance_leaf; /* number of times a leaf node is balanced inside brt */
+ uint64_t hot_num_started; /* number of HOT operations that have begun */
+ uint64_t hot_num_completed; /* number of HOT operations that have successfully completed */
+ uint64_t hot_num_aborted; /* number of HOT operations that have been aborted */
+ uint64_t hot_max_root_flush_count; /* max number of flushes from root ever required to optimize a tree */
 uint64_t msg_bytes_in; /* how many bytes of messages injected at root (for all trees)*/
 uint64_t msg_bytes_out; /* how many bytes of messages flushed from h1 nodes to leaves*/
 uint64_t msg_bytes_curr; /* how many bytes of messages currently in trees (estimate)*/
@@ -556,6 +560,7 @@ struct __toku_db {
 int (*getf_set)(DB*, DB_TXN*, u_int32_t, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_set without a persistent cursor) */;
 int (*flatten)(DB*, DB_TXN*) /* Flatten a dictionary, similar to (but faster than) a table scan */;
 int (*optimize)(DB*) /* Run garbage collecion and promote all transactions older than oldest. Amortized (happens during flattening) */;
+ int (*hot_optimize)(DB*, int (*progress_callback)(void *progress_extra, float progress), void *progress_extra);
 int (*get_fragmentation)(DB*,TOKU_DB_FRAGMENTATION);
 int (*get_readpagesize)(DB*,u_int32_t*);
 int (*set_readpagesize)(DB*,u_int32_t);
@@ -564,7 +569,7 @@ struct __toku_db {
 int (*verify_with_progress)(DB *, int (*progress_callback)(void *progress_extra, float progress), void *progress_extra, int verbose, int keep_going);
 int (*update)(DB *, DB_TXN*, const DBT *key, const DBT *extra, u_int32_t flags);
 int (*update_broadcast)(DB *, DB_TXN*, const DBT *extra, u_int32_t flags);
- void* __toku_dummy1[23];
+ void* __toku_dummy1[22];
 char __toku_dummy2[80];
 void *api_internal; /* 32-bit offset=276 size=4, 64=bit offset=464 size=8 */
 void* __toku_dummy3[5];
...
@@ -590,7 +590,7 @@ int main (int argc __attribute__((__unused__)), char *const argv[] __attribute__
 printf(" uint64_t cleaner_max_buffer_workdone; /* max workdone value of any message buffer flushed by cleaner thread */\n");
 printf(" uint64_t cleaner_min_buffer_workdone; /* min workdone value of any message buffer flushed by cleaner thread */\n");
 printf(" uint64_t cleaner_total_buffer_workdone; /* total workdone value of message buffers flushed by cleaner thread */\n");
- printf(" uint64_t cleaner_num_leaves_unmerged; /* number of leaves left unmerged by the cleaner thread */\n");
+ printf(" uint64_t cleaner_num_dirtied_for_leaf_merge; /* nodes dirtied by the \"flush from root\" process to merge a leaf node */\n");
 printf(" uint64_t flush_total; /* total number of flushes done by flusher threads or cleaner threads */\n");
 printf(" uint64_t flush_in_memory; /* number of in memory flushes */\n");
 printf(" uint64_t flush_needed_io; /* number of flushes that had to read a child (or part) off disk */\n");
@@ -616,6 +616,10 @@ int main (int argc __attribute__((__unused__)), char *const argv[] __attribute__
 printf(" uint64_t dirty_leaf; /* number of times leaf nodes are dirtied when previously clean */\n");
 printf(" uint64_t dirty_nonleaf; /* number of times nonleaf nodes are dirtied when previously clean */\n");
 printf(" uint64_t balance_leaf; /* number of times a leaf node is balanced inside brt */\n");
+ printf(" uint64_t hot_num_started; /* number of HOT operations that have begun */\n");
+ printf(" uint64_t hot_num_completed; /* number of HOT operations that have successfully completed */\n");
+ printf(" uint64_t hot_num_aborted; /* number of HOT operations that have been aborted */\n");
+ printf(" uint64_t hot_max_root_flush_count; /* max number of flushes from root ever required to optimize a tree */\n");
 printf(" uint64_t msg_bytes_in; /* how many bytes of messages injected at root (for all trees)*/\n");
 printf(" uint64_t msg_bytes_out; /* how many bytes of messages flushed from h1 nodes to leaves*/\n");
 printf(" uint64_t msg_bytes_curr; /* how many bytes of messages currently in trees (estimate)*/\n");
@@ -804,6 +808,7 @@ int main (int argc __attribute__((__unused__)), char *const argv[] __attribute__
 "int (*getf_set)(DB*, DB_TXN*, u_int32_t, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_set without a persistent cursor) */",
 "int (*flatten)(DB*, DB_TXN*) /* Flatten a dictionary, similar to (but faster than) a table scan */",
 "int (*optimize)(DB*) /* Run garbage collecion and promote all transactions older than oldest. Amortized (happens during flattening) */",
+ "int (*hot_optimize)(DB*, int (*progress_callback)(void *progress_extra, float progress), void *progress_extra)",
 "int (*get_fragmentation)(DB*,TOKU_DB_FRAGMENTATION)",
 "int (*get_readpagesize)(DB*,u_int32_t*)",
 "int (*set_readpagesize)(DB*,u_int32_t)",
...
@@ -197,7 +197,7 @@ typedef struct __toku_engine_status {
 uint64_t cleaner_max_buffer_workdone; /* max workdone value of any message buffer flushed by cleaner thread */
 uint64_t cleaner_min_buffer_workdone; /* min workdone value of any message buffer flushed by cleaner thread */
 uint64_t cleaner_total_buffer_workdone; /* total workdone value of message buffers flushed by cleaner thread */
- uint64_t cleaner_num_leaves_unmerged; /* number of leaves left unmerged by the cleaner thread */
+ uint64_t cleaner_num_dirtied_for_leaf_merge; /* nodes dirtied by the "flush from root" process to merge a leaf node */
 uint64_t flush_total; /* total number of flushes done by flusher threads or cleaner threads */
 uint64_t flush_in_memory; /* number of in memory flushes */
 uint64_t flush_needed_io; /* number of flushes that had to read a child (or part) off disk */
@@ -223,6 +223,10 @@ typedef struct __toku_engine_status {
 uint64_t dirty_leaf; /* number of times leaf nodes are dirtied when previously clean */
 uint64_t dirty_nonleaf; /* number of times nonleaf nodes are dirtied when previously clean */
 uint64_t balance_leaf; /* number of times a leaf node is balanced inside brt */
+ uint64_t hot_num_started; /* number of HOT operations that have begun */
+ uint64_t hot_num_completed; /* number of HOT operations that have successfully completed */
+ uint64_t hot_num_aborted; /* number of HOT operations that have been aborted */
+ uint64_t hot_max_root_flush_count; /* max number of flushes from root ever required to optimize a tree */
 uint64_t msg_bytes_in; /* how many bytes of messages injected at root (for all trees)*/
 uint64_t msg_bytes_out; /* how many bytes of messages flushed from h1 nodes to leaves*/
 uint64_t msg_bytes_curr; /* how many bytes of messages currently in trees (estimate)*/
@@ -525,6 +529,7 @@ struct __toku_db {
 int (*getf_set)(DB*, DB_TXN*, u_int32_t, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_set without a persistent cursor) */;
 int (*flatten)(DB*, DB_TXN*) /* Flatten a dictionary, similar to (but faster than) a table scan */;
 int (*optimize)(DB*) /* Run garbage collecion and promote all transactions older than oldest. Amortized (happens during flattening) */;
+ int (*hot_optimize)(DB*, int (*progress_callback)(void *progress_extra, float progress), void *progress_extra);
 int (*get_fragmentation)(DB*,TOKU_DB_FRAGMENTATION);
 int (*get_readpagesize)(DB*,u_int32_t*);
 int (*set_readpagesize)(DB*,u_int32_t);
...
@@ -197,7 +197,7 @@ typedef struct __toku_engine_status {
 uint64_t cleaner_max_buffer_workdone; /* max workdone value of any message buffer flushed by cleaner thread */
 uint64_t cleaner_min_buffer_workdone; /* min workdone value of any message buffer flushed by cleaner thread */
 uint64_t cleaner_total_buffer_workdone; /* total workdone value of message buffers flushed by cleaner thread */
- uint64_t cleaner_num_leaves_unmerged; /* number of leaves left unmerged by the cleaner thread */
+ uint64_t cleaner_num_dirtied_for_leaf_merge; /* nodes dirtied by the "flush from root" process to merge a leaf node */
 uint64_t flush_total; /* total number of flushes done by flusher threads or cleaner threads */
 uint64_t flush_in_memory; /* number of in memory flushes */
 uint64_t flush_needed_io; /* number of flushes that had to read a child (or part) off disk */
@@ -223,6 +223,10 @@ typedef struct __toku_engine_status {
 uint64_t dirty_leaf; /* number of times leaf nodes are dirtied when previously clean */
 uint64_t dirty_nonleaf; /* number of times nonleaf nodes are dirtied when previously clean */
 uint64_t balance_leaf; /* number of times a leaf node is balanced inside brt */
+ uint64_t hot_num_started; /* number of HOT operations that have begun */
+ uint64_t hot_num_completed; /* number of HOT operations that have successfully completed */
+ uint64_t hot_num_aborted; /* number of HOT operations that have been aborted */
+ uint64_t hot_max_root_flush_count; /* max number of flushes from root ever required to optimize a tree */
 uint64_t msg_bytes_in; /* how many bytes of messages injected at root (for all trees)*/
 uint64_t msg_bytes_out; /* how many bytes of messages flushed from h1 nodes to leaves*/
 uint64_t msg_bytes_curr; /* how many bytes of messages currently in trees (estimate)*/
@@ -525,6 +529,7 @@ struct __toku_db {
 int (*getf_set)(DB*, DB_TXN*, u_int32_t, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_set without a persistent cursor) */;
 int (*flatten)(DB*, DB_TXN*) /* Flatten a dictionary, similar to (but faster than) a table scan */;
 int (*optimize)(DB*) /* Run garbage collecion and promote all transactions older than oldest. Amortized (happens during flattening) */;
+ int (*hot_optimize)(DB*, int (*progress_callback)(void *progress_extra, float progress), void *progress_extra);
 int (*get_fragmentation)(DB*,TOKU_DB_FRAGMENTATION);
 int (*get_readpagesize)(DB*,u_int32_t*);
 int (*set_readpagesize)(DB*,u_int32_t);
...
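The hot_optimize member added to struct __toku_db in each of the db.h variants above is the public entry point for HOT. The following snippet is not part of the commit; it is a minimal usage sketch for a caller with an open TokuDB DB handle. report_progress and hot_optimize_table are invented names, error handling is elided, and progress is assumed to be a fraction in [0, 1] with a zero return from the callback meaning "keep going":

#include <stdio.h>
#include <db.h>

static int report_progress(void *progress_extra, float progress) {
    const char *name = progress_extra;
    printf("HOT optimize of %s: %.1f%%\n", name, progress * 100.0);
    return 0; // assumption: a zero return lets the optimization continue
}

static int hot_optimize_table(DB *db, const char *name) {
    // Push all pending messages down to the leaves, reporting progress as we go.
    return db->hot_optimize(db, report_progress, (void *) name);
}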
@@ -51,6 +51,7 @@ BRT_SOURCES = \
     brt \
     brt-cachetable-wrappers \
     brt-flusher \
+    brt-hot-flusher \
     brt_msg \
     brt-test-helpers \
     cachetable \
...
@@ -8,7 +8,7 @@ static int brt_root_put_cmd_XY (BRT brt, BRT_MSG *md, TOKUTXN txn) {
     if (0) { died0: toku_unpin_brt_header(brt); }
         return r;
     }
-    CACHEKEY *rootp = toku_calculate_root_offset_pointer(brt);
+    CACHEKEY *rootp = toku_calculate_root_offset_pointer(brt->h);
     if ((r=cachetable_get_and_pin(brt->cf, *rootp, &node_v, NULL,
                                   toku_brtnode_flush_callback, toku_brtnode_fetch_callback, (void*)(long)brt->h->nodesize))) {
         goto died0;
...
@@ -6,6 +6,7 @@
 #include <brt-cachetable-wrappers.h>
 #include <brttypes.h>
+#include <brt-flusher.h>
 #include <brt-internal.h>
 #include <cachetable.h>
...
/* -*- mode: C; c-basic-offset: 4 -*- */
#ifndef BRT_FLUSHER_INTERNAL
#define BRT_FLUSHER_INTERNAL
#ident "$Id$"
#ident "Copyright (c) 2007-2010 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include <brttypes.h>
#include <c_dialects.h>
C_BEGIN
typedef struct flusher_advice FLUSHER_ADVICE;
/**
* Choose a child to flush to. Returns a childnum, or -1 if we should
* go no further.
*
* Flusher threads: pick the heaviest child buffer
* Cleaner threads: pick the heaviest child buffer
* Cleaner thread merging leaf nodes: follow down to a key
* Hot optimize table: follow down to the right of a key
*/
typedef int (*FA_PICK_CHILD)(struct brt_header *h, BRTNODE parent, void* extra);
/**
* Decide whether to call `flush_some_child` on the child if it is
* stable and a nonleaf node.
*
* Flusher threads: yes if child is gorged
* Cleaner threads: yes if child is gorged
* Cleaner thread merging leaf nodes: always yes
* Hot optimize table: always yes
*/
typedef bool (*FA_SHOULD_RECURSIVELY_FLUSH)(BRTNODE child, void* extra);
/**
* Called if the child needs merging. Should do something to get the
* child out of a fusible state. Must unpin parent and child.
*
* Flusher threads: just do the merge
* Cleaner threads: if nonleaf, just merge, otherwise start a "cleaner
* thread merge"
* Cleaner thread merging leaf nodes: just do the merge
* Hot optimize table: just do the merge
*/
typedef void (*FA_MAYBE_MERGE_CHILD)(struct flusher_advice *fa,
struct brt_header *h,
BRTNODE parent,
int childnum,
BRTNODE child,
void* extra);
/**
* Cleaner threads may need to destroy basement nodes which have been
* brought more up to date than the height 1 node flushing to them.
* This function is used to determine if we need to check for basement
* nodes that are too up to date, and then destroy them if we find
* them.
*
* Flusher threads: no
* Cleaner threads: yes
* Cleaner thread merging leaf nodes: no
* Hot optimize table: no
*/
typedef bool (*FA_SHOULD_DESTROY_BN)(void* extra);
/**
* Update `brt_flusher_status` in whatever way necessary. Called once
* by `flush_some_child` right before choosing what to do next (split,
* merge, recurse), with the number of nodes that were dirtied by this
* execution of `flush_some_child`.
*/
typedef void (*FA_UPDATE_STATUS)(BRTNODE child, int dirtied, void* extra);
/**
* Choose whether to go to the left or right child after a split. Called
* by `brt_split_child`. If -1 is returned, `brt_split_child` defaults to
* the old behavior.
*/
typedef int (*FA_PICK_CHILD_AFTER_SPLIT)(struct brt_header* h,
BRTNODE node,
int childnuma,
int childnumb,
void* extra);
/**
* A collection of callbacks used by the flushing machinery to make
* various decisions. There are implementations of each of these
* functions for flusher threads (ft_*), cleaner threads (ct_*), and hot
* optimize table (hot_*).
*/
struct flusher_advice {
FA_PICK_CHILD pick_child;
FA_SHOULD_RECURSIVELY_FLUSH should_recursively_flush;
FA_MAYBE_MERGE_CHILD maybe_merge_child;
FA_SHOULD_DESTROY_BN should_destroy_basement_nodes;
FA_UPDATE_STATUS update_status;
FA_PICK_CHILD_AFTER_SPLIT pick_child_after_split;
void* extra; // parameter passed into callbacks
};
void
flusher_advice_init(
struct flusher_advice *fa,
FA_PICK_CHILD pick_child,
FA_SHOULD_DESTROY_BN should_destroy_basement_nodes,
FA_SHOULD_RECURSIVELY_FLUSH should_recursively_flush,
FA_MAYBE_MERGE_CHILD maybe_merge_child,
FA_UPDATE_STATUS update_status,
FA_PICK_CHILD_AFTER_SPLIT pick_child_after_split,
void* extra
);
void
flush_some_child(
struct brt_header* h,
BRTNODE parent,
struct flusher_advice *fa);
bool
always_recursively_flush(BRTNODE child, void* extra);
bool
dont_destroy_basement_nodes(void* extra);
void
default_merge_child(struct flusher_advice *fa,
struct brt_header *h,
BRTNODE parent,
int childnum,
BRTNODE child,
void* extra);
int
default_pick_child_after_split(struct brt_header *h,
BRTNODE parent,
int childnuma,
int childnumb,
void *extra);
C_END
#endif // End of header guardian.
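The header above only declares the advice callbacks; the following is a purely illustrative sketch (not part of the commit) of how a caller might assemble a flusher_advice from the default helpers and hand it to flush_some_child. pick_leftmost, update_noop, and flush_leftmost_path are invented for this example; only flusher_advice_init, flush_some_child, always_recursively_flush, dont_destroy_basement_nodes, default_merge_child, and default_pick_child_after_split come from the header, and the include name is assumed from the header guard:

#include <brt-flusher-internal.h> // name assumed from the BRT_FLUSHER_INTERNAL guard above

static int
pick_leftmost(struct brt_header *h __attribute__((__unused__)),
              BRTNODE parent __attribute__((__unused__)),
              void *extra __attribute__((__unused__)))
{
    return 0; // always descend into the leftmost child
}

static void
update_noop(BRTNODE child __attribute__((__unused__)),
            int dirtied __attribute__((__unused__)),
            void *extra __attribute__((__unused__)))
{
    // a real implementation would fold `dirtied` into its status counters
}

static void
flush_leftmost_path(struct brt_header *h, BRTNODE root)
{
    // `root` is assumed to be pinned appropriately by the caller.
    struct flusher_advice fa;
    flusher_advice_init(&fa,
                        pick_leftmost,
                        dont_destroy_basement_nodes,
                        always_recursively_flush,
                        default_merge_child,
                        update_noop,
                        default_pick_child_after_split,
                        NULL);
    // Flushes toward the chosen child, recursing for as long as the advice says to.
    flush_some_child(h, root, &fa);
}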
This diff is collapsed.
@@ -11,6 +11,39 @@
 C_BEGIN
+typedef struct brt_flusher_status {
+    uint64_t cleaner_total_nodes; // total number of nodes whose buffers are potentially flushed by cleaner thread
+    uint64_t cleaner_h1_nodes; // number of nodes of height one whose message buffers are flushed by cleaner thread
+    uint64_t cleaner_hgt1_nodes; // number of nodes of height > 1 whose message buffers are flushed by cleaner thread
+    uint64_t cleaner_empty_nodes; // number of nodes that are selected by cleaner, but whose buffers are empty
+    uint64_t cleaner_nodes_dirtied; // number of nodes that are made dirty by the cleaner thread
+    uint64_t cleaner_max_buffer_size; // max number of bytes in message buffer flushed by cleaner thread
+    uint64_t cleaner_min_buffer_size;
+    uint64_t cleaner_total_buffer_size;
+    uint64_t cleaner_max_buffer_workdone; // max workdone value of any message buffer flushed by cleaner thread
+    uint64_t cleaner_min_buffer_workdone;
+    uint64_t cleaner_total_buffer_workdone;
+    uint64_t cleaner_num_dirtied_for_leaf_merge; // nodes dirtied by the "flush from root" process to merge a leaf node
+    uint64_t flush_total; // total number of flushes done by flusher threads or cleaner threads
+    uint64_t flush_in_memory; // number of in memory flushes
+    uint64_t flush_needed_io; // number of flushes that had to read a child (or part) off disk
+    uint64_t flush_cascades; // number of flushes that triggered another flush in the child
+    uint64_t flush_cascades_1; // number of flushes that triggered 1 cascading flush
+    uint64_t flush_cascades_2; // number of flushes that triggered 2 cascading flushes
+    uint64_t flush_cascades_3; // number of flushes that triggered 3 cascading flushes
+    uint64_t flush_cascades_4; // number of flushes that triggered 4 cascading flushes
+    uint64_t flush_cascades_5; // number of flushes that triggered 5 cascading flushes
+    uint64_t flush_cascades_gt_5; // number of flushes that triggered more than 5 cascading flushes
+    uint64_t split_leaf; // number of leaf nodes split
+    uint64_t split_nonleaf; // number of nonleaf nodes split
+    uint64_t merge_leaf; // number of times leaf nodes are merged
+    uint64_t merge_nonleaf; // number of times nonleaf nodes are merged
+    uint64_t balance_leaf; // number of times a leaf node is balanced inside brt
+} BRT_FLUSHER_STATUS_S, *BRT_FLUSHER_STATUS;
+void toku_brt_flusher_status_init(void);
+void toku_brt_flusher_get_status(BRT_FLUSHER_STATUS);
 /**
  * Only for testing, not for production.
  *
@@ -32,12 +65,11 @@ toku_flusher_thread_set_callback(
  * brt_status which currently just lives in brt.c.
  */
 int
-toku_brtnode_cleaner_callback_internal(
+toku_brtnode_cleaner_callback(
     void *brtnode_pv,
     BLOCKNUM blocknum,
     u_int32_t fullhash,
-    void *extraargs,
-    BRT_STATUS brt_status
+    void *extraargs
     );
 /**
@@ -47,8 +79,7 @@ toku_brtnode_cleaner_callback_internal(
 void
 flush_node_on_background_thread(
     BRT brt,
-    BRTNODE parent,
-    BRT_STATUS brt_status
+    BRTNODE parent
     );
 /**
@@ -68,8 +99,7 @@ brtleaf_split(
     DBT *splitk,
     BOOL create_new_node,
     u_int32_t num_dependent_nodes,
-    BRTNODE* dependent_nodes,
-    BRT_STATUS brt_status
+    BRTNODE* dependent_nodes
     );
 /**
@@ -89,10 +119,33 @@ brt_nonleaf_split(
     BRTNODE *nodeb,
     DBT *splitk,
     u_int32_t num_dependent_nodes,
-    BRTNODE* dependent_nodes,
-    BRT_STATUS brt_status
+    BRTNODE* dependent_nodes
     );
+/************************************************************************
+ * HOT optimize, should perhaps be factored out to its own header file *
+ ************************************************************************
+ */
+typedef struct brt_hot_status {
+    uint64_t num_started; // number of HOT operations that have begun
+    uint64_t num_completed; // number of HOT operations that have successfully completed
+    uint64_t num_aborted; // number of HOT operations that have been aborted
+    uint64_t max_root_flush_count; // max number of flushes from root ever required to optimize a tree
+} BRT_HOT_STATUS_S, *BRT_HOT_STATUS;
+void toku_brt_hot_get_status(BRT_HOT_STATUS);
+/**
+ * Takes given BRT and pushes all pending messages to the leaf nodes.
+ */
+int
+toku_brt_hot_optimize(BRT brt,
+                      int (*progress_callback)(void *extra, float progress),
+                      void *progress_extra);
 C_END
 #endif // End of header guardian.
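Taken together, toku_brt_hot_optimize and toku_brt_hot_get_status give the brt-layer driver and its counters. The following rough sketch of exercising both is not from the commit: never_cancel and hot_optimize_and_report are invented names, brt is assumed to be an open, valid BRT handle with the declarations above in scope, and the "return 0 to keep going" convention for the callback is an assumption:

#include <stdio.h>
#include <inttypes.h>

static int never_cancel(void *extra __attribute__((__unused__)),
                        float progress __attribute__((__unused__))) {
    return 0; // assumption: a zero return lets the optimization continue
}

static void hot_optimize_and_report(BRT brt) {
    // Push all pending messages to the leaves, then read the global HOT counters.
    int r = toku_brt_hot_optimize(brt, never_cancel, NULL);
    if (r == 0) {
        BRT_HOT_STATUS_S hot_status;
        toku_brt_hot_get_status(&hot_status);
        printf("HOT operations completed so far: %" PRIu64 "\n",
               hot_status.num_completed);
    }
}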
This diff is collapsed.
@@ -341,13 +341,6 @@ enum {
 u_int32_t compute_child_fullhash (CACHEFILE cf, BRTNODE node, int childnum);
-struct remembered_hash {
-    BOOL valid; // set to FALSE if the fullhash is invalid
-    FILENUM fnum;
-    BLOCKNUM root;
-    u_int32_t fullhash; // fullhash is the hashed value of fnum and root.
-};
 // The brt_header is not managed by the cachetable. Instead, it hangs off the cachefile as userdata.
 enum brtheader_type {BRTHEADER_CURRENT=1, BRTHEADER_CHECKPOINT_INPROGRESS};
@@ -380,7 +373,6 @@ struct brt_header {
     unsigned int nodesize;
     unsigned int basementnodesize;
     BLOCKNUM root; // roots of the dictionary
-    struct remembered_hash root_hash; // hash of the root offset.
     unsigned int flags;
     DESCRIPTOR_S descriptor;
@@ -404,6 +396,11 @@ struct brt_header {
     STAT64INFO_S in_memory_stats;
     STAT64INFO_S on_disk_stats;
     STAT64INFO_S checkpoint_staging_stats;
+    uint64_t time_of_last_optimize_begin; // last time that a hot optimize operation was begun
+    uint64_t time_of_last_optimize_end; // last time that a hot optimize operation was successfully completed
+    uint32_t count_of_optimize_in_progress; // the number of hot optimize operations currently in progress on this tree
+    uint32_t count_of_optimize_in_progress_read_from_disk; // the number of hot optimize operations in progress on this tree at the time of the last crash (this field is in-memory only)
+    MSN msn_at_start_of_last_completed_optimize; // all messages before this msn have been applied to leaf nodes
 };
 struct brt {
@@ -526,10 +523,9 @@ extern void toku_brtnode_pe_est_callback(void* brtnode_pv, long* bytes_freed_est
 extern int toku_brtnode_pe_callback (void *brtnode_pv, PAIR_ATTR old_attr, PAIR_ATTR* new_attr, void *extraargs);
 extern BOOL toku_brtnode_pf_req_callback(void* brtnode_pv, void* read_extraargs);
 int toku_brtnode_pf_callback(void* brtnode_pv, void* read_extraargs, int fd, PAIR_ATTR* sizep);
-extern int toku_brtnode_cleaner_callback (void* brtnode_pv, BLOCKNUM blocknum, u_int32_t fullhash, void* extraargs);
 extern int toku_brt_alloc_init_header(BRT t, TOKUTXN txn);
 extern int toku_read_brt_header_and_store_in_cachefile (BRT brt, CACHEFILE cf, LSN max_acceptable_lsn, struct brt_header **header, BOOL* was_open);
-extern CACHEKEY* toku_calculate_root_offset_pointer (BRT brt, u_int32_t *root_hash);
+extern CACHEKEY* toku_calculate_root_offset_pointer (struct brt_header* h, u_int32_t *root_hash);
 static const BRTNODE null_brtnode=0;
@@ -716,15 +712,31 @@ unsigned int toku_brtnode_which_child(BRTNODE node, const DBT *k,
                                       DESCRIPTOR desc, brt_compare_func cmp)
     __attribute__((__warn_unused_result__));
+/**
+ * Finds the next child for HOT to flush to, given that everything up to
+ * and including k has been flattened.
+ *
+ * If k falls between pivots in node, then we return the childnum where k
+ * lies.
+ *
+ * If k is equal to some pivot, then we return the next (to the right)
+ * childnum.
+ */
+unsigned int toku_brtnode_hot_next_child(BRTNODE node,
+                                         const DBT *k,
+                                         DESCRIPTOR desc,
+                                         brt_compare_func cmp);
 /* Stuff for testing */
 // toku_testsetup_initialize() must be called before any other test_setup_xxx() functions are called.
 void toku_testsetup_initialize(void);
-int toku_testsetup_leaf(BRT brt, BLOCKNUM *);
+int toku_testsetup_leaf(BRT brt, BLOCKNUM *blocknum, int n_children, char **keys, int *keylens);
 int toku_testsetup_nonleaf (BRT brt, int height, BLOCKNUM *diskoff, int n_children, BLOCKNUM *children, char **keys, int *keylens);
 int toku_testsetup_root(BRT brt, BLOCKNUM);
 int toku_testsetup_get_sersize(BRT brt, BLOCKNUM); // Return the size on disk.
 int toku_testsetup_insert_to_leaf (BRT brt, BLOCKNUM, char *key, int keylen, char *val, int vallen);
 int toku_testsetup_insert_to_nonleaf (BRT brt, BLOCKNUM, enum brt_msg_type, char *key, int keylen, char *val, int vallen);
+void toku_pin_node_with_min_bfe(BRTNODE* node, BLOCKNUM b, BRT t);
 // These two go together to do lookups in a brtnode using the keys in a command.
 struct cmd_leafval_heaviside_extra {
@@ -799,28 +811,6 @@ struct brt_status {
     uint64_t search_root_retries; // number of searches that required the root node to be fetched more than once
     uint64_t search_tries_gt_height; // number of searches that required more tries than the height of the tree
     uint64_t search_tries_gt_heightplus3; // number of searches that required more tries than the height of the tree plus three
-    uint64_t cleaner_total_nodes; // total number of nodes whose buffers are potentially flushed by cleaner thread
-    uint64_t cleaner_h1_nodes; // number of nodes of height one whose message buffers are flushed by cleaner thread
-    uint64_t cleaner_hgt1_nodes; // number of nodes of height > 1 whose message buffers are flushed by cleaner thread
-    uint64_t cleaner_empty_nodes; // number of nodes that are selected by cleaner, but whose buffers are empty
-    uint64_t cleaner_nodes_dirtied; // number of nodes that are made dirty by the cleaner thread
-    uint64_t cleaner_max_buffer_size; // max number of bytes in message buffer flushed by cleaner thread
-    uint64_t cleaner_min_buffer_size;
-    uint64_t cleaner_total_buffer_size;
-    uint64_t cleaner_max_buffer_workdone; // max workdone value of any message buffer flushed by cleaner thread
-    uint64_t cleaner_min_buffer_workdone;
-    uint64_t cleaner_total_buffer_workdone;
-    uint64_t cleaner_num_leaves_unmerged; // number of leaves left unmerged by the cleaner thread
-    uint64_t flush_total; // total number of flushes done by flusher threads or cleaner threads
-    uint64_t flush_in_memory; // number of in memory flushes
-    uint64_t flush_needed_io; // number of flushes that had to read a child (or part) off disk
-    uint64_t flush_cascades; // number of flushes that triggered another flush in the child
-    uint64_t flush_cascades_1; // number of flushes that triggered 1 cascading flush
-    uint64_t flush_cascades_2; // number of flushes that triggered 2 cascading flushes
-    uint64_t flush_cascades_3; // number of flushes that triggered 3 cascading flushes
-    uint64_t flush_cascades_4; // number of flushes that triggered 4 cascading flushes
-    uint64_t flush_cascades_5; // number of flushes that triggered 5 cascading flushes
-    uint64_t flush_cascades_gt_5; // number of flushes that triggered more than 5 cascading flushes
     uint64_t disk_flush_leaf; // number of leaf nodes flushed to disk, not for checkpoint
     uint64_t disk_flush_nonleaf; // number of nonleaf nodes flushed to disk, not for checkpoint
     uint64_t disk_flush_leaf_for_checkpoint; // number of leaf nodes flushed to disk for checkpoint
@@ -829,13 +819,8 @@ struct brt_status {
     uint64_t create_nonleaf; // number of nonleaf nodes created
     uint64_t destroy_leaf; // number of leaf nodes destroyed
     uint64_t destroy_nonleaf; // number of nonleaf nodes destroyed
-    uint64_t split_leaf; // number of leaf nodes split
-    uint64_t split_nonleaf; // number of nonleaf nodes split
-    uint64_t merge_leaf; // number of times leaf nodes are merged
-    uint64_t merge_nonleaf; // number of times nonleaf nodes are merged
     uint64_t dirty_leaf; // number of times leaf nodes are dirtied when previously clean
     uint64_t dirty_nonleaf; // number of times nonleaf nodes are dirtied when previously clean
uint64_t balance_leaf; // number of times a leaf node is balanced inside brt
uint64_t msg_bytes_in; // how many bytes of messages injected at root (for all trees) uint64_t msg_bytes_in; // how many bytes of messages injected at root (for all trees)
uint64_t msg_bytes_out; // how many bytes of messages flushed from h1 nodes to leaves uint64_t msg_bytes_out; // how many bytes of messages flushed from h1 nodes to leaves
uint64_t msg_bytes_curr; // how many bytes of messages currently in trees (estimate) uint64_t msg_bytes_curr; // how many bytes of messages currently in trees (estimate)
...@@ -865,9 +850,6 @@ struct brt_status { ...@@ -865,9 +850,6 @@ struct brt_status {
void toku_brt_get_status(BRT_STATUS); void toku_brt_get_status(BRT_STATUS);
void
brtleaf_split (struct brt_header* h, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *splitk, BOOL create_new_node, u_int32_t num_dependent_nodes, BRTNODE* dependent_nodes, BRT_STATUS brt_status);
void void
brt_leaf_apply_cmd_once ( brt_leaf_apply_cmd_once (
BRTNODE leafnode, BRTNODE leafnode,
...@@ -906,6 +888,18 @@ void toku_apply_cmd_to_leaf( ...@@ -906,6 +888,18 @@ void toku_apply_cmd_to_leaf(
OMT live_list_reverse OMT live_list_reverse
); );
void brtnode_put_cmd (
brt_compare_func compare_fun,
brt_update_func update_fun,
DESCRIPTOR desc,
BRTNODE node,
BRT_MSG cmd,
bool is_fresh,
OMT snapshot_txnids,
OMT live_list_reverse
);
void toku_reset_root_xid_that_created(BRT brt, TXNID new_root_xid_that_created); void toku_reset_root_xid_that_created(BRT brt, TXNID new_root_xid_that_created);
// Reset the root_xid_that_created field to the given value. // Reset the root_xid_that_created field to the given value.
// This redefines which xid created the dictionary. // This redefines which xid created the dictionary.
...@@ -913,6 +907,10 @@ void toku_reset_root_xid_that_created(BRT brt, TXNID new_root_xid_that_created); ...@@ -913,6 +907,10 @@ void toku_reset_root_xid_that_created(BRT brt, TXNID new_root_xid_that_created);
void toku_flusher_thread_set_callback(void (*callback_f)(int, void*), void* extra); void toku_flusher_thread_set_callback(void (*callback_f)(int, void*), void* extra);
void toku_brt_header_note_hot_begin(BRT brt);
void toku_brt_header_note_hot_complete(BRT brt, BOOL success, MSN msn_at_start_of_hot);
C_END C_END
#endif #endif
...@@ -1815,6 +1815,11 @@ serialize_brt_header_min_size (u_int32_t version) { ...@@ -1815,6 +1815,11 @@ serialize_brt_header_min_size (u_int32_t version) {
switch(version) { switch(version) {
case BRT_LAYOUT_VERSION_18:
size += sizeof(uint64_t); // time_of_last_optimize_begin
size += sizeof(uint64_t); // time_of_last_optimize_end
size += sizeof(uint32_t); // count_of_optimize_in_progress
size += sizeof(MSN); // msn_at_start_of_last_completed_optimize
case BRT_LAYOUT_VERSION_17: case BRT_LAYOUT_VERSION_17:
size += 16; size += 16;
invariant(sizeof(STAT64INFO_S) == 16); invariant(sizeof(STAT64INFO_S) == 16);
...@@ -1891,6 +1896,10 @@ int toku_serialize_brt_header_to_wbuf (struct wbuf *wbuf, struct brt_header *h, ...@@ -1891,6 +1896,10 @@ int toku_serialize_brt_header_to_wbuf (struct wbuf *wbuf, struct brt_header *h,
wbuf_ulonglong(wbuf, h->time_of_last_verification); wbuf_ulonglong(wbuf, h->time_of_last_verification);
wbuf_ulonglong(wbuf, h->checkpoint_staging_stats.numrows); wbuf_ulonglong(wbuf, h->checkpoint_staging_stats.numrows);
wbuf_ulonglong(wbuf, h->checkpoint_staging_stats.numbytes); wbuf_ulonglong(wbuf, h->checkpoint_staging_stats.numbytes);
wbuf_ulonglong(wbuf, h->time_of_last_optimize_begin);
wbuf_ulonglong(wbuf, h->time_of_last_optimize_end);
wbuf_int(wbuf, h->count_of_optimize_in_progress);
wbuf_MSN(wbuf, h->msn_at_start_of_last_completed_optimize);
u_int32_t checksum = x1764_finish(&wbuf->checksum); u_int32_t checksum = x1764_finish(&wbuf->checksum);
wbuf_int(wbuf, checksum); wbuf_int(wbuf, checksum);
lazy_assert(wbuf->ndone == wbuf->size); lazy_assert(wbuf->ndone == wbuf->size);
...@@ -2143,7 +2152,6 @@ deserialize_brtheader (int fd, struct rbuf *rb, struct brt_header **brth) { ...@@ -2143,7 +2152,6 @@ deserialize_brtheader (int fd, struct rbuf *rb, struct brt_header **brth) {
} }
h->root = rbuf_blocknum(&rc); h->root = rbuf_blocknum(&rc);
h->root_hash.valid = FALSE;
h->flags = rbuf_int(&rc); h->flags = rbuf_int(&rc);
h->layout_version_original = rbuf_int(&rc); h->layout_version_original = rbuf_int(&rc);
h->build_id_original = rbuf_int(&rc); h->build_id_original = rbuf_int(&rc);
...@@ -2161,10 +2169,15 @@ deserialize_brtheader (int fd, struct rbuf *rb, struct brt_header **brth) { ...@@ -2161,10 +2169,15 @@ deserialize_brtheader (int fd, struct rbuf *rb, struct brt_header **brth) {
h->basementnodesize = rbuf_int(&rc); h->basementnodesize = rbuf_int(&rc);
h->time_of_last_verification = rbuf_ulonglong(&rc); h->time_of_last_verification = rbuf_ulonglong(&rc);
} }
if (h->layout_version >= BRT_LAYOUT_VERSION_17) { if (h->layout_version >= BRT_LAYOUT_VERSION_18) {
h->on_disk_stats.numrows = rbuf_ulonglong(&rc); h->on_disk_stats.numrows = rbuf_ulonglong(&rc);
h->on_disk_stats.numbytes = rbuf_ulonglong(&rc); h->on_disk_stats.numbytes = rbuf_ulonglong(&rc);
h->in_memory_stats = h->on_disk_stats; h->in_memory_stats = h->on_disk_stats;
h->time_of_last_optimize_begin = rbuf_ulonglong(&rc);
h->time_of_last_optimize_end = rbuf_ulonglong(&rc);
h->count_of_optimize_in_progress = rbuf_int(&rc);
h->count_of_optimize_in_progress_read_from_disk = h->count_of_optimize_in_progress;
h->msn_at_start_of_last_completed_optimize = rbuf_msn(&rc);
} }
(void)rbuf_int(&rc); //Read in checksum and ignore (already verified). (void)rbuf_int(&rc); //Read in checksum and ignore (already verified).
...@@ -2219,7 +2232,8 @@ deserialize_brtheader_versioned (int fd, struct rbuf *rb, struct brt_header **br ...@@ -2219,7 +2232,8 @@ deserialize_brtheader_versioned (int fd, struct rbuf *rb, struct brt_header **br
case BRT_LAYOUT_VERSION_14: case BRT_LAYOUT_VERSION_14:
h->basementnodesize = 128*1024; // basement nodes added in v15 h->basementnodesize = 128*1024; // basement nodes added in v15
//fall through on purpose //fall through on purpose
case BRT_LAYOUT_VERSION_17: case BRT_LAYOUT_VERSION_18:
case BRT_LAYOUT_VERSION_17: // version 17 never released to customers
case BRT_LAYOUT_VERSION_16: // version 16 never released to customers case BRT_LAYOUT_VERSION_16: // version 16 never released to customers
case BRT_LAYOUT_VERSION_15: // this will not properly support version 15, we'll fix that on upgrade. case BRT_LAYOUT_VERSION_15: // this will not properly support version 15, we'll fix that on upgrade.
invariant(h->layout_version == BRT_LAYOUT_VERSION); invariant(h->layout_version == BRT_LAYOUT_VERSION);
......
...@@ -6,10 +6,11 @@ ...@@ -6,10 +6,11 @@
#include "includes.h" #include "includes.h"
#include "ule.h" #include "ule.h"
#include <brt-cachetable-wrappers.h> #include <brt-cachetable-wrappers.h>
#include <brt-flusher.h>
// dummymsn needed to simulate msn because messages are injected at a lower level than toku_brt_root_put_cmd() // dummymsn needed to simulate msn because messages are injected at a lower level than toku_brt_root_put_cmd()
#define MIN_DUMMYMSN ((MSN) {(uint64_t)1<<48}) #define MIN_DUMMYMSN ((MSN) {(uint64_t)100000000000})
static MSN dummymsn; static MSN dummymsn;
static int testsetup_initialized = 0; static int testsetup_initialized = 0;
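/*
 * Illustrative only (not part of this commit): the dummy MSN just has to be
 * monotonically increasing and start well above any MSN a real message could
 * carry, so tests that inject messages below toku_brt_root_put_cmd() still
 * see strictly growing MSNs.  A minimal generator under that assumption
 * (assumes MSN's single 64-bit field is named .msn, as the compound literal
 * above suggests; next_dummymsn_sketch is a hypothetical name):
 */
static MSN next_dummymsn_sketch(void) {
    static uint64_t bump = 0;
    MSN m = MIN_DUMMYMSN;   // base value defined above
    m.msn += ++bump;        // strictly increasing on every call
    return m;
}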
...@@ -31,13 +32,21 @@ next_dummymsn(void) { ...@@ -31,13 +32,21 @@ next_dummymsn(void) {
BOOL ignore_if_was_already_open; BOOL ignore_if_was_already_open;
int toku_testsetup_leaf(BRT brt, BLOCKNUM *blocknum) { int toku_testsetup_leaf(BRT brt, BLOCKNUM *blocknum, int n_children, char **keys, int *keylens) {
BRTNODE node; BRTNODE node;
assert(testsetup_initialized); assert(testsetup_initialized);
int r = toku_read_brt_header_and_store_in_cachefile(brt, brt->cf, MAX_LSN, &brt->h, &ignore_if_was_already_open); int r = toku_read_brt_header_and_store_in_cachefile(brt, brt->cf, MAX_LSN, &brt->h, &ignore_if_was_already_open);
if (r!=0) return r; if (r!=0) return r;
toku_create_new_brtnode(brt, &node, 0, 1); toku_create_new_brtnode(brt, &node, 0, n_children);
BP_STATE(node,0) = PT_AVAIL; int i;
for (i=0; i<n_children; i++) {
BP_STATE(node,i) = PT_AVAIL;
}
for (i=0; i+1<n_children; i++) {
node->childkeys[i] = kv_pair_malloc(keys[i], keylens[i], 0, 0);
node->totalchildkeylens += keylens[i];
}
*blocknum = node->thisnodename; *blocknum = node->thisnodename;
toku_unpin_brtnode(brt, node); toku_unpin_brtnode(brt, node);
...@@ -71,7 +80,6 @@ int toku_testsetup_root(BRT brt, BLOCKNUM blocknum) { ...@@ -71,7 +80,6 @@ int toku_testsetup_root(BRT brt, BLOCKNUM blocknum) {
int r = toku_read_brt_header_and_store_in_cachefile(brt, brt->cf, MAX_LSN, &brt->h, &ignore_if_was_already_open); int r = toku_read_brt_header_and_store_in_cachefile(brt, brt->cf, MAX_LSN, &brt->h, &ignore_if_was_already_open);
if (r!=0) return r; if (r!=0) return r;
brt->h->root = blocknum; brt->h->root = blocknum;
brt->h->root_hash.valid = FALSE;
return 0; return 0;
} }
...@@ -131,55 +139,22 @@ int toku_testsetup_insert_to_leaf (BRT brt, BLOCKNUM blocknum, char *key, int ke ...@@ -131,55 +139,22 @@ int toku_testsetup_insert_to_leaf (BRT brt, BLOCKNUM blocknum, char *key, int ke
toku_verify_or_set_counts(node); toku_verify_or_set_counts(node);
assert(node->height==0); assert(node->height==0);
size_t newlesize;
LEAFENTRY leafentry;
OMTVALUE storeddatav;
u_int32_t idx;
DBT keydbt,valdbt; DBT keydbt,valdbt;
MSN msn = next_dummymsn(); MSN msn = next_dummymsn();
BRT_MSG_S cmd = {BRT_INSERT, msn, xids_get_root_xids(), BRT_MSG_S cmd = {BRT_INSERT, msn, xids_get_root_xids(),
.u.id={toku_fill_dbt(&keydbt, key, keylen), .u.id={toku_fill_dbt(&keydbt, key, keylen),
toku_fill_dbt(&valdbt, val, vallen)}}; toku_fill_dbt(&valdbt, val, vallen)}};
//Generate a leafentry (committed insert key,val)
uint childnum = toku_brtnode_which_child(node,
&keydbt,
&brt->h->descriptor, brt->compare_fun);
BASEMENTNODE bn = BLB(node, childnum);
void * maybe_free = 0;
{
int64_t ignoreme;
r = apply_msg_to_leafentry(&cmd, NULL, //No old leafentry
&newlesize, &leafentry,
bn->buffer, &bn->buffer_mempool, &maybe_free,
NULL, NULL, &ignoreme);
assert(r==0);
}
brtnode_put_cmd (
struct cmd_leafval_heaviside_extra be = {brt->compare_fun, &brt->h->descriptor, &keydbt}; brt->h->compare_fun,
r = toku_omt_find_zero(BLB_BUFFER(node, 0), toku_cmd_leafval_heaviside, &be, &storeddatav, &idx); brt->h->update_fun,
&brt->h->descriptor,
node,
if (r==0) { &cmd,
LEAFENTRY storeddata=storeddatav; true,
// It's already there. So now we have to remove it and put the new one back in. NULL,
BLB_NBYTESINBUF(node, 0) -= leafentry_disksize(storeddata); NULL
toku_free(storeddata); );
// Now put the new kv in.
toku_omt_set_at(BLB_BUFFER(node, 0), leafentry, idx);
} else {
r = toku_omt_insert(BLB_BUFFER(node, 0), leafentry, toku_cmd_leafval_heaviside, &be, 0);
assert(r==0);
}
// hack to get tests passing. These tests should not be directly inserting into buffers
BLB(node, 0)->max_msn_applied = msn;
BLB_NBYTESINBUF(node, 0) += newlesize;
node->dirty=1;
toku_verify_or_set_counts(node); toku_verify_or_set_counts(node);
...@@ -194,6 +169,23 @@ testhelper_string_key_cmp(DB *UU(e), const DBT *a, const DBT *b) ...@@ -194,6 +169,23 @@ testhelper_string_key_cmp(DB *UU(e), const DBT *a, const DBT *b)
return strcmp(s, t); return strcmp(s, t);
} }
void
toku_pin_node_with_min_bfe(BRTNODE* node, BLOCKNUM b, BRT t)
{
struct brtnode_fetch_extra bfe;
fill_bfe_for_min_read(&bfe, t->h);
toku_pin_brtnode_off_client_thread(
t->h,
b,
toku_cachetable_hash(t->h->cf, b),
&bfe,
0,
NULL,
node
);
}
int toku_testsetup_insert_to_nonleaf (BRT brt, BLOCKNUM blocknum, enum brt_msg_type cmdtype, char *key, int keylen, char *val, int vallen) { int toku_testsetup_insert_to_nonleaf (BRT brt, BLOCKNUM blocknum, enum brt_msg_type cmdtype, char *key, int keylen, char *val, int vallen) {
void *node_v; void *node_v;
int r; int r;
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
*/ */
#include "includes.h" #include "includes.h"
#include <brt-flusher.h>
static int static int
compare_pairs (BRT brt, struct kv_pair *a, struct kv_pair *b) { compare_pairs (BRT brt, struct kv_pair *a, struct kv_pair *b) {
...@@ -388,7 +389,7 @@ int ...@@ -388,7 +389,7 @@ int
toku_verify_brt_with_progress (BRT brt, int (*progress_callback)(void *extra, float progress), void *progress_extra, int verbose, int keep_on_going) { toku_verify_brt_with_progress (BRT brt, int (*progress_callback)(void *extra, float progress), void *progress_extra, int verbose, int keep_on_going) {
assert(brt->h); assert(brt->h);
u_int32_t root_hash; u_int32_t root_hash;
CACHEKEY *rootp = toku_calculate_root_offset_pointer(brt, &root_hash); CACHEKEY *rootp = toku_calculate_root_offset_pointer(brt->h, &root_hash);
int r = toku_verify_brtnode(brt, ZERO_MSN, ZERO_MSN, *rootp, -1, NULL, NULL, progress_callback, progress_extra, 1, verbose, keep_on_going); int r = toku_verify_brtnode(brt, ZERO_MSN, ZERO_MSN, *rootp, -1, NULL, NULL, progress_callback, progress_extra, 1, verbose, keep_on_going);
if (r == 0) { if (r == 0) {
toku_brtheader_lock(brt->h); toku_brtheader_lock(brt->h);
......
...@@ -22,6 +22,7 @@ enum brt_layout_version_e { ...@@ -22,6 +22,7 @@ enum brt_layout_version_e {
BRT_LAYOUT_VERSION_16 = 16, // Dr. No: No subtree estimates, partition layout information represented more transparently. BRT_LAYOUT_VERSION_16 = 16, // Dr. No: No subtree estimates, partition layout information represented more transparently.
// ALERT ALERT ALERT: version 16 never released to customers, internal and beta use only // ALERT ALERT ALERT: version 16 never released to customers, internal and beta use only
BRT_LAYOUT_VERSION_17 = 17, // Dr. No: Add STAT64INFO_S to brt_header BRT_LAYOUT_VERSION_17 = 17, // Dr. No: Add STAT64INFO_S to brt_header
BRT_LAYOUT_VERSION_18 = 18, // Dr. No: Add HOT info to brt_header
BRT_NEXT_VERSION, // the version after the current version BRT_NEXT_VERSION, // the version after the current version
BRT_LAYOUT_VERSION = BRT_NEXT_VERSION-1, // A hack so I don't have to change this line. BRT_LAYOUT_VERSION = BRT_NEXT_VERSION-1, // A hack so I don't have to change this line.
BRT_LAYOUT_MIN_SUPPORTED_VERSION = BRT_LAYOUT_VERSION_13, // Minimum version supported BRT_LAYOUT_MIN_SUPPORTED_VERSION = BRT_LAYOUT_VERSION_13, // Minimum version supported
......
...@@ -466,6 +466,19 @@ toku_cachetable_set_lock_unlock_for_io (CACHETABLE ct, void (*ydb_lock_callback) ...@@ -466,6 +466,19 @@ toku_cachetable_set_lock_unlock_for_io (CACHETABLE ct, void (*ydb_lock_callback)
ct->ydb_unlock_callback = ydb_unlock_callback; ct->ydb_unlock_callback = ydb_unlock_callback;
} }
void
toku_cachetable_call_ydb_lock(CACHEFILE cf){
if (cf->cachetable->ydb_lock_callback) {
assert(cf->cachetable->ydb_unlock_callback);
cf->cachetable->ydb_lock_callback();
}
}
void
toku_cachetable_call_ydb_unlock(CACHEFILE cf){
if (cf->cachetable->ydb_unlock_callback) cf->cachetable->ydb_unlock_callback();
}
// //
// Increment the reference count // Increment the reference count
// MUST HOLD cachetable lock // MUST HOLD cachetable lock
......
...@@ -523,6 +523,8 @@ char * toku_cachetable_get_fname_in_cwd(CACHETABLE ct, const char * fname_in_env ...@@ -523,6 +523,8 @@ char * toku_cachetable_get_fname_in_cwd(CACHETABLE ct, const char * fname_in_env
void toku_cachetable_set_lock_unlock_for_io (CACHETABLE ct, void (*ydb_lock_callback)(void), void (*ydb_unlock_callback)(void)); void toku_cachetable_set_lock_unlock_for_io (CACHETABLE ct, void (*ydb_lock_callback)(void), void (*ydb_unlock_callback)(void));
// Effect: When we do I/O we may need to release locks (e.g., the ydb lock). These functions release the lock and acquire the lock. // Effect: When we do I/O we may need to release locks (e.g., the ydb lock). These functions release the lock and acquire the lock.
void toku_cachetable_call_ydb_lock(CACHEFILE cf);
void toku_cachetable_call_ydb_unlock(CACHEFILE cf);
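/*
 * Illustrative only (not part of this commit): a rough sketch of how an I/O
 * path might bracket a blocking operation with these helpers, assuming the
 * callbacks were registered via toku_cachetable_set_lock_unlock_for_io().
 * blocking_io_sketch is a hypothetical caller, not a real API.
 */
static void blocking_io_sketch(CACHEFILE cf) {
    toku_cachetable_call_ydb_unlock(cf); // drop the ydb lock before blocking
    /* ... perform the blocking read or write here ... */
    toku_cachetable_call_ydb_lock(cf);   // reacquire it once the I/O is done
}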
void cachefile_kibbutz_enq (CACHEFILE cf, void (*f)(void*), void *extra); void cachefile_kibbutz_enq (CACHEFILE cf, void (*f)(void*), void *extra);
......
/* -*- mode: C; c-basic-offset: 4 -*- */ /* -*- mode: C; c-basic-offset: 4 -*- */
#ifndef TOKU_DBUFIO_H #ifndef TOKU_DBUFIO_H
#define TOKU_DBUFIO_H #define TOKU_DBUFIO_H
#ident "$Id: queue.c 20104 2010-05-12 17:22:40Z bkuszmaul $" #ident "$Id$"
#ident "Copyright (c) 2010 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2010 Tokutek Inc. All rights reserved."
#include <toku_portability.h> #include <toku_portability.h>
......
/* -*- mode: C; c-basic-offset: 4 -*- */ /* -*- mode: C; c-basic-offset: 4 -*- */
#ident "$Id$"
#ident "Copyright (c) 2010 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2010 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
......
/* -*- mode: C; c-basic-offset: 4 -*- */ /* -*- mode: C; c-basic-offset: 4 -*- */
#ident "$Id$"
#ident "Copyright (c) 2010 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2010 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
......
/* -*- mode: C; c-basic-offset: 4 -*- */ /* -*- mode: C; c-basic-offset: 4 -*- */
#ident "$Id: mempool.c 19902 2010-05-06 20:41:32Z bkuszmaul $" #ident "$Id$"
#ident "Copyright (c) 2007-2010 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2007-2010 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
......
#ifndef _TOKU_MEMPOOL_H #ifndef _TOKU_MEMPOOL_H
#define _TOKU_MEMPOOL_H #define _TOKU_MEMPOOL_H
#ident "$Id: mempool.h 19902 2010-05-06 20:41:32Z bkuszmaul $" #ident "$Id$"
#ident "Copyright (c) 2007-2010 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2007-2010 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
......
/* -*- mode: C; c-basic-offset: 4 -*- */ /* -*- mode: C; c-basic-offset: 4 -*- */
#ifndef TOKU_NBMUTEX_H #ifndef TOKU_NBMUTEX_H
#define TOKU_NBMUTEX_H #define TOKU_NBMUTEX_H
#ident "$Id: rwlock.h 32279 2011-06-29 13:51:57Z bkuszmaul $" #ident "$Id$"
#ident "Copyright (c) 2007-2010 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2007-2010 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
......
/* -*- mode: C; c-basic-offset: 4 -*- */ /* -*- mode: C; c-basic-offset: 4 -*- */
#ident "$Id: brt-serialize-test.c 36450 2011-11-02 20:10:18Z bperlman $" #ident "$Id$"
#ident "Copyright (c) 2007, 2008 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2007, 2008 Tokutek Inc. All rights reserved."
#include "test.h" #include "test.h"
......
#ident "$Id: cachetable-simple-verify.c 36579 2011-11-04 20:02:04Z zardosht $" /* -*- mode: C; c-basic-offset: 4 -*- */
#ident "$Id$"
#ident "Copyright (c) 2007-2011 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2007-2011 Tokutek Inc. All rights reserved."
#include "includes.h" #include "includes.h"
#include "test.h" #include "test.h"
......
...@@ -136,7 +136,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) { ...@@ -136,7 +136,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) {
// discard the old root block // discard the old root block
u_int32_t fullhash = 0; u_int32_t fullhash = 0;
CACHEKEY *rootp; CACHEKEY *rootp;
rootp = toku_calculate_root_offset_pointer(brt, &fullhash); rootp = toku_calculate_root_offset_pointer(brt->h, &fullhash);
// set the new root to point to the new tree // set the new root to point to the new tree
*rootp = newroot->thisnodename; *rootp = newroot->thisnodename;
......
...@@ -126,7 +126,7 @@ test_msnfilter(int do_verify) { ...@@ -126,7 +126,7 @@ test_msnfilter(int do_verify) {
// discard the old root block // discard the old root block
u_int32_t fullhash = 0; u_int32_t fullhash = 0;
CACHEKEY *rootp; CACHEKEY *rootp;
rootp = toku_calculate_root_offset_pointer(brt, &fullhash); rootp = toku_calculate_root_offset_pointer(brt->h, &fullhash);
BRTNODE newroot = make_node(brt, 0); BRTNODE newroot = make_node(brt, 0);
......
/* -*- mode: C; c-basic-offset: 4 -*- */
#ident "$Id$" #ident "$Id$"
#ident "Copyright (c) 2011 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
/* Test an overflow condition on the leaf. See #632. */ /* Test an overflow condition on the leaf. See #632. */
......
...@@ -34,7 +34,7 @@ doit (void) { ...@@ -34,7 +34,7 @@ doit (void) {
toku_testsetup_initialize(); // must precede any other toku_testsetup calls toku_testsetup_initialize(); // must precede any other toku_testsetup calls
r = toku_testsetup_leaf(t, &nodea); r = toku_testsetup_leaf(t, &nodea, 1, NULL, NULL);
assert(r==0); assert(r==0);
r = toku_testsetup_nonleaf(t, 1, &nodeb, 1, &nodea, 0, 0); r = toku_testsetup_nonleaf(t, 1, &nodeb, 1, &nodea, 0, 0);
......
/* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2007-2011 Tokutek Inc. All rights reserved."
#ident "$Id$"
/* The goal of this test: make sure that inserts stay behind deletes. */
#include "test.h"
#include "includes.h"
#include <brt-cachetable-wrappers.h>
#include "brt-flusher.h"
#include "checkpoint.h"
static TOKUTXN const null_txn = 0;
static DB * const null_db = 0;
enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
CACHETABLE ct;
BRT brt;
int fnamelen;
char *fname;
static int update_func(
DB* UU(db),
const DBT* key,
const DBT* old_val,
const DBT* UU(extra),
void (*set_val)(const DBT *new_val, void *set_extra),
void *set_extra)
{
DBT new_val;
assert(old_val->size > 0);
if (verbose) {
printf("applying update to %s\n", (char *)key->data);
}
toku_init_dbt(&new_val);
set_val(&new_val, set_extra);
return 0;
}
static void
doit (void) {
BLOCKNUM node_leaf;
BLOCKNUM node_internal, node_root;
int r;
fnamelen = strlen(__FILE__) + 20;
fname = toku_malloc(fnamelen);
assert(fname!=0);
snprintf(fname, fnamelen, "%s.brt", __FILE__);
r = toku_brt_create_cachetable(&ct, 500*1024*1024, ZERO_LSN, NULL_LOGGER); assert(r==0);
unlink(fname);
r = toku_open_brt(fname, 1, &brt, NODESIZE, NODESIZE/2, ct, null_txn, toku_builtin_compare_fun, null_db);
assert(r==0);
toku_free(fname);
brt->update_fun = update_func;
brt->h->update_fun = update_func;
toku_testsetup_initialize(); // must precede any other toku_testsetup calls
char* pivots[1];
pivots[0] = toku_strdup("kkkkk");
int pivot_len = 6;
r = toku_testsetup_leaf(brt, &node_leaf, 2, pivots, &pivot_len);
assert(r==0);
r = toku_testsetup_nonleaf(brt, 1, &node_internal, 1, &node_leaf, 0, 0);
assert(r==0);
r = toku_testsetup_nonleaf(brt, 2, &node_root, 1, &node_internal, 0, 0);
assert(r==0);
r = toku_testsetup_root(brt, node_root);
assert(r==0);
//
// at this point we have created a tree with a root, an internal node,
// and two leaf nodes, the pivot being "kkkkk"
//
// now we insert a row into each leaf node
r = toku_testsetup_insert_to_leaf (
brt,
node_leaf,
"a", // key
2, // keylen
"aa",
3
);
assert(r==0);
r = toku_testsetup_insert_to_leaf (
brt,
node_leaf,
"z", // key
2, // keylen
"zz",
3
);
assert(r==0);
char filler[400];
memset(filler, 0, sizeof(filler));
// now we insert filler data so that the rebalance
// keeps it at two nodes
r = toku_testsetup_insert_to_leaf (
brt,
node_leaf,
"b", // key
2, // keylen
filler,
sizeof(filler)
);
assert(r==0);
r = toku_testsetup_insert_to_leaf (
brt,
node_leaf,
"y", // key
2, // keylen
filler,
sizeof(filler)
);
assert(r==0);
//
// now insert a bunch of dummy delete messages
// into the internal node, to get its cachepressure size up
//
for (int i = 0; i < 100000; i++) {
r = toku_testsetup_insert_to_nonleaf (
brt,
node_internal,
BRT_DELETE_ANY,
"jj", // this key does not exist, so its message application should be a no-op
3,
NULL,
0
);
assert(r==0);
}
//
// now insert a broadcast message into the root
//
r = toku_testsetup_insert_to_nonleaf (
brt,
node_root,
BRT_UPDATE_BROADCAST_ALL,
NULL,
0,
NULL,
0
);
assert(r==0);
// now lock and release the leaf node to make sure it is what we expect it to be.
BRTNODE node = NULL;
struct brtnode_fetch_extra bfe;
fill_bfe_for_min_read(&bfe, brt->h);
toku_pin_brtnode_off_client_thread(
brt->h,
node_leaf,
toku_cachetable_hash(brt->h->cf, node_leaf),
&bfe,
0,
NULL,
&node
);
assert(node->dirty);
assert(node->n_children == 2);
assert(BP_STATE(node,0) == PT_AVAIL);
assert(BP_STATE(node,1) == PT_AVAIL);
toku_unpin_brtnode_off_client_thread(brt->h, node);
// now do a lookup on one of the keys, this should bring a leaf node up to date
DBT k;
struct check_pair pair = {2, "a", 0, NULL, 0};
r = toku_brt_lookup(brt, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair);
assert(r==0);
//
// pin the leaf one more time
// and make sure that one basement
// node is in memory and another is
// on disk
//
fill_bfe_for_min_read(&bfe, brt->h);
toku_pin_brtnode_off_client_thread(
brt->h,
node_leaf,
toku_cachetable_hash(brt->h->cf, node_leaf),
&bfe,
0,
NULL,
&node
);
assert(node->dirty);
assert(node->n_children == 2);
assert(BP_STATE(node,0) == PT_AVAIL);
assert(BP_STATE(node,1) == PT_AVAIL);
toku_unpin_brtnode_off_client_thread(brt->h, node);
//
// now let us induce a clean on the internal node
//
fill_bfe_for_min_read(&bfe, brt->h);
toku_pin_brtnode_off_client_thread(
brt->h,
node_internal,
toku_cachetable_hash(brt->h->cf, node_internal),
&bfe,
0,
NULL,
&node
);
assert(node->dirty);
// we expect that this flushes its buffer, that
// a merge is not done, and that the lookup
// of values "a" and "z" still works
r = toku_brtnode_cleaner_callback(
node,
node_internal,
toku_cachetable_hash(brt->h->cf, node_internal),
brt->h
);
// verify that node_internal's buffer is empty
fill_bfe_for_min_read(&bfe, brt->h);
toku_pin_brtnode_off_client_thread(
brt->h,
node_internal,
toku_cachetable_hash(brt->h->cf, node_internal),
&bfe,
0,
NULL,
&node
);
// check that buffers are empty
assert(toku_bnc_nbytesinbuf(BNC(node, 0)) == 0);
toku_unpin_brtnode_off_client_thread(brt->h, node);
//
// now run a checkpoint to get everything clean,
// and to get the rebalancing to happen
//
r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
assert_zero(r);
// check that lookups on the two keys are still good
struct check_pair pair1 = {2, "a", 0, NULL, 0};
r = toku_brt_lookup(brt, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair1);
assert(r==0);
struct check_pair pair2 = {2, "z", 0, NULL, 0};
r = toku_brt_lookup(brt, toku_fill_dbt(&k, "z", 2), lookup_checkf, &pair2);
assert(r==0);
r = toku_close_brt(brt, 0); assert(r==0);
r = toku_cachetable_close(&ct); assert(r==0);
toku_free(pivots[0]);
}
int
test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
default_parse_args(argc, argv);
doit();
return 0;
}
/* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2007-2011 Tokutek Inc. All rights reserved."
#ident "$Id$"
/* The goal of this test: make sure that inserts stay behind deletes. */
#include "test.h"
#include "includes.h"
#include <brt-cachetable-wrappers.h>
#include "brt-flusher.h"
#include "checkpoint.h"
static TOKUTXN const null_txn = 0;
static DB * const null_db = 0;
enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
CACHETABLE ct;
BRT brt;
int fnamelen;
char *fname;
static int update_func(
DB* UU(db),
const DBT* key,
const DBT* old_val,
const DBT* UU(extra),
void (*set_val)(const DBT *new_val, void *set_extra),
void *set_extra)
{
DBT new_val;
assert(old_val->size > 0);
if (verbose) {
printf("applying update to %s\n", (char *)key->data);
}
toku_init_dbt(&new_val);
set_val(&new_val, set_extra);
return 0;
}
static void
doit (void) {
BLOCKNUM node_leaf;
BLOCKNUM node_internal, node_root;
int r;
fnamelen = strlen(__FILE__) + 20;
fname = toku_malloc(fnamelen);
assert(fname!=0);
snprintf(fname, fnamelen, "%s.brt", __FILE__);
r = toku_brt_create_cachetable(&ct, 500*1024*1024, ZERO_LSN, NULL_LOGGER); assert(r==0);
unlink(fname);
r = toku_open_brt(fname, 1, &brt, NODESIZE, NODESIZE/2, ct, null_txn, toku_builtin_compare_fun, null_db);
assert(r==0);
toku_free(fname);
brt->update_fun = update_func;
brt->h->update_fun = update_func;
toku_testsetup_initialize(); // must precede any other toku_testsetup calls
char* pivots[1];
pivots[0] = toku_strdup("kkkkk");
int pivot_len = 6;
r = toku_testsetup_leaf(brt, &node_leaf, 2, pivots, &pivot_len);
assert(r==0);
r = toku_testsetup_nonleaf(brt, 1, &node_internal, 1, &node_leaf, 0, 0);
assert(r==0);
r = toku_testsetup_nonleaf(brt, 2, &node_root, 1, &node_internal, 0, 0);
assert(r==0);
r = toku_testsetup_root(brt, node_root);
assert(r==0);
//
// at this point we have created a tree with a root, an internal node,
// and two leaf nodes, the pivot being "kkkkk"
//
// now we insert a row into each leaf node
r = toku_testsetup_insert_to_leaf (
brt,
node_leaf,
"a", // key
2, // keylen
"aa",
3
);
assert(r==0);
r = toku_testsetup_insert_to_leaf (
brt,
node_leaf,
"z", // key
2, // keylen
"zz",
3
);
assert(r==0);
char filler[400];
memset(filler, 0, sizeof(filler));
// now we insert filler data so that the rebalance
// keeps it at two nodes
r = toku_testsetup_insert_to_leaf (
brt,
node_leaf,
"b", // key
2, // keylen
filler,
sizeof(filler)
);
assert(r==0);
r = toku_testsetup_insert_to_leaf (
brt,
node_leaf,
"y", // key
2, // keylen
filler,
sizeof(filler)
);
assert(r==0);
//
// now insert a bunch of dummy delete messages
// into the internal node, to get its cachepressure size up
//
for (int i = 0; i < 100000; i++) {
r = toku_testsetup_insert_to_nonleaf (
brt,
node_internal,
BRT_DELETE_ANY,
"jj", // this key does not exist, so its message application should be a no-op
3,
NULL,
0
);
assert(r==0);
}
//
// now insert a broadcast message into the root
//
r = toku_testsetup_insert_to_nonleaf (
brt,
node_root,
BRT_UPDATE_BROADCAST_ALL,
NULL,
0,
NULL,
0
);
assert(r==0);
//
// now run a checkpoint to get everything clean
//
r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
assert_zero(r);
// now lock and release the leaf node to make sure it is what we expect it to be.
BRTNODE node = NULL;
struct brtnode_fetch_extra bfe;
fill_bfe_for_min_read(&bfe, brt->h);
toku_pin_brtnode_off_client_thread(
brt->h,
node_leaf,
toku_cachetable_hash(brt->h->cf, node_leaf),
&bfe,
0,
NULL,
&node
);
assert(!node->dirty);
assert(node->n_children == 2);
// a hack to get the basement nodes evicted
for (int i = 0; i < 20; i++) {
PAIR_ATTR attr;
toku_brtnode_pe_callback(node, make_pair_attr(0xffffffff), &attr, NULL);
}
// this ensures that when we do the lookups below,
// the data is read off disk
assert(BP_STATE(node,0) == PT_ON_DISK);
assert(BP_STATE(node,1) == PT_ON_DISK);
toku_unpin_brtnode_off_client_thread(brt->h, node);
// now do a lookup on one of the keys, this should bring a leaf node up to date
DBT k;
struct check_pair pair = {2, "a", 0, NULL, 0};
r = toku_brt_lookup(brt, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair);
assert(r==0);
//
// pin the leaf one more time
// and make sure that one basement
// node is in memory and another is
// on disk
//
fill_bfe_for_min_read(&bfe, brt->h);
toku_pin_brtnode_off_client_thread(
brt->h,
node_leaf,
toku_cachetable_hash(brt->h->cf, node_leaf),
&bfe,
0,
NULL,
&node
);
assert(!node->dirty);
assert(node->n_children == 2);
assert(BP_STATE(node,0) == PT_AVAIL);
assert(BP_STATE(node,1) == PT_ON_DISK);
toku_unpin_brtnode_off_client_thread(brt->h, node);
//
// now let us induce a clean on the internal node
//
fill_bfe_for_min_read(&bfe, brt->h);
toku_pin_brtnode_off_client_thread(
brt->h,
node_internal,
toku_cachetable_hash(brt->h->cf, node_internal),
&bfe,
0,
NULL,
&node
);
assert(!node->dirty);
// we expect that this flushes its buffer, that
// a merge is not done, and that the lookup
// of values "a" and "z" still works
r = toku_brtnode_cleaner_callback(
node,
node_internal,
toku_cachetable_hash(brt->h->cf, node_internal),
brt->h
);
// verify that node_internal's buffer is empty
fill_bfe_for_min_read(&bfe, brt->h);
toku_pin_brtnode_off_client_thread(
brt->h,
node_internal,
toku_cachetable_hash(brt->h->cf, node_internal),
&bfe,
0,
NULL,
&node
);
// check that buffers are empty
assert(toku_bnc_nbytesinbuf(BNC(node, 0)) == 0);
toku_unpin_brtnode_off_client_thread(brt->h, node);
//
// now run a checkpoint to get everything clean,
// and to get the rebalancing to happen
//
r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
assert_zero(r);
// check that lookups on the two keys are still good
struct check_pair pair1 = {2, "a", 0, NULL, 0};
r = toku_brt_lookup(brt, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair1);
assert(r==0);
struct check_pair pair2 = {2, "z", 0, NULL, 0};
r = toku_brt_lookup(brt, toku_fill_dbt(&k, "z", 2), lookup_checkf, &pair2);
assert(r==0);
r = toku_close_brt(brt, 0); assert(r==0);
r = toku_cachetable_close(&ct); assert(r==0);
toku_free(pivots[0]);
}
int
test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
default_parse_args(argc, argv);
doit();
return 0;
}
...@@ -65,7 +65,7 @@ doit (int ksize __attribute__((__unused__))) { ...@@ -65,7 +65,7 @@ doit (int ksize __attribute__((__unused__))) {
toku_testsetup_initialize(); // must precede any other toku_testsetup calls toku_testsetup_initialize(); // must precede any other toku_testsetup calls
for (i=0; i<BRT_FANOUT; i++) { for (i=0; i<BRT_FANOUT; i++) {
r=toku_testsetup_leaf(t, &cnodes[i]); r=toku_testsetup_leaf(t, &cnodes[i], 1, NULL, NULL);
assert(r==0); assert(r==0);
char key[KSIZE+10]; char key[KSIZE+10];
int keylen = 1+snprintf(key, KSIZE, "%08d%0*d", i*10000+1, KSIZE-9, 0); int keylen = 1+snprintf(key, KSIZE, "%08d%0*d", i*10000+1, KSIZE-9, 0);
......
#ident "$Id: test-del-inorder.c 32975 2011-07-11 23:42:51Z leifwalsh $" /* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2007-2011 Tokutek Inc. All rights reserved."
#ident "$Id: test-merges-on-cleaner.c 38542 2012-01-06 14:06:23Z christianrober $"
/* The goal of this test: make sure that inserts stay behind deletes. */ /* The goal of this test: make sure that inserts stay behind deletes. */
#include "test.h" #include "test.h"
#include "includes.h" #include "includes.h"
#include <brt-cachetable-wrappers.h> #include <brt-cachetable-wrappers.h>
#include "brt-flusher.h"
#include "checkpoint.h"
static TOKUTXN const null_txn = 0; static TOKUTXN const null_txn = 0;
static DB * const null_db = 0; static DB * const null_db = 0;
...@@ -58,9 +62,9 @@ doit (void) { ...@@ -58,9 +62,9 @@ doit (void) {
toku_testsetup_initialize(); // must precede any other toku_testsetup calls toku_testsetup_initialize(); // must precede any other toku_testsetup calls
r = toku_testsetup_leaf(brt, &node_leaf[0]); r = toku_testsetup_leaf(brt, &node_leaf[0], 1, NULL, NULL);
assert(r==0); assert(r==0);
r = toku_testsetup_leaf(brt, &node_leaf[1]); r = toku_testsetup_leaf(brt, &node_leaf[1], 1, NULL, NULL);
assert(r==0); assert(r==0);
char* pivots[1]; char* pivots[1];
...@@ -70,7 +74,7 @@ doit (void) { ...@@ -70,7 +74,7 @@ doit (void) {
r = toku_testsetup_nonleaf(brt, 1, &node_internal, 2, node_leaf, pivots, &pivot_len); r = toku_testsetup_nonleaf(brt, 1, &node_internal, 2, node_leaf, pivots, &pivot_len);
assert(r==0); assert(r==0);
r = toku_testsetup_nonleaf(brt, 1, &node_root, 1, &node_internal, 0, 0); r = toku_testsetup_nonleaf(brt, 2, &node_root, 1, &node_internal, 0, 0);
assert(r==0); assert(r==0);
r = toku_testsetup_root(brt, node_root); r = toku_testsetup_root(brt, node_root);
...@@ -131,6 +135,15 @@ doit (void) { ...@@ -131,6 +135,15 @@ doit (void) {
0 0
); );
assert(r==0); assert(r==0);
//
// now let us induce a clean on the internal node
//
BRTNODE node;
toku_pin_node_with_min_bfe(&node, node_leaf[1], brt);
// hack to get merge going
BLB_SEQINSERT(node, node->n_children-1) = FALSE;
toku_unpin_brtnode(brt, node);
// now do a lookup on one of the keys, this should bring a leaf node up to date // now do a lookup on one of the keys, this should bring a leaf node up to date
DBT k; DBT k;
...@@ -138,10 +151,6 @@ doit (void) { ...@@ -138,10 +151,6 @@ doit (void) {
r = toku_brt_lookup(brt, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair); r = toku_brt_lookup(brt, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair);
assert(r==0); assert(r==0);
//
// now let us induce a clean on the internal node
//
BRTNODE node;
struct brtnode_fetch_extra bfe; struct brtnode_fetch_extra bfe;
fill_bfe_for_min_read(&bfe, brt->h); fill_bfe_for_min_read(&bfe, brt->h);
toku_pin_brtnode_off_client_thread( toku_pin_brtnode_off_client_thread(
...@@ -175,12 +184,19 @@ doit (void) { ...@@ -175,12 +184,19 @@ doit (void) {
NULL, NULL,
&node &node
); );
// check that no merge happened // check that merge happened
assert(node->n_children == 2); assert(node->n_children == 1);
// check that buffers are empty // check that buffers are empty
assert(toku_bnc_nbytesinbuf(BNC(node, 0)) == 0); assert(toku_bnc_nbytesinbuf(BNC(node, 0)) == 0);
assert(toku_bnc_nbytesinbuf(BNC(node, 1)) == 0);
toku_unpin_brtnode_off_client_thread(brt->h, node); toku_unpin_brtnode_off_client_thread(brt->h, node);
//
// now run a checkpoint to get everything clean,
// and to get the rebalancing to happen
//
r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
assert_zero(r);
// check that lookups on the two keys are still good // check that lookups on the two keys are still good
struct check_pair pair1 = {2, "a", 0, NULL, 0}; struct check_pair pair1 = {2, "a", 0, NULL, 0};
r = toku_brt_lookup(brt, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair1); r = toku_brt_lookup(brt, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair1);
...@@ -198,6 +214,7 @@ doit (void) { ...@@ -198,6 +214,7 @@ doit (void) {
int int
test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) { test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
default_parse_args(argc, argv);
doit(); doit();
return 0; return 0;
} }
/* -*- mode: C; c-basic-offset: 4 -*- */
#ident "$Id$"
#ident "Copyright (c) 2011 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
/* The goal of this test: make sure that inserts stay behind deletes. */
#include "test.h"
#include "includes.h"
#include <brt-cachetable-wrappers.h>
#include "brt-flusher.h"
#include "brt-flusher-internal.h"
#include "checkpoint.h"
static TOKUTXN const null_txn = 0;
static DB * const null_db = 0;
enum { NODESIZE = 1024, KSIZE=NODESIZE-100, TOKU_PSIZE=20 };
CACHETABLE ct;
BRT t;
int fnamelen;
char *fname;
int curr_child_to_flush;
int num_flushes_called;
static int child_to_flush(struct brt_header* UU(h), BRTNODE parent, void* UU(extra)) {
// internal node has 2 children
if (parent->height == 1) {
assert(parent->n_children == 2);
return curr_child_to_flush;
}
// root has 1 child
else if (parent->height == 2) {
assert(parent->n_children == 1);
return 0;
}
else {
assert(FALSE);
}
return curr_child_to_flush;
}
static void update_status(BRTNODE UU(child), int UU(dirtied), void* UU(extra)) {
num_flushes_called++;
}
static bool
dont_destroy_bn(void* UU(extra))
{
return false;
}
static void merge_should_not_happen(struct flusher_advice* UU(fa),
struct brt_header* UU(h),
BRTNODE UU(parent),
int UU(childnum),
BRTNODE UU(child),
void* UU(extra))
{
assert(FALSE);
}
static bool recursively_flush_should_not_happen(BRTNODE UU(child), void* UU(extra)) {
assert(FALSE);
return false; // unreachable, but keeps the bool return type well-formed
}
static bool always_flush(BRTNODE UU(child), void* UU(extra)) {
return true;
}
static void
doit (void) {
BLOCKNUM node_internal, node_root;
BLOCKNUM node_leaf[2];
int r;
fnamelen = strlen(__FILE__) + 20;
fname = toku_malloc(fnamelen);
assert(fname!=0);
snprintf(fname, fnamelen, "%s.brt", __FILE__);
r = toku_brt_create_cachetable(&ct, 500*1024*1024, ZERO_LSN, NULL_LOGGER); assert(r==0);
unlink(fname);
r = toku_open_brt(fname, 1, &t, NODESIZE, NODESIZE/2, ct, null_txn, toku_builtin_compare_fun, null_db);
assert(r==0);
toku_free(fname);
toku_testsetup_initialize(); // must precede any other toku_testsetup calls
r = toku_testsetup_leaf(t, &node_leaf[0], 1, NULL, NULL);
assert(r==0);
r = toku_testsetup_leaf(t, &node_leaf[1], 1, NULL, NULL);
assert(r==0);
char* pivots[1];
pivots[0] = toku_strdup("kkkkk");
int pivot_len = 6;
r = toku_testsetup_nonleaf(t, 1, &node_internal, 2, node_leaf, pivots, &pivot_len);
assert(r==0);
r = toku_testsetup_nonleaf(t, 2, &node_root, 1, &node_internal, 0, 0);
assert(r==0);
r = toku_testsetup_root(t, node_root);
assert(r==0);
char filler[900];
memset(filler, 0, sizeof(filler));
// now we insert filler data so that a merge does not happen
r = toku_testsetup_insert_to_leaf (
t,
node_leaf[0],
"b", // key
2, // keylen
filler,
sizeof(filler)
);
assert(r==0);
r = toku_testsetup_insert_to_leaf (
t,
node_leaf[1],
"y", // key
2, // keylen
filler,
sizeof(filler)
);
assert(r==0);
// make buffers in internal node non-empty
r = toku_testsetup_insert_to_nonleaf(
t,
node_internal,
BRT_INSERT,
"a",
2,
NULL,
0
);
assert_zero(r);
r = toku_testsetup_insert_to_nonleaf(
t,
node_internal,
BRT_INSERT,
"z",
2,
NULL,
0
);
assert_zero(r);
//
// now run a checkpoint to get everything clean
//
r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
assert_zero(r);
// now with setup done, start the test
// test that if flush_some_child properly honors
// what we say and flushes the child we pick
BRTNODE node = NULL;
toku_pin_node_with_min_bfe(&node, node_internal, t);
toku_assert_entire_node_in_memory(node);
assert(node->n_children == 2);
assert(!node->dirty);
assert(toku_bnc_n_entries(node->bp[0].ptr.u.nonleaf) > 0);
assert(toku_bnc_n_entries(node->bp[1].ptr.u.nonleaf) > 0);
struct flusher_advice fa;
flusher_advice_init(
&fa,
child_to_flush,
dont_destroy_bn,
recursively_flush_should_not_happen,
merge_should_not_happen,
update_status,
default_pick_child_after_split,
NULL
);
curr_child_to_flush = 0;
num_flushes_called = 0;
flush_some_child(t->h, node, &fa);
assert(num_flushes_called == 1);
toku_pin_node_with_min_bfe(&node, node_internal, t);
toku_assert_entire_node_in_memory(node);
assert(node->dirty);
assert(node->n_children == 2);
// child 0 should have empty buffer because it flushed
// child 1 should still have message in buffer
assert(toku_bnc_n_entries(node->bp[0].ptr.u.nonleaf) == 0);
assert(toku_bnc_n_entries(node->bp[1].ptr.u.nonleaf) > 0);
toku_unpin_brtnode(t, node);
r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
assert_zero(r);
toku_pin_node_with_min_bfe(&node, node_internal, t);
assert(!node->dirty);
curr_child_to_flush = 1;
num_flushes_called = 0;
flush_some_child(t->h, node, &fa);
assert(num_flushes_called == 1);
toku_pin_node_with_min_bfe(&node, node_internal, t);
assert(node->dirty);
toku_assert_entire_node_in_memory(node);
assert(node->n_children == 2);
// both buffers should be empty now
assert(toku_bnc_n_entries(node->bp[0].ptr.u.nonleaf) == 0);
assert(toku_bnc_n_entries(node->bp[1].ptr.u.nonleaf) == 0);
// now let's do a flush with an empty buffer, make sure it is ok
toku_unpin_brtnode(t, node);
r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
assert_zero(r);
toku_pin_node_with_min_bfe(&node, node_internal, t);
assert(!node->dirty);
curr_child_to_flush = 0;
num_flushes_called = 0;
flush_some_child(t->h, node, &fa);
assert(num_flushes_called == 1);
toku_pin_node_with_min_bfe(&node, node_internal, t);
assert(!node->dirty); // nothing was flushed, so node better not be dirty
toku_assert_entire_node_in_memory(node);
assert(node->n_children == 2);
// both buffers should be empty now
assert(toku_bnc_n_entries(node->bp[0].ptr.u.nonleaf) == 0);
assert(toku_bnc_n_entries(node->bp[1].ptr.u.nonleaf) == 0);
toku_unpin_brtnode(t, node);
// now let's start a flush from the root, that always recursively flushes
flusher_advice_init(
&fa,
child_to_flush,
dont_destroy_bn,
always_flush,
merge_should_not_happen,
update_status,
default_pick_child_after_split,
NULL
);
// use a for loop to get us down both paths
for (int i = 0; i < 2; i++) {
toku_pin_node_with_min_bfe(&node, node_root, t);
toku_assert_entire_node_in_memory(node); // entire root is in memory
curr_child_to_flush = i;
num_flushes_called = 0;
flush_some_child(t->h, node, &fa);
assert(num_flushes_called == 2);
toku_pin_node_with_min_bfe(&node, node_internal, t);
assert(!node->dirty); // nothing was flushed, so node better not be dirty
toku_unpin_brtnode(t, node);
toku_pin_node_with_min_bfe(&node, node_leaf[0], t);
assert(!node->dirty); // nothing was flushed, so node better not be dirty
toku_unpin_brtnode(t, node);
toku_pin_node_with_min_bfe(&node, node_leaf[1], t);
assert(!node->dirty); // nothing was flushed, so node better not be dirty
toku_unpin_brtnode(t, node);
}
// now one more test to show a bug was fixed
// if there is nothing to flush from parent to child,
// and child is not fully in memory, we used to crash
// so, to make sure that is fixed, let's get internal to not
// be fully in memory, and make sure the above test works
// a hack to get internal compressed
r = toku_testsetup_insert_to_nonleaf(
t,
node_internal,
BRT_INSERT,
"c",
2,
NULL,
0
);
assert_zero(r);
r = toku_checkpoint(ct, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
assert_zero(r);
toku_pin_node_with_min_bfe(&node, node_internal, t);
for (int i = 0; i < 20; i++) {
PAIR_ATTR attr;
toku_brtnode_pe_callback(node, make_pair_attr(0xffffffff), &attr, NULL);
}
assert(BP_STATE(node,0) == PT_COMPRESSED);
toku_unpin_brtnode(t, node);
//now let's do the same test as above
toku_pin_node_with_min_bfe(&node, node_root, t);
toku_assert_entire_node_in_memory(node); // entire root is in memory
curr_child_to_flush = 0;
num_flushes_called = 0;
flush_some_child(t->h, node, &fa);
assert(num_flushes_called == 2);
r = toku_close_brt(t, 0); assert(r==0);
r = toku_cachetable_close(&ct); assert(r==0);
toku_free(pivots[0]);
}
int
test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
doit();
return 0;
}
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include "includes.h" #include "includes.h"
#include <brt-cachetable-wrappers.h> #include <brt-cachetable-wrappers.h>
#include <brt-flusher.h>
// Some constants to be used in calculations below // Some constants to be used in calculations below
static const int nodesize = 1024; // Target max node size static const int nodesize = 1024; // Target max node size
...@@ -27,8 +28,6 @@ static TOKUTXN const null_txn = 0; ...@@ -27,8 +28,6 @@ static TOKUTXN const null_txn = 0;
static DB * const null_db = 0; static DB * const null_db = 0;
static const char fname[]= __FILE__ ".brt"; static const char fname[]= __FILE__ ".brt";
static BRT_STATUS_S my_brt_status;
static int omt_long_cmp(OMTVALUE p, void *q) static int omt_long_cmp(OMTVALUE p, void *q)
{ {
LEAFENTRY a = p, b = q; LEAFENTRY a = p, b = q;
...@@ -172,7 +171,7 @@ test_split_on_boundary(void) ...@@ -172,7 +171,7 @@ test_split_on_boundary(void)
BRTNODE nodea, nodeb; BRTNODE nodea, nodeb;
DBT splitk; DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries // if we haven't done it right, we should hit the assert in the top of move_leafentries
brtleaf_split(brt->h, &sn, &nodea, &nodeb, &splitk, TRUE, 0, NULL, &my_brt_status); brtleaf_split(brt->h, &sn, &nodea, &nodeb, &splitk, TRUE, 0, NULL);
verify_basement_node_msns(nodea, dummy_msn_3884); verify_basement_node_msns(nodea, dummy_msn_3884);
verify_basement_node_msns(nodeb, dummy_msn_3884); verify_basement_node_msns(nodeb, dummy_msn_3884);
...@@ -245,7 +244,7 @@ test_split_with_everything_on_the_left(void) ...@@ -245,7 +244,7 @@ test_split_with_everything_on_the_left(void)
BRTNODE nodea, nodeb; BRTNODE nodea, nodeb;
DBT splitk; DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries // if we haven't done it right, we should hit the assert in the top of move_leafentries
brtleaf_split(brt->h, &sn, &nodea, &nodeb, &splitk, TRUE, 0, NULL, &my_brt_status); brtleaf_split(brt->h, &sn, &nodea, &nodeb, &splitk, TRUE, 0, NULL);
toku_unpin_brtnode(brt, nodeb); toku_unpin_brtnode(brt, nodeb);
r = toku_close_brt(brt, NULL); assert(r == 0); r = toku_close_brt(brt, NULL); assert(r == 0);
...@@ -320,7 +319,7 @@ test_split_on_boundary_of_last_node(void) ...@@ -320,7 +319,7 @@ test_split_on_boundary_of_last_node(void)
BRTNODE nodea, nodeb; BRTNODE nodea, nodeb;
DBT splitk; DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries // if we haven't done it right, we should hit the assert in the top of move_leafentries
brtleaf_split(brt->h, &sn, &nodea, &nodeb, &splitk, TRUE, 0, NULL, &my_brt_status); brtleaf_split(brt->h, &sn, &nodea, &nodeb, &splitk, TRUE, 0, NULL);
toku_unpin_brtnode(brt, nodeb); toku_unpin_brtnode(brt, nodeb);
r = toku_close_brt(brt, NULL); assert(r == 0); r = toku_close_brt(brt, NULL); assert(r == 0);
...@@ -388,7 +387,7 @@ test_split_at_begin(void) ...@@ -388,7 +387,7 @@ test_split_at_begin(void)
BRTNODE nodea, nodeb; BRTNODE nodea, nodeb;
DBT splitk; DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries // if we haven't done it right, we should hit the assert in the top of move_leafentries
brtleaf_split(brt->h, &sn, &nodea, &nodeb, &splitk, TRUE, 0, NULL, &my_brt_status); brtleaf_split(brt->h, &sn, &nodea, &nodeb, &splitk, TRUE, 0, NULL);
toku_unpin_brtnode(brt, nodeb); toku_unpin_brtnode(brt, nodeb);
r = toku_close_brt(brt, NULL); assert(r == 0); r = toku_close_brt(brt, NULL); assert(r == 0);
...@@ -452,7 +451,7 @@ test_split_at_end(void) ...@@ -452,7 +451,7 @@ test_split_at_end(void)
BRTNODE nodea, nodeb; BRTNODE nodea, nodeb;
DBT splitk; DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries // if we haven't done it right, we should hit the assert in the top of move_leafentries
brtleaf_split(brt->h, &sn, &nodea, &nodeb, &splitk, TRUE, 0, NULL, &my_brt_status); brtleaf_split(brt->h, &sn, &nodea, &nodeb, &splitk, TRUE, 0, NULL);
toku_unpin_brtnode(brt, nodeb); toku_unpin_brtnode(brt, nodeb);
r = toku_close_brt(brt, NULL); assert(r == 0); r = toku_close_brt(brt, NULL); assert(r == 0);
...@@ -506,7 +505,7 @@ test_split_odd_nodes(void) ...@@ -506,7 +505,7 @@ test_split_odd_nodes(void)
BRTNODE nodea, nodeb; BRTNODE nodea, nodeb;
DBT splitk; DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries // if we haven't done it right, we should hit the assert in the top of move_leafentries
brtleaf_split(brt->h, &sn, &nodea, &nodeb, &splitk, TRUE, 0, NULL, &my_brt_status); brtleaf_split(brt->h, &sn, &nodea, &nodeb, &splitk, TRUE, 0, NULL);
verify_basement_node_msns(nodea, dummy_msn_3884); verify_basement_node_msns(nodea, dummy_msn_3884);
verify_basement_node_msns(nodeb, dummy_msn_3884); verify_basement_node_msns(nodeb, dummy_msn_3884);
......
...@@ -35,7 +35,7 @@ doit (void) { ...@@ -35,7 +35,7 @@ doit (void) {
toku_testsetup_initialize(); // must precede any other toku_testsetup calls toku_testsetup_initialize(); // must precede any other toku_testsetup calls
r = toku_testsetup_leaf(t, &node_leaf); r = toku_testsetup_leaf(t, &node_leaf, 1, NULL, NULL);
assert(r==0); assert(r==0);
r = toku_testsetup_nonleaf(t, 1, &node_internal, 1, &node_leaf, 0, 0); r = toku_testsetup_nonleaf(t, 1, &node_internal, 1, &node_leaf, 0, 0);
......
...@@ -142,7 +142,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) { ...@@ -142,7 +142,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) {
// discard the old root block // discard the old root block
u_int32_t fullhash = 0; u_int32_t fullhash = 0;
CACHEKEY *rootp; CACHEKEY *rootp;
rootp = toku_calculate_root_offset_pointer(brt, &fullhash); rootp = toku_calculate_root_offset_pointer(brt->h, &fullhash);
// set the new root to point to the new tree // set the new root to point to the new tree
*rootp = newroot->thisnodename; *rootp = newroot->thisnodename;
......
...@@ -112,7 +112,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) { ...@@ -112,7 +112,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) {
// discard the old root block // discard the old root block
u_int32_t fullhash = 0; u_int32_t fullhash = 0;
CACHEKEY *rootp; CACHEKEY *rootp;
rootp = toku_calculate_root_offset_pointer(brt, &fullhash); rootp = toku_calculate_root_offset_pointer(brt->h, &fullhash);
// set the new root to point to the new tree // set the new root to point to the new tree
*rootp = newroot->thisnodename; *rootp = newroot->thisnodename;
......
...@@ -66,7 +66,7 @@ test_dup_in_leaf(int do_verify) { ...@@ -66,7 +66,7 @@ test_dup_in_leaf(int do_verify) {
// discard the old root block // discard the old root block
u_int32_t fullhash = 0; u_int32_t fullhash = 0;
CACHEKEY *rootp; CACHEKEY *rootp;
rootp = toku_calculate_root_offset_pointer(brt, &fullhash); rootp = toku_calculate_root_offset_pointer(brt->h, &fullhash);
BRTNODE newroot = make_node(brt, 0); BRTNODE newroot = make_node(brt, 0);
populate_leaf(newroot, htonl(2), 1); populate_leaf(newroot, htonl(2), 1);
......
...@@ -112,7 +112,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) { ...@@ -112,7 +112,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) {
// discard the old root block // discard the old root block
u_int32_t fullhash = 0; u_int32_t fullhash = 0;
CACHEKEY *rootp; CACHEKEY *rootp;
rootp = toku_calculate_root_offset_pointer(brt, &fullhash); rootp = toku_calculate_root_offset_pointer(brt->h, &fullhash);
// set the new root to point to the new tree // set the new root to point to the new tree
*rootp = newroot->thisnodename; *rootp = newroot->thisnodename;
......
...@@ -127,7 +127,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) { ...@@ -127,7 +127,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) {
// discard the old root block // discard the old root block
u_int32_t fullhash = 0; u_int32_t fullhash = 0;
CACHEKEY *rootp; CACHEKEY *rootp;
rootp = toku_calculate_root_offset_pointer(brt, &fullhash); rootp = toku_calculate_root_offset_pointer(brt->h, &fullhash);
// set the new root to point to the new tree // set the new root to point to the new tree
*rootp = newroot->thisnodename; *rootp = newroot->thisnodename;
......
...@@ -66,7 +66,7 @@ test_dup_in_leaf(int do_verify) { ...@@ -66,7 +66,7 @@ test_dup_in_leaf(int do_verify) {
// discard the old root block // discard the old root block
u_int32_t fullhash = 0; u_int32_t fullhash = 0;
CACHEKEY *rootp; CACHEKEY *rootp;
rootp = toku_calculate_root_offset_pointer(brt, &fullhash); rootp = toku_calculate_root_offset_pointer(brt->h, &fullhash);
BRTNODE newroot = make_node(brt, 0); BRTNODE newroot = make_node(brt, 0);
populate_leaf(newroot, htonl(2), 1); populate_leaf(newroot, htonl(2), 1);
......
...@@ -112,7 +112,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) { ...@@ -112,7 +112,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) {
// discard the old root block // discard the old root block
u_int32_t fullhash = 0; u_int32_t fullhash = 0;
CACHEKEY *rootp; CACHEKEY *rootp;
rootp = toku_calculate_root_offset_pointer(brt, &fullhash); rootp = toku_calculate_root_offset_pointer(brt->h, &fullhash);
// set the new root to point to the new tree // set the new root to point to the new tree
*rootp = newroot->thisnodename; *rootp = newroot->thisnodename;
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
#ifndef TOKU_ULE_INTERNAL_H #ifndef TOKU_ULE_INTERNAL_H
#define TOKU_ULE_INTERNAL_H #define TOKU_ULE_INTERNAL_H
#ident "$Id: ule.h 24600 2010-10-15 15:22:18Z dwells $" #ident "$Id$"
#ident "Copyright (c) 2007-2010 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2007-2010 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
......
...@@ -136,6 +136,7 @@ BDB_DONTRUN_TESTS = \ ...@@ -136,6 +136,7 @@ BDB_DONTRUN_TESTS = \
hotindexer-simple-abort \ hotindexer-simple-abort \
hotindexer-undo-do-test \ hotindexer-undo-do-test \
hotindexer-with-queries \ hotindexer-with-queries \
hot-optimize-table-tests \
insert-dup-prelock \ insert-dup-prelock \
isolation \ isolation \
isolation-read-committed \ isolation-read-committed \
...@@ -278,6 +279,7 @@ BDB_DONTRUN_TESTS = \ ...@@ -278,6 +279,7 @@ BDB_DONTRUN_TESTS = \
test_stress4 \ test_stress4 \
test_stress5 \ test_stress5 \
test_stress6 \ test_stress6 \
test_stress7 \
test_stress_with_verify \ test_stress_with_verify \
test_transactional_descriptor \ test_transactional_descriptor \
test_trans_desc_during_chkpt \ test_trans_desc_during_chkpt \
...@@ -355,6 +357,7 @@ DEPENDS_ON_STRESS_HELPERS = \ ...@@ -355,6 +357,7 @@ DEPENDS_ON_STRESS_HELPERS = \
test_stress4 \ test_stress4 \
test_stress5 \ test_stress5 \
test_stress6 \ test_stress6 \
test_stress7 \
#blank #blank
$(patsubst %,%.tdb,$(DEPENDS_ON_STRESS_HELPERS)): threaded_stress_test_helpers.h $(patsubst %,%.tdb,$(DEPENDS_ON_STRESS_HELPERS)): threaded_stress_test_helpers.h
...@@ -682,6 +685,7 @@ test_update_broadcast_stress.tdbrun: VGRIND= ...@@ -682,6 +685,7 @@ test_update_broadcast_stress.tdbrun: VGRIND=
test_update_stress.tdbrun: VGRIND= test_update_stress.tdbrun: VGRIND=
stress-test.tdbrun: VGRIND= stress-test.tdbrun: VGRIND=
stress-test.bdbrun: VGRIND= stress-test.bdbrun: VGRIND=
hot-optimize-table-tests.tdbrun: VGRIND=
libs: libs:
......
/* -*- mode: C; c-basic-offset: 4 -*- */
// hot-optimize-table-tests.c
#include "test.h"
#include "includes.h"
#include <brt-cachetable-wrappers.h>
#include "db.h"
#include "ydb.h"
const int envflags = DB_INIT_MPOOL |
DB_CREATE |
DB_THREAD |
DB_INIT_LOCK |
DB_INIT_LOG |
DB_INIT_TXN |
DB_PRIVATE;
DB_ENV* env;
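// Counts how many times update_func has been applied (once per key the broadcast update reaches).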
unsigned int leaf_hits;
// Custom Update Function for our test BRT.
static int
update_func(DB* UU(db),
const DBT* key,
const DBT* old_val,
const DBT* extra,
void (*set_val)(const DBT* new_val, void* set_extra) __attribute__((unused)),
void* UU(set_extra))
{
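// The extra DBT carries the address of the caller's results array; unpack the pointer-to-pointer before use.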
unsigned int *x_results;
assert(extra->size == sizeof x_results);
x_results = *(unsigned int **) extra->data;
assert(x_results);
assert(old_val->size > 0);
unsigned int* indexptr;
assert(key->size == (sizeof *indexptr));
indexptr = (unsigned int*)key->data;
++leaf_hits;
if (verbose && x_results[*indexptr] != 0) {
printf("x_results = %p, indexptr = %p, *indexptr = %u, x_results[*indexptr] = %u\n", x_results, indexptr, *indexptr, x_results[*indexptr]);
}
assert(x_results[*indexptr] == 0);
x_results[*indexptr]++;
// ++(x_results[*indexptr]);
// memset(&new_val, 0, sizeof(new_val));
// set_val(&new_val, set_extra);
unsigned int i = *indexptr;
if (verbose && ((i + 1) % 50000 == 0)) {
printf("applying update to %u\n", i);
//printf("x_results[] = %u\n", x_results[*indexptr]);
}
return 0;
}
///
static void
hot_test_setup(void)
{
int r = 0;
// Remove any previous environment.
CHK(system("rm -rf " ENVDIR));
// Set up a new TokuDB.
CHK(toku_os_mkdir(ENVDIR, S_IRWXU+S_IRWXG+S_IRWXO));
CHK(db_env_create(&env, 0));
env->set_errfile(env, stderr);
r = env->set_default_bt_compare(env, uint_dbt_cmp);CKERR(r);
env->set_update(env, update_func);
CHK(env->open(env, ENVDIR, envflags, S_IRWXU+S_IRWXG+S_IRWXO));
}
///
static void
hot_insert_keys(DB* db, unsigned int key_count)
{
int r = 0;
DB_TXN * xact;
unsigned int limit = 1;
if (key_count > 10) {
limit = 100000;
}
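// Progress messages below are printed once every `limit` insertions.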
// Dummy data.
const unsigned int DUMMY_SIZE = 100;
size_t size = DUMMY_SIZE;
char* dummy = NULL;
dummy = (char*)toku_xmalloc(size);
memset(dummy, 0, size);
// Start the transaction for insertions.
//
r = env->txn_begin(env, 0, &xact, 0); CKERR(r);
unsigned int key;
DBT key_thing;
DBT *keyptr = dbt_init(&key_thing, &key, sizeof(key));
DBT value_thing;
DBT *valueptr = dbt_init(&value_thing, dummy, size);
for (key = 0; key < key_count; ++key)
{
CHK(db->put(db, xact, keyptr, valueptr, 0));
// DEBUG OUTPUT
//
if (verbose && (key + 1) % limit == 0) {
printf("%d Elements inserted.\n", key + 1);
}
}
// Commit the insert transaction.
//
r = xact->commit(xact, 0); CKERR(r);
toku_free(dummy);
}
///
static void
hot_create_db(DB** db, const char* c)
{
int r = 0;
DB_TXN* xact;
verbose ? printf("Creating DB.\n") : 0;
r = env->txn_begin(env, 0, &xact, 0); CKERR(r);
CHK(db_create(db, env, 0));
CHK((*db)->open((*db), xact, c, NULL, DB_BTREE, DB_CREATE, 0666));
r = xact->commit(xact, 0); CKERR(r);
verbose ? printf("DB Created.\n") : 0;
}
///
static void
hot_test(DB* db, unsigned int size)
{
int r = 0;
leaf_hits = 0;
verbose ? printf("Insert some data.\n") : 0;
// Insert our keys to assemble the tree.
hot_insert_keys(db, size);
// Insert Broadcast Message.
verbose ? printf("Insert Broadcast Message.\n") : 0;
unsigned int *XMALLOC_N(size, x_results);
memset(x_results, 0, (sizeof x_results[0]) * size);
DBT extra;
DBT *extrap = dbt_init(&extra, &x_results, sizeof x_results);
DB_TXN * xact;
r = env->txn_begin(env, 0, &xact, 0); CKERR(r);
r = CHK(db->update_broadcast(db, xact, extrap, 0));
r = xact->commit(xact, 0); CKERR(r);
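// The broadcast message now sits near the root; hot_optimize should flush it down so update_func is applied exactly once per key.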
// Flatten the tree.
verbose ? printf("Calling hot optimize...\n") : 0;
r = db->hot_optimize(db, NULL, NULL);
assert(r == 0);
verbose ? printf("HOT Finished!\n") : 0;
for (unsigned int i = 0; i < size; ++i) {
assert(x_results[i] == 1);
}
verbose ? printf("Leaves hit = %u\n", leaf_hits) :0;
toku_free(x_results);
}
///
int
test_main(int argc, char * const argv[])
{
int r = 0;
default_parse_args(argc, argv);
hot_test_setup();
// Create and Open the Database/BRT
DB *db = NULL;
const unsigned int BIG = 4000000;
const unsigned int SMALL = 10;
const unsigned int NONE = 0;
hot_create_db(&db, "none.db");
hot_test(db, NONE);
hot_create_db(&db, "small.db");
hot_test(db, SMALL);
hot_create_db(&db, "big.db");
hot_test(db, BIG);
verbose ? printf("Exiting Test.\n") : 0;
return r;
}
/* -*- mode: C; c-basic-offset: 4 -*- */ /* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2007 Tokutek Inc. All rights reserved."
#ident "$Id: test_stress1.c 35324 2011-10-04 01:48:45Z zardosht $" #ident "$Id$"
#include "test.h" #include "test.h"
#include <stdio.h> #include <stdio.h>
......
/* -*- mode: C; c-basic-offset: 4 -*- */ /* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2007 Tokutek Inc. All rights reserved."
#ident "$Id: test_stress1.c 35324 2011-10-04 01:48:45Z zardosht $" #ident "$Id$"
#include "test.h" #include "test.h"
#include <stdio.h> #include <stdio.h>
......
/* -*- mode: C; c-basic-offset: 4 -*- */ /* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2007 Tokutek Inc. All rights reserved."
#ident "$Id: test_stress1.c 35324 2011-10-04 01:48:45Z zardosht $" #ident "$Id$"
#include "test.h" #include "test.h"
#include <stdio.h> #include <stdio.h>
......
/* -*- mode: C; c-basic-offset: 4 -*- */ /* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2007 Tokutek Inc. All rights reserved."
#ident "$Id: test_stress1.c 35324 2011-10-04 01:48:45Z zardosht $" #ident "$Id$"
#include "test.h" #include "test.h"
#include <stdio.h> #include <stdio.h>
......
/* -*- mode: C; c-basic-offset: 4 -*- */ /* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2007 Tokutek Inc. All rights reserved."
#ident "$Id: test_stress1.c 35324 2011-10-04 01:48:45Z zardosht $" #ident "$Id$"
#include "test.h" #include "test.h"
#include <stdio.h> #include <stdio.h>
......
/* -*- mode: C; c-basic-offset: 4 -*- */ /* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2007 Tokutek Inc. All rights reserved."
#ident "$Id: test_stress1.c 35324 2011-10-04 01:48:45Z zardosht $" #ident "$Id$"
#include "test.h" #include "test.h"
#include <stdio.h> #include <stdio.h>
......
/* -*- mode: C; c-basic-offset: 4 -*- */ /* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2007 Tokutek Inc. All rights reserved."
#ident "$Id: test_stress1.c 35324 2011-10-04 01:48:45Z zardosht $" #ident "$Id$"
#include "test.h" #include "test.h"
#include <stdio.h> #include <stdio.h>
......
/* -*- mode: C; c-basic-offset: 4 -*- */ /* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2007 Tokutek Inc. All rights reserved."
#ident "$Id: test_stress1.c 35324 2011-10-04 01:48:45Z zardosht $" #ident "$Id$"
#include "test.h" #include "test.h"
#include <stdio.h> #include <stdio.h>
......
/* -*- mode: C; c-basic-offset: 4 -*- */ /* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2007 Tokutek Inc. All rights reserved."
#ident "$Id: test_stress1.c 35324 2011-10-04 01:48:45Z zardosht $" #ident "$Id$"
#include "test.h" #include "test.h"
#include <stdio.h> #include <stdio.h>
......
/* -*- mode: C; c-basic-offset: 4 -*- */ /* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2007 Tokutek Inc. All rights reserved."
#ident "$Id: test_stress1.c 35324 2011-10-04 01:48:45Z zardosht $" #ident "$Id$"
#include "test.h" #include "test.h"
#include <stdio.h> #include <stdio.h>
......
/* -*- mode: C; c-basic-offset: 4 -*- */ /* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2009 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2009 Tokutek Inc. All rights reserved."
#ident "$Id: env_startup.c 20778 2010-05-28 20:38:42Z yfogel $" #ident "$Id$"
/* Purpose of this test is to verify that a failed assert will /* Purpose of this test is to verify that a failed assert will
* cause a panic, which should be visible via engine status. * cause a panic, which should be visible via engine status.
......
/* -*- mode: C; c-basic-offset: 4 -*- */ /* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2007 Tokutek Inc. All rights reserved."
#ident "$Id: test_stress1.c 35109 2011-09-27 18:41:25Z leifwalsh $" #ident "$Id$"
#include "test.h" #include "test.h"
#include <stdio.h> #include <stdio.h>
......
/* -*- mode: C; c-basic-offset: 4 -*- */ /* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved." #ident "Copyright (c) 2007 Tokutek Inc. All rights reserved."
#ident "$Id: test_stress2.c 35151 2011-09-29 01:32:27Z zardosht $" #ident "$Id$"
#include "test.h" #include "test.h"
#include <stdio.h> #include <stdio.h>
......
/* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved."
#ident "$Id: test_stress7.c 38515 2012-01-05 20:48:10Z leifwalsh $"
#include "test.h"
#include <stdio.h>
#include <stdlib.h>
#include <toku_pthread.h>
#include <unistd.h>
#include <memory.h>
#include <sys/stat.h>
#include <db.h>
#include "threaded_stress_test_helpers.h"
static void
stress_table(DB_ENV *env, DB **dbp, struct cli_args *cli_args) {
int n = cli_args->num_elements;
//
// run updates and queries (scans, point queries, keyranges) while HOT optimize runs in the background
//
if (verbose) printf("starting creation of pthreads\n");
const int num_threads = 4 + cli_args->num_update_threads + cli_args->num_ptquery_threads;
struct arg myargs[num_threads];
for (int i = 0; i < num_threads; i++) {
arg_init(&myargs[i], n, dbp, env, cli_args);
}
// make the forward fast scanner
myargs[0].fast = TRUE;
myargs[0].fwd = TRUE;
myargs[0].operation = scan_op;
// make the backward slow scanner
myargs[1].fast = FALSE;
myargs[1].fwd = FALSE;
myargs[1].operation = scan_op;
// make the guy that runs HOT in the background
myargs[2].operation = hot_op;
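// make the guy that does keyrange queries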
myargs[3].operation = keyrange_op;
for (int i = 4; i < 4 + cli_args->num_update_threads; ++i) {
myargs[i].operation = update_op;
}
// make the guy that does point queries
for (int i = 4 + cli_args->num_update_threads; i < num_threads; i++) {
myargs[i].operation = ptquery_op;
}
run_workers(myargs, num_threads, cli_args->time_of_test, false);
}
int
test_main(int argc, char *const argv[]) {
struct cli_args args = DEFAULT_ARGS;
// use a very short checkpointing period so checkpoints happen frequently during the test
args.checkpointing_period = 1;
parse_stress_test_args(argc, argv, &args);
stress_test_main(&args);
return 0;
}
...@@ -555,6 +555,14 @@ static int UU() update_broadcast_op(DB_ENV *UU(env), DB **dbp, DB_TXN *txn, ARG ...@@ -555,6 +555,14 @@ static int UU() update_broadcast_op(DB_ENV *UU(env), DB **dbp, DB_TXN *txn, ARG
return r; return r;
} }
static int UU() hot_op(DB_ENV *UU(env), DB **dbp, DB_TXN *UU(txn), ARG UU(arg)) {
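// Run a single HOT optimization pass over the dictionary; both optional arguments are left NULL.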
int r;
DB* db = *dbp;
r = db->hot_optimize(db, NULL, NULL);
CKERR(r);
return r;
}
static int UU() remove_and_recreate_me(DB_ENV *env, DB **dbp, DB_TXN *UU(txn), ARG UU(arg)) { static int UU() remove_and_recreate_me(DB_ENV *env, DB **dbp, DB_TXN *UU(txn), ARG UU(arg)) {
int r; int r;
r = (*dbp)->close(*dbp, 0); CKERR(r); r = (*dbp)->close(*dbp, 0); CKERR(r);
......