Commit cbab8d8e authored by John Esmet

FT-253 Remove remaining brt / brt_header era names

parent 061724be
@@ -3,7 +3,6 @@ cmake_policy(SET CMP0012 NEW)
 ## these tests shouldn't run with valgrind
 list(APPEND CTEST_CUSTOM_MEMCHECK_IGNORE
 ft/bnc-insert-benchmark
-ft/brt-serialize-benchmark
 ft/ft_loader-test-extractor-1
 ft/ft_loader-test-extractor-2
 ft/ft_loader-test-extractor-3
......
@@ -145,7 +145,7 @@ void block_allocator_alloc_block_at (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t
 // Effect: Allocate a block of the specified size at a particular offset.
 // Aborts if anything goes wrong.
 // The performance of this function may be as bad as Theta(N), where N is the number of blocks currently in use.
-// Usage note: To allocate several blocks (e.g., when opening a BRT), use block_allocator_alloc_blocks_at().
+// Usage note: To allocate several blocks (e.g., when opening a FT), use block_allocator_alloc_blocks_at().
 // Requires: The resulting block may not overlap any other allocated block.
 // And the offset must be a multiple of the block alignment.
 // Parameters:
......
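Side note on the usage comment above: allocating a block at a fixed offset has to be checked against every block already in use, which is where the Theta(N) bound comes from. A minimal self-contained sketch of that idea (types and names below are illustrative, not the TokuFT block_allocator API):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct block { uint64_t offset, size; };

    struct toy_block_allocator {
        uint64_t alignment;
        std::vector<block> in_use;   // kept sorted by offset

        // Allocate `size` bytes at exactly `offset`; asserts on any misuse.
        // Scans every block currently in use, so the cost is Theta(N).
        void alloc_block_at(uint64_t size, uint64_t offset) {
            assert(alignment > 0 && offset % alignment == 0);
            size_t pos = 0;
            for (const block &b : in_use) {
                // The new block may not overlap any existing block.
                assert(offset + size <= b.offset || b.offset + b.size <= offset);
                if (b.offset < offset) {
                    pos++;
                }
            }
            in_use.insert(in_use.begin() + pos, block{offset, size});
        }
    };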
@@ -109,8 +109,8 @@ void toku_blocktable_create_new(BLOCK_TABLE *btp);
 int toku_blocktable_create_from_buffer(int fd, BLOCK_TABLE *btp, DISKOFF location_on_disk, DISKOFF size_on_disk, unsigned char *translation_buffer);
 void toku_blocktable_destroy(BLOCK_TABLE *btp);
-void toku_ft_lock(FT h);
-void toku_ft_unlock(FT h);
+void toku_ft_lock(FT ft);
+void toku_ft_unlock(FT ft);
 void toku_block_translation_note_start_checkpoint_unlocked(BLOCK_TABLE bt);
 void toku_block_translation_note_end_checkpoint(BLOCK_TABLE bt, int fd);
@@ -118,15 +118,15 @@ void toku_block_translation_note_skipped_checkpoint(BLOCK_TABLE bt);
 void toku_maybe_truncate_file_on_open(BLOCK_TABLE bt, int fd);
 //Blocknums
-void toku_allocate_blocknum(BLOCK_TABLE bt, BLOCKNUM *res, FT h);
-void toku_allocate_blocknum_unlocked(BLOCK_TABLE bt, BLOCKNUM *res, FT h);
-void toku_free_blocknum(BLOCK_TABLE bt, BLOCKNUM *b, FT h, bool for_checkpoint);
+void toku_allocate_blocknum(BLOCK_TABLE bt, BLOCKNUM *res, FT ft);
+void toku_allocate_blocknum_unlocked(BLOCK_TABLE bt, BLOCKNUM *res, FT ft);
+void toku_free_blocknum(BLOCK_TABLE bt, BLOCKNUM *b, FT ft, bool for_checkpoint);
 void toku_verify_blocknum_allocated(BLOCK_TABLE bt, BLOCKNUM b);
 void toku_block_verify_no_data_blocks_except_root(BLOCK_TABLE bt, BLOCKNUM root);
 void toku_free_unused_blocknums(BLOCK_TABLE bt, BLOCKNUM root);
 void toku_block_verify_no_free_blocknums(BLOCK_TABLE bt);
-void toku_realloc_descriptor_on_disk(BLOCK_TABLE bt, DISKOFF size, DISKOFF *offset, FT h, int fd);
-void toku_realloc_descriptor_on_disk_unlocked(BLOCK_TABLE bt, DISKOFF size, DISKOFF *offset, FT h);
+void toku_realloc_descriptor_on_disk(BLOCK_TABLE bt, DISKOFF size, DISKOFF *offset, FT ft, int fd);
+void toku_realloc_descriptor_on_disk_unlocked(BLOCK_TABLE bt, DISKOFF size, DISKOFF *offset, FT ft);
 void toku_get_descriptor_offset_size(BLOCK_TABLE bt, DISKOFF *offset, DISKOFF *size);
 //Blocks and Blocknums
......
@@ -104,16 +104,16 @@ ftnode_get_key_and_fullhash(
 uint32_t* fullhash,
 void* extra)
 {
-FT h = (FT) extra;
+FT ft = (FT) extra;
 BLOCKNUM name;
-toku_allocate_blocknum(h->blocktable, &name, h);
+toku_allocate_blocknum(ft->blocktable, &name, ft);
 *cachekey = name;
-*fullhash = toku_cachetable_hash(h->cf, name);
+*fullhash = toku_cachetable_hash(ft->cf, name);
 }
 void
 cachetable_put_empty_node_with_dep_nodes(
-FT h,
+FT ft,
 uint32_t num_dependent_nodes,
 FTNODE* dependent_nodes,
 BLOCKNUM* name, //output
@@ -129,12 +129,12 @@ cachetable_put_empty_node_with_dep_nodes(
 }
 toku_cachetable_put_with_dep_pairs(
-h->cf,
+ft->cf,
 ftnode_get_key_and_fullhash,
 new_node,
 make_pair_attr(sizeof(FTNODE)),
-get_write_callbacks_for_node(h),
-h,
+get_write_callbacks_for_node(ft),
+ft,
 num_dependent_nodes,
 dependent_pairs,
 dependent_dirty_bits,
@@ -319,7 +319,7 @@ toku_pin_ftnode_for_query(
 void
 toku_pin_ftnode_with_dep_nodes(
-FT h,
+FT ft,
 BLOCKNUM blocknum,
 uint32_t fullhash,
 FTNODE_FETCH_EXTRA bfe,
@@ -338,12 +338,12 @@ toku_pin_ftnode_with_dep_nodes(
 }
 int r = toku_cachetable_get_and_pin_with_dep_pairs(
-h->cf,
+ft->cf,
 blocknum,
 fullhash,
 &node_v,
 NULL,
-get_write_callbacks_for_node(h),
+get_write_callbacks_for_node(ft),
 toku_ftnode_fetch_callback,
 toku_ftnode_pf_req_callback,
 toku_ftnode_pf_callback,
@@ -356,7 +356,7 @@ toku_pin_ftnode_with_dep_nodes(
 invariant_zero(r);
 FTNODE node = (FTNODE) node_v;
 if (lock_type != PL_READ && node->height > 0 && move_messages) {
-toku_move_ftnode_messages_to_stale(h, node);
+toku_move_ftnode_messages_to_stale(ft, node);
 }
 *node_p = node;
 }
......
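The first hunk above shows the pattern this file relies on: the cachetable calls back into ftnode_get_key_and_fullhash, with the FT smuggled through an opaque extra pointer, so the new node's blocknum and fullhash are chosen by FT code rather than by the cachetable itself. A small self-contained analogue of that callback-through-void* shape (illustrative only, not the toku_cachetable API):

    #include <cstdint>
    #include <map>

    typedef void (*get_key_and_fullhash_fn)(uint64_t *cachekey, uint32_t *fullhash, void *extra);

    struct toy_cachetable {
        std::map<uint64_t, void *> pairs;

        // Ask the caller, via callback, which key/hash a brand-new value should be
        // filed under, then insert it. `extra` is forwarded untouched, the way the
        // FT pointer is forwarded above.
        void put_with_key_callback(void *value, get_key_and_fullhash_fn cb, void *extra) {
            uint64_t key;
            uint32_t hash;
            cb(&key, &hash, extra);
            (void) hash;              // a real cachetable would also bucket by the hash
            pairs[key] = value;
        }
    };

    struct toy_ft { uint64_t next_blocknum; };

    // Plays the role of ftnode_get_key_and_fullhash: hand out a fresh block number.
    static void toy_get_key_and_fullhash(uint64_t *cachekey, uint32_t *fullhash, void *extra) {
        toy_ft *ft = static_cast<toy_ft *>(extra);
        *cachekey = ft->next_blocknum++;
        *fullhash = static_cast<uint32_t>(*cachekey * 2654435761u);  // toy hash
    }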
@@ -103,7 +103,7 @@ PATENT RIGHTS GRANT:
 */
 void
 cachetable_put_empty_node_with_dep_nodes(
-FT h,
+FT ft,
 uint32_t num_dependent_nodes,
 FTNODE* dependent_nodes,
 BLOCKNUM* name, //output
@@ -118,7 +118,7 @@ cachetable_put_empty_node_with_dep_nodes(
 */
 void
 create_new_ftnode_with_dep_nodes(
-FT h,
+FT ft,
 FTNODE *result,
 int height,
 int n_children,
@@ -156,7 +156,7 @@ toku_pin_ftnode_for_query(
 // Pins an ftnode without dependent pairs
 void toku_pin_ftnode(
-FT h,
+FT ft,
 BLOCKNUM blocknum,
 uint32_t fullhash,
 FTNODE_FETCH_EXTRA bfe,
@@ -168,7 +168,7 @@ void toku_pin_ftnode(
 // Pins an ftnode with dependent pairs
 // Unlike toku_pin_ftnode_for_query, this function blocks until the node is pinned.
 void toku_pin_ftnode_with_dep_nodes(
-FT h,
+FT ft,
 BLOCKNUM blocknum,
 uint32_t fullhash,
 FTNODE_FETCH_EXTRA bfe,
@@ -188,7 +188,7 @@ int toku_maybe_pin_ftnode_clean(FT ft, BLOCKNUM blocknum, uint32_t fullhash, pai
 /**
 * Effect: Unpin an ftnode.
 */
-void toku_unpin_ftnode(FT h, FTNODE node);
+void toku_unpin_ftnode(FT ft, FTNODE node);
 void toku_unpin_ftnode_read_only(FT ft, FTNODE node);
 // Effect: Swaps pair values of two pinned nodes
......
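Every toku_pin_ftnode* declaration above has a matching unpin (toku_unpin_ftnode or the read-only variant), and the caller is responsible for pairing them. A hedged, self-contained sketch of how such a pin/unpin bracket can be made early-return safe with a scope guard; TokuFT itself pairs the calls by hand, so this is only an illustration:

    // A generic pin guard: given a pin that already succeeded, guarantee the
    // matching unpin runs when the scope exits, even on an early return.
    template <typename Handle, typename Node>
    class pin_guard {
        Handle _h;
        Node *_node;
        void (*_unpin)(Handle, Node *);
    public:
        pin_guard(Handle h, Node *node, void (*unpin)(Handle, Node *))
            : _h(h), _node(node), _unpin(unpin) {}
        ~pin_guard() { _unpin(_h, _node); }
        Node *get() const { return _node; }
        pin_guard(const pin_guard &) = delete;
        pin_guard &operator=(const pin_guard &) = delete;
    };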
@@ -115,7 +115,7 @@ typedef struct flusher_advice FLUSHER_ADVICE;
 * Cleaner thread merging leaf nodes: follow down to a key
 * Hot optimize table: follow down to the right of a key
 */
-typedef int (*FA_PICK_CHILD)(FT h, FTNODE parent, void* extra);
+typedef int (*FA_PICK_CHILD)(FT ft, FTNODE parent, void* extra);
 /**
 * Decide whether to call `toku_ft_flush_some_child` on the child if it is
@@ -139,7 +139,7 @@ typedef bool (*FA_SHOULD_RECURSIVELY_FLUSH)(FTNODE child, void* extra);
 * Hot optimize table: just do the merge
 */
 typedef void (*FA_MAYBE_MERGE_CHILD)(struct flusher_advice *fa,
-FT h,
+FT ft,
 FTNODE parent,
 int childnum,
 FTNODE child,
@@ -172,7 +172,7 @@ typedef void (*FA_UPDATE_STATUS)(FTNODE child, int dirtied, void* extra);
 * by `ft_split_child`. If -1 is returned, `ft_split_child` defaults to
 * the old behavior.
 */
-typedef int (*FA_PICK_CHILD_AFTER_SPLIT)(FT h,
+typedef int (*FA_PICK_CHILD_AFTER_SPLIT)(FT ft,
 FTNODE node,
 int childnuma,
 int childnumb,
@@ -223,14 +223,14 @@ dont_destroy_basement_nodes(void* extra);
 void
 default_merge_child(struct flusher_advice *fa,
-FT h,
+FT ft,
 FTNODE parent,
 int childnum,
 FTNODE child,
 void* extra);
 int
-default_pick_child_after_split(FT h,
+default_pick_child_after_split(FT ft,
 FTNODE parent,
 int childnuma,
 int childnumb,
......
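The FA_* typedefs above are the pieces of struct flusher_advice: each traversal strategy (background flusher, cleaner thread, hot optimizer) supplies its own pick-child, should-recurse, and merge decisions while sharing one flush loop. A stripped-down analogue of that strategy-struct pattern (all names below are illustrative, not the ft-flusher API):

    struct toy_node { int height; int n_children; };

    // Bundle of strategy callbacks, in the spirit of struct flusher_advice.
    struct toy_flusher_advice {
        int  (*pick_child)(toy_node *parent, void *extra);
        bool (*should_recursively_flush)(toy_node *child, void *extra);
        void *extra;   // per-strategy state handed to every callback
    };

    // One traversal loop serves every strategy; it only talks to the callbacks.
    static void toy_flush_some_child(toy_node *parent, toy_node **children,
                                     const toy_flusher_advice *fa) {
        int childnum = fa->pick_child(parent, fa->extra);
        toy_node *child = children[childnum];
        // ... flush the parent's buffer for childnum into child here ...
        if (child->height > 0 && fa->should_recursively_flush(child, fa->extra)) {
            toy_flush_some_child(child, children /* the child's own children, in reality */, fa);
        }
    }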
@@ -163,7 +163,7 @@ enum split_mode {
 // Given pinned node and pinned child, split child into two
 // and update node with information about its new child.
 void toku_ft_split_child(
-FT h,
+FT ft,
 FTNODE node,
 int childnum,
 FTNODE child,
@@ -189,7 +189,7 @@ void toku_ft_merge_child(
 // TODO: Rename toku_ft_leaf_split
 void
 ftleaf_split(
-FT h,
+FT ft,
 FTNODE node,
 FTNODE *nodea,
 FTNODE *nodeb,
@@ -212,7 +212,7 @@ ftleaf_split(
 void
 // TODO: Rename toku_ft_nonleaf_split
 ft_nonleaf_split(
-FT h,
+FT ft,
 FTNODE node,
 FTNODE *nodea,
 FTNODE *nodeb,
......
@@ -169,7 +169,7 @@ hot_set_start_key(struct hot_flusher_extra *flusher, const DBT* start)
 }
 static int
-hot_just_pick_child(FT h,
+hot_just_pick_child(FT ft,
 FTNODE parent,
 struct hot_flusher_extra *flusher)
 {
@@ -186,8 +186,8 @@ hot_just_pick_child(FT h,
 // Find the pivot boundary.
 childnum = toku_ftnode_hot_next_child(parent,
 &flusher->highest_pivot_key,
-&h->cmp_descriptor,
-h->compare_fun);
+&ft->cmp_descriptor,
+ft->compare_fun);
 }
 return childnum;
@@ -209,12 +209,12 @@ hot_update_flusher_keys(FTNODE parent,
 // Picks which child toku_ft_flush_some_child will use for flushing and
 // recursion.
 static int
-hot_pick_child(FT h,
+hot_pick_child(FT ft,
 FTNODE parent,
 void *extra)
 {
 struct hot_flusher_extra *flusher = (struct hot_flusher_extra *) extra;
-int childnum = hot_just_pick_child(h, parent, flusher);
+int childnum = hot_just_pick_child(ft, parent, flusher);
 // Now we determine the percentage of the tree flushed so far.
@@ -244,14 +244,14 @@ hot_update_status(FTNODE UU(child),
 // one to flush into. This gives it a chance to do that, and update the
 // keys it maintains.
 static int
-hot_pick_child_after_split(FT h,
+hot_pick_child_after_split(FT ft,
 FTNODE parent,
 int childnuma,
 int childnumb,
 void *extra)
 {
 struct hot_flusher_extra *flusher = (struct hot_flusher_extra *) extra;
-int childnum = hot_just_pick_child(h, parent, flusher);
+int childnum = hot_just_pick_child(ft, parent, flusher);
 assert(childnum == childnuma || childnum == childnumb);
 hot_update_flusher_keys(parent, childnum, flusher);
 if (parent->height == 1) {
......
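hot_just_pick_child above defers to toku_ftnode_hot_next_child, which walks the parent's pivot keys to find the child whose range covers the next key the hot optimizer should flush into. A self-contained sketch of that pivot walk, using plain std::string keys instead of DBTs, a descriptor, and compare_fun, so it is only an approximation of the real comparison:

    #include <string>
    #include <vector>

    // Pivot keys split a node's key space into n_children ranges: child i covers
    // keys <= pivots[i]; the last child covers everything greater than the last pivot.
    static int toy_pick_child_by_key(const std::vector<std::string> &pivots,
                                     const std::string &key) {
        int n_children = static_cast<int>(pivots.size()) + 1;
        for (int i = 0; i < n_children - 1; i++) {
            if (key <= pivots[i]) {
                return i;   // first range that can still contain the key
            }
        }
        return n_children - 1;  // key is past every pivot: rightmost child
    }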
@@ -324,11 +324,11 @@ int toku_serialize_ftnode_to_memory (FTNODE node,
 /*out*/ size_t *n_bytes_to_write,
 /*out*/ size_t *n_uncompressed_bytes,
 /*out*/ char **bytes_to_write);
-int toku_serialize_ftnode_to(int fd, BLOCKNUM, FTNODE node, FTNODE_DISK_DATA* ndd, bool do_rebalancing, FT h, bool for_checkpoint);
+int toku_serialize_ftnode_to(int fd, BLOCKNUM, FTNODE node, FTNODE_DISK_DATA* ndd, bool do_rebalancing, FT ft, bool for_checkpoint);
 int toku_serialize_rollback_log_to (int fd, ROLLBACK_LOG_NODE log, SERIALIZED_ROLLBACK_LOG_NODE serialized_log, bool is_serialized,
-FT h, bool for_checkpoint);
+FT ft, bool for_checkpoint);
 void toku_serialize_rollback_log_to_memory_uncompressed(ROLLBACK_LOG_NODE log, SERIALIZED_ROLLBACK_LOG_NODE serialized);
-int toku_deserialize_rollback_log_from (int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE *logp, FT h);
+int toku_deserialize_rollback_log_from (int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE *logp, FT ft);
 int toku_deserialize_bp_from_disk(FTNODE node, FTNODE_DISK_DATA ndd, int childnum, int fd, struct ftnode_fetch_extra* bfe);
 int toku_deserialize_bp_from_compressed(FTNODE node, int childnum, struct ftnode_fetch_extra *bfe);
 int toku_deserialize_ftnode_from (int fd, BLOCKNUM off, uint32_t /*fullhash*/, FTNODE *ftnode, FTNODE_DISK_DATA* ndd, struct ftnode_fetch_extra* bfe);
@@ -353,7 +353,7 @@ deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ft, uint32_t version);
 void read_block_from_fd_into_rbuf(
 int fd,
 BLOCKNUM blocknum,
-FT h,
+FT ft,
 struct rbuf *rb
 );
@@ -492,7 +492,7 @@ int toku_testsetup_insert_to_leaf (FT_HANDLE ft_h, BLOCKNUM, const char *key, in
 int toku_testsetup_insert_to_nonleaf (FT_HANDLE ft_h, BLOCKNUM, enum ft_msg_type, const char *key, int keylen, const char *val, int vallen);
 void toku_pin_node_with_min_bfe(FTNODE* node, BLOCKNUM b, FT_HANDLE t);
-void toku_ft_root_put_msg(FT h, FT_MSG msg, txn_gc_info *gc_info);
+void toku_ft_root_put_msg(FT ft, FT_MSG msg, txn_gc_info *gc_info);
 void
 toku_get_node_for_verify(
@@ -667,8 +667,8 @@ void toku_ft_get_status(FT_STATUS);
 void toku_flusher_thread_set_callback(void (*callback_f)(int, void*), void* extra);
-int toku_upgrade_subtree_estimates_to_stat64info(int fd, FT h) __attribute__((nonnull));
-int toku_upgrade_msn_from_root_to_header(int fd, FT h) __attribute__((nonnull));
+int toku_upgrade_subtree_estimates_to_stat64info(int fd, FT ft) __attribute__((nonnull));
+int toku_upgrade_msn_from_root_to_header(int fd, FT ft) __attribute__((nonnull));
 // A callback function is invoked with the key, and the data.
 // The pointers (to the bytevecs) must not be modified. The data must be copied out before the callback function returns.
......
@@ -600,7 +600,7 @@ toku_bfe_leftmost_child_wanted(struct ftnode_fetch_extra *bfe, FTNODE node)
 } else if (bfe->range_lock_left_key.data == nullptr) {
 return -1;
 } else {
-return toku_ftnode_which_child(node, &bfe->range_lock_left_key, &bfe->h->cmp_descriptor, bfe->h->compare_fun);
+return toku_ftnode_which_child(node, &bfe->range_lock_left_key, &bfe->ft->cmp_descriptor, bfe->ft->compare_fun);
 }
 }
@@ -613,7 +613,7 @@ toku_bfe_rightmost_child_wanted(struct ftnode_fetch_extra *bfe, FTNODE node)
 } else if (bfe->range_lock_right_key.data == nullptr) {
 return -1;
 } else {
-return toku_ftnode_which_child(node, &bfe->range_lock_right_key, &bfe->h->cmp_descriptor, bfe->h->compare_fun);
+return toku_ftnode_which_child(node, &bfe->range_lock_right_key, &bfe->ft->cmp_descriptor, bfe->ft->compare_fun);
 }
 }
@@ -763,7 +763,7 @@ void toku_ftnode_flush_callback(
 bool is_clone
 )
 {
-FT h = (FT) extraargs;
+FT ft = (FT) extraargs;
 FTNODE ftnode = (FTNODE) ftnode_v;
 FTNODE_DISK_DATA* ndd = (FTNODE_DISK_DATA*)disk_data;
 assert(ftnode->blocknum.b == blocknum.b);
@@ -772,14 +772,14 @@ void toku_ftnode_flush_callback(
 toku_ftnode_assert_fully_in_memory(ftnode);
 if (height > 0 && !is_clone) {
 // cloned nodes already had their stale messages moved, see toku_ftnode_clone_callback()
-toku_move_ftnode_messages_to_stale(h, ftnode);
+toku_move_ftnode_messages_to_stale(ft, ftnode);
 } else if (height == 0) {
-toku_ftnode_leaf_run_gc(h, ftnode);
+toku_ftnode_leaf_run_gc(ft, ftnode);
 if (!is_clone) {
-toku_ftnode_update_disk_stats(ftnode, h, for_checkpoint);
+toku_ftnode_update_disk_stats(ftnode, ft, for_checkpoint);
 }
 }
-int r = toku_serialize_ftnode_to(fd, ftnode->blocknum, ftnode, ndd, !is_clone, h, for_checkpoint);
+int r = toku_serialize_ftnode_to(fd, ftnode->blocknum, ftnode, ndd, !is_clone, ft, for_checkpoint);
 assert_zero(r);
 ftnode->layout_version_read_from_disk = FT_LAYOUT_VERSION;
 }
@@ -800,7 +800,7 @@ void toku_ftnode_flush_callback(
 for (int i = 0; i < ftnode->n_children; i++) {
 if (BP_STATE(ftnode,i) == PT_AVAIL) {
 BASEMENTNODE bn = BLB(ftnode, i);
-toku_ft_decrease_stats(&h->in_memory_stats, bn->stat64_delta);
+toku_ft_decrease_stats(&ft->in_memory_stats, bn->stat64_delta);
 }
 }
 }
@@ -1125,11 +1125,11 @@ bool toku_ftnode_pf_req_callback(void* ftnode_pv, void* read_extraargs) {
 // we can possibly require is a single basement node
 // we find out what basement node the query cares about
 // and check if it is available
-paranoid_invariant(bfe->h->compare_fun);
+paranoid_invariant(bfe->ft->compare_fun);
 paranoid_invariant(bfe->search);
 bfe->child_to_read = toku_ft_search_which_child(
-&bfe->h->cmp_descriptor,
-bfe->h->compare_fun,
+&bfe->ft->cmp_descriptor,
+bfe->ft->compare_fun,
 node,
 bfe->search
 );
@@ -1154,7 +1154,7 @@ bool toku_ftnode_pf_req_callback(void* ftnode_pv, void* read_extraargs) {
 // we can possibly require is a single basement node
 // we find out what basement node the query cares about
 // and check if it is available
-paranoid_invariant(bfe->h->compare_fun);
+paranoid_invariant(bfe->ft->compare_fun);
 if (node->height == 0) {
 int left_child = toku_bfe_leftmost_child_wanted(bfe, node);
 int right_child = toku_bfe_rightmost_child_wanted(bfe, node);
@@ -1342,7 +1342,7 @@ int toku_ftnode_pf_callback(void* ftnode_pv, void* disk_data, void* read_extraar
 if (r == TOKUDB_BAD_CHECKSUM) {
 fprintf(stderr,
 "Checksum failure while reading node partition in file %s.\n",
-toku_cachefile_fname_in_env(bfe->h->cf));
+toku_cachefile_fname_in_env(bfe->ft->cf));
 } else {
 fprintf(stderr,
 "Error while reading node partition %d\n",
@@ -1363,9 +1363,9 @@ int toku_msg_leafval_heaviside(DBT const &kdbt, const struct toku_msg_leafval_he
 return be.compare_fun(&db, &kdbt, key);
 }
-void fill_bfe_for_full_read(struct ftnode_fetch_extra *bfe, FT h) {
+void fill_bfe_for_full_read(struct ftnode_fetch_extra *bfe, FT ft) {
 bfe->type = ftnode_fetch_all;
-bfe->h = h;
+bfe->ft = ft;
 bfe->search = nullptr;
 toku_init_dbt(&bfe->range_lock_left_key);
 toku_init_dbt(&bfe->range_lock_right_key);
@@ -1380,12 +1380,12 @@ void fill_bfe_for_full_read(struct ftnode_fetch_extra *bfe, FT h) {
 bfe->decompress_time = 0;
 }
-void fill_bfe_for_keymatch(struct ftnode_fetch_extra *bfe, FT h,
+void fill_bfe_for_keymatch(struct ftnode_fetch_extra *bfe, FT ft,
 const DBT *left, const DBT *right,
 bool disable_prefetching, bool read_all_partitions) {
-paranoid_invariant(h->h->type == FT_CURRENT);
+paranoid_invariant(ft->h->type == FT_CURRENT);
 bfe->type = ftnode_fetch_keymatch;
-bfe->h = h;
+bfe->ft = ft;
 bfe->search = nullptr;
 toku_init_dbt(&bfe->range_lock_left_key);
 toku_init_dbt(&bfe->range_lock_right_key);
@@ -1407,13 +1407,13 @@ void fill_bfe_for_keymatch(struct ftnode_fetch_extra *bfe, FT h,
 bfe->decompress_time = 0;
 }
-void fill_bfe_for_subset_read(struct ftnode_fetch_extra *bfe, FT h, ft_search *search,
+void fill_bfe_for_subset_read(struct ftnode_fetch_extra *bfe, FT ft, ft_search *search,
 const DBT *left, const DBT *right,
 bool left_is_neg_infty, bool right_is_pos_infty,
 bool disable_prefetching, bool read_all_partitions) {
-paranoid_invariant(h->h->type == FT_CURRENT);
+paranoid_invariant(ft->h->type == FT_CURRENT);
 bfe->type = ftnode_fetch_subset;
-bfe->h = h;
+bfe->ft = ft;
 bfe->search = search;
 toku_init_dbt(&bfe->range_lock_left_key);
 toku_init_dbt(&bfe->range_lock_right_key);
@@ -1437,7 +1437,7 @@ void fill_bfe_for_subset_read(struct ftnode_fetch_extra *bfe, FT h, ft_search *s
 void fill_bfe_for_min_read(struct ftnode_fetch_extra *bfe, FT ft) {
 paranoid_invariant(ft->h->type == FT_CURRENT);
 bfe->type = ftnode_fetch_none;
-bfe->h = ft;
+bfe->ft = ft;
 bfe->search = nullptr;
 toku_init_dbt(&bfe->range_lock_left_key);
 toku_init_dbt(&bfe->range_lock_right_key);
@@ -1455,7 +1455,7 @@ void fill_bfe_for_min_read(struct ftnode_fetch_extra *bfe, FT ft) {
 void fill_bfe_for_prefetch(struct ftnode_fetch_extra *bfe, FT ft, struct ft_cursor *cursor) {
 paranoid_invariant(ft->h->type == FT_CURRENT);
 bfe->type = ftnode_fetch_prefetch;
-bfe->h = ft;
+bfe->ft = ft;
 bfe->search = nullptr;
 toku_init_dbt(&bfe->range_lock_left_key);
 toku_init_dbt(&bfe->range_lock_right_key);
@@ -3175,9 +3175,8 @@ toku_ft_handle_open_with_dict_id(
 DICTIONARY_ID
 toku_ft_get_dictionary_id(FT_HANDLE ft_handle) {
-FT h = ft_handle->ft;
-DICTIONARY_ID dict_id = h->dict_id;
-return dict_id;
+FT ft = ft_handle->ft;
+return ft->dict_id;
 }
 void toku_ft_set_flags(FT_HANDLE ft_handle, unsigned int flags) {
......
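The fill_bfe_for_* functions above all populate the same ftnode_fetch_extra: which FT the node belongs to (the field this commit renames from h to ft), what kind of fetch is wanted, optional search or range keys, and prefetch flags; the fetch callbacks later consult the struct to decide which partitions to read. A compact analogue of that fill-a-fetch-descriptor pattern (field names and types are illustrative, not the real struct):

    enum toy_fetch_type { FETCH_ALL, FETCH_NONE, FETCH_PREFETCH };

    struct toy_fetch_extra {
        toy_fetch_type type;
        const void *ft;              // which tree the node belongs to
        const char *left_key;        // optional range bounds (may be nullptr)
        const char *right_key;
        bool disable_prefetching;
    };

    // Mirrors the spirit of fill_bfe_for_full_read: read every partition of the node.
    static void toy_fill_for_full_read(toy_fetch_extra *bfe, const void *ft) {
        bfe->type = FETCH_ALL;
        bfe->ft = ft;
        bfe->left_key = bfe->right_key = nullptr;
        bfe->disable_prefetching = false;
    }

    // Mirrors the spirit of fill_bfe_for_min_read: fetch only what is strictly required.
    static void toy_fill_for_min_read(toy_fetch_extra *bfe, const void *ft) {
        bfe->type = FETCH_NONE;
        bfe->ft = ft;
        bfe->left_key = bfe->right_key = nullptr;
        bfe->disable_prefetching = true;
    }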
@@ -184,7 +184,7 @@ void toku_ft_handle_close_recovery(FT_HANDLE ft_handle, LSN oplsn);
 int
 toku_ft_handle_open_with_dict_id(
-FT_HANDLE t,
+FT_HANDLE ft_h,
 const char *fname_in_env,
 int is_create,
 int only_create,
......
@@ -472,19 +472,19 @@ int toku_read_ft_and_store_in_cachefile (FT_HANDLE ft_handle, CACHEFILE cf, LSN
 // max_acceptable_lsn is the latest acceptable checkpointed version of the file.
 {
 {
-FT h;
-if ((h = (FT) toku_cachefile_get_userdata(cf))!=0) {
-*header = h;
-assert(ft_handle->options.update_fun == h->update_fun);
-assert(ft_handle->options.compare_fun == h->compare_fun);
+FT ft;
+if ((ft = (FT) toku_cachefile_get_userdata(cf))!=0) {
+*header = ft;
+assert(ft_handle->options.update_fun == ft->update_fun);
+assert(ft_handle->options.compare_fun == ft->compare_fun);
 return 0;
 }
 }
-FT h = nullptr;
+FT ft = nullptr;
 int r;
 {
 int fd = toku_cachefile_get_fd(cf);
-r = toku_deserialize_ft_from(fd, max_acceptable_lsn, &h);
+r = toku_deserialize_ft_from(fd, max_acceptable_lsn, &ft);
 if (r == TOKUDB_BAD_CHECKSUM) {
 fprintf(stderr, "Checksum failure while reading header in file %s.\n", toku_cachefile_fname_in_env(cf));
 assert(false); // make absolutely sure we crash before doing anything else
@@ -492,12 +492,12 @@ int toku_read_ft_and_store_in_cachefile (FT_HANDLE ft_handle, CACHEFILE cf, LSN
 }
 if (r!=0) return r;
 // GCC 4.8 seems to get confused by the gotos in the deserialize code and think h is maybe uninitialized.
-invariant_notnull(h);
-h->cf = cf;
-h->compare_fun = ft_handle->options.compare_fun;
-h->update_fun = ft_handle->options.update_fun;
+invariant_notnull(ft);
+ft->cf = cf;
+ft->compare_fun = ft_handle->options.compare_fun;
+ft->update_fun = ft_handle->options.update_fun;
 toku_cachefile_set_userdata(cf,
-(void*)h,
+reinterpret_cast<void *>(ft),
 ft_log_fassociate_during_checkpoint,
 ft_close,
 ft_free,
@@ -506,7 +506,7 @@ int toku_read_ft_and_store_in_cachefile (FT_HANDLE ft_handle, CACHEFILE cf, LSN
 ft_end_checkpoint,
 ft_note_pin_by_checkpoint,
 ft_note_unpin_by_checkpoint);
-*header = h;
+*header = ft;
 return 0;
 }
@@ -548,12 +548,12 @@ void toku_ft_evict_from_memory(FT ft, bool oplsn_valid, LSN oplsn) {
 }
 // Verifies there exists exactly one ft handle and returns it.
-FT_HANDLE toku_ft_get_only_existing_ft_handle(FT h) {
+FT_HANDLE toku_ft_get_only_existing_ft_handle(FT ft) {
 FT_HANDLE ft_handle_ret = NULL;
-toku_ft_grab_reflock(h);
-assert(toku_list_num_elements_est(&h->live_ft_handles) == 1);
-ft_handle_ret = toku_list_struct(toku_list_head(&h->live_ft_handles), struct ft_handle, live_ft_handle_link);
-toku_ft_release_reflock(h);
+toku_ft_grab_reflock(ft);
+assert(toku_list_num_elements_est(&ft->live_ft_handles) == 1);
+ft_handle_ret = toku_list_struct(toku_list_head(&ft->live_ft_handles), struct ft_handle, live_ft_handle_link);
+toku_ft_release_reflock(ft);
 return ft_handle_ret;
 }
@@ -628,27 +628,27 @@ toku_ft_init(FT ft,
 // Open an ft for use by redirect. The new ft must have the same dict_id as the old_ft passed in. (FILENUM is assigned by the ft_handle_open() function.)
 static int
-ft_handle_open_for_redirect(FT_HANDLE *new_ftp, const char *fname_in_env, TOKUTXN txn, FT old_h) {
-FT_HANDLE t;
-assert(old_h->dict_id.dictid != DICTIONARY_ID_NONE.dictid);
-toku_ft_handle_create(&t);
-toku_ft_set_bt_compare(t, old_h->compare_fun);
-toku_ft_set_update(t, old_h->update_fun);
-toku_ft_handle_set_nodesize(t, old_h->h->nodesize);
-toku_ft_handle_set_basementnodesize(t, old_h->h->basementnodesize);
-toku_ft_handle_set_compression_method(t, old_h->h->compression_method);
-toku_ft_handle_set_fanout(t, old_h->h->fanout);
-CACHETABLE ct = toku_cachefile_get_cachetable(old_h->cf);
-int r = toku_ft_handle_open_with_dict_id(t, fname_in_env, 0, 0, ct, txn, old_h->dict_id);
+ft_handle_open_for_redirect(FT_HANDLE *new_ftp, const char *fname_in_env, TOKUTXN txn, FT old_ft) {
+FT_HANDLE ft_handle;
+assert(old_ft->dict_id.dictid != DICTIONARY_ID_NONE.dictid);
+toku_ft_handle_create(&ft_handle);
+toku_ft_set_bt_compare(ft_handle, old_ft->compare_fun);
+toku_ft_set_update(ft_handle, old_ft->update_fun);
+toku_ft_handle_set_nodesize(ft_handle, old_ft->h->nodesize);
+toku_ft_handle_set_basementnodesize(ft_handle, old_ft->h->basementnodesize);
+toku_ft_handle_set_compression_method(ft_handle, old_ft->h->compression_method);
+toku_ft_handle_set_fanout(ft_handle, old_ft->h->fanout);
+CACHETABLE ct = toku_cachefile_get_cachetable(old_ft->cf);
+int r = toku_ft_handle_open_with_dict_id(ft_handle, fname_in_env, 0, 0, ct, txn, old_ft->dict_id);
 if (r != 0) {
 goto cleanup;
 }
-assert(t->ft->dict_id.dictid == old_h->dict_id.dictid);
-*new_ftp = t;
+assert(ft_handle->ft->dict_id.dictid == old_ft->dict_id.dictid);
+*new_ftp = ft_handle;
 cleanup:
 if (r != 0) {
-toku_ft_handle_close(t);
+toku_ft_handle_close(ft_handle);
 }
 return r;
 }
@@ -656,81 +656,81 @@ ft_handle_open_for_redirect(FT_HANDLE *new_ftp, const char *fname_in_env, TOKUTX
 // This function performs most of the work to redirect a dictionary to different file.
 // It is called for redirect and to abort a redirect. (This function is almost its own inverse.)
 static int
-dictionary_redirect_internal(const char *dst_fname_in_env, FT src_h, TOKUTXN txn, FT *dst_hp) {
+dictionary_redirect_internal(const char *dst_fname_in_env, FT src_ft, TOKUTXN txn, FT *dst_ftp) {
 int r;
-FILENUM src_filenum = toku_cachefile_filenum(src_h->cf);
+FILENUM src_filenum = toku_cachefile_filenum(src_ft->cf);
 FILENUM dst_filenum = FILENUM_NONE;
-FT dst_h = NULL;
+FT dst_ft = NULL;
 struct toku_list *list;
 // open a dummy ft based off of
 // dst_fname_in_env to get the header
 // then we will change all the ft's to have
-// their headers point to dst_h instead of src_h
+// their headers point to dst_ft instead of src_ft
 FT_HANDLE tmp_dst_ft = NULL;
-r = ft_handle_open_for_redirect(&tmp_dst_ft, dst_fname_in_env, txn, src_h);
+r = ft_handle_open_for_redirect(&tmp_dst_ft, dst_fname_in_env, txn, src_ft);
 if (r != 0) {
 goto cleanup;
 }
-dst_h = tmp_dst_ft->ft;
+dst_ft = tmp_dst_ft->ft;
 // some sanity checks on dst_filenum
-dst_filenum = toku_cachefile_filenum(dst_h->cf);
+dst_filenum = toku_cachefile_filenum(dst_ft->cf);
 assert(dst_filenum.fileid!=FILENUM_NONE.fileid);
 assert(dst_filenum.fileid!=src_filenum.fileid); //Cannot be same file.
-// for each live ft_handle, ft_handle->ft is currently src_h
+// for each live ft_handle, ft_handle->ft is currently src_ft
 // we want to change it to dummy_dst
-toku_ft_grab_reflock(src_h);
-while (!toku_list_empty(&src_h->live_ft_handles)) {
-list = src_h->live_ft_handles.next;
+toku_ft_grab_reflock(src_ft);
+while (!toku_list_empty(&src_ft->live_ft_handles)) {
+list = src_ft->live_ft_handles.next;
 FT_HANDLE src_handle = NULL;
 src_handle = toku_list_struct(list, struct ft_handle, live_ft_handle_link);
 toku_list_remove(&src_handle->live_ft_handle_link);
-toku_ft_note_ft_handle_open(dst_h, src_handle);
+toku_ft_note_ft_handle_open(dst_ft, src_handle);
 if (src_handle->redirect_callback) {
 src_handle->redirect_callback(src_handle, src_handle->redirect_callback_extra);
 }
 }
-assert(dst_h);
-// making sure that we are not leaking src_h
-assert(toku_ft_needed_unlocked(src_h));
-toku_ft_release_reflock(src_h);
+assert(dst_ft);
+// making sure that we are not leaking src_ft
+assert(toku_ft_needed_unlocked(src_ft));
+toku_ft_release_reflock(src_ft);
 toku_ft_handle_close(tmp_dst_ft);
-*dst_hp = dst_h;
+*dst_ftp = dst_ft;
 cleanup:
 return r;
 }
-//This is the 'abort redirect' function. The redirect of old_h to new_h was done
-//and now must be undone, so here we redirect new_h back to old_h.
+//This is the 'abort redirect' function. The redirect of old_ft to new_ft was done
+//and now must be undone, so here we redirect new_ft back to old_ft.
 int
-toku_dictionary_redirect_abort(FT old_h, FT new_h, TOKUTXN txn) {
-char *old_fname_in_env = toku_cachefile_fname_in_env(old_h->cf);
+toku_dictionary_redirect_abort(FT old_ft, FT new_ft, TOKUTXN txn) {
+char *old_fname_in_env = toku_cachefile_fname_in_env(old_ft->cf);
 int r;
 {
-FILENUM old_filenum = toku_cachefile_filenum(old_h->cf);
-FILENUM new_filenum = toku_cachefile_filenum(new_h->cf);
+FILENUM old_filenum = toku_cachefile_filenum(old_ft->cf);
+FILENUM new_filenum = toku_cachefile_filenum(new_ft->cf);
 assert(old_filenum.fileid!=new_filenum.fileid); //Cannot be same file.
 //No living fts in old header.
-toku_ft_grab_reflock(old_h);
-assert(toku_list_empty(&old_h->live_ft_handles));
-toku_ft_release_reflock(old_h);
+toku_ft_grab_reflock(old_ft);
+assert(toku_list_empty(&old_ft->live_ft_handles));
+toku_ft_release_reflock(old_ft);
 }
-FT dst_h;
-// redirect back from new_h to old_h
-r = dictionary_redirect_internal(old_fname_in_env, new_h, txn, &dst_h);
+FT dst_ft;
+// redirect back from new_ft to old_ft
+r = dictionary_redirect_internal(old_fname_in_env, new_ft, txn, &dst_ft);
 if (r == 0) {
-assert(dst_h == old_h);
+assert(dst_ft == old_ft);
 }
 return r;
 }
......
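The first hunk of this file (toku_read_ft_and_store_in_cachefile) shows the open-once discipline: if the cachefile already carries a deserialized FT as its userdata, reuse it and just re-check the comparison/update functions; otherwise deserialize from the fd, wire up the callbacks, and store the result as userdata for later openers. A small self-contained analogue of that shape (illustrative names, not the cachefile API):

    struct toy_header { int version; };

    struct toy_cachefile {
        int fd;
        toy_header *userdata;   // non-null once some opener has deserialized the header
    };

    // Stand-in for toku_deserialize_ft_from: fills *out on success and returns 0.
    static int toy_deserialize_header(int fd, toy_header **out) {
        (void) fd;
        *out = new toy_header{1};
        return 0;
    }

    // Mirrors the shape of toku_read_ft_and_store_in_cachefile above.
    static int toy_read_header_and_store(toy_cachefile *cf, toy_header **header) {
        if (cf->userdata != nullptr) {   // someone already opened it: reuse the cached header
            *header = cf->userdata;
            return 0;
        }
        toy_header *hdr = nullptr;
        int r = toy_deserialize_header(cf->fd, &hdr);
        if (r != 0) {
            return r;
        }
        cf->userdata = hdr;              // cache it for every later opener of the same file
        *header = hdr;
        return 0;
    }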
@@ -110,7 +110,7 @@ void toku_ft_grab_reflock(FT ft);
 void toku_ft_release_reflock(FT ft);
 void toku_ft_create(FT *ftp, FT_OPTIONS options, CACHEFILE cf, TOKUTXN txn);
-void toku_ft_free (FT h);
+void toku_ft_free (FT ft);
 int toku_read_ft_and_store_in_cachefile (FT_HANDLE ft_h, CACHEFILE cf, LSN max_acceptable_lsn, FT *header);
 void toku_ft_note_ft_handle_open(FT ft, FT_HANDLE live);
@@ -122,7 +122,7 @@ bool toku_ft_has_one_reference_unlocked(FT ft);
 // will have to read in the ft in a new cachefile and new FT object.
 void toku_ft_evict_from_memory(FT ft, bool oplsn_valid, LSN oplsn);
-FT_HANDLE toku_ft_get_only_existing_ft_handle(FT h);
+FT_HANDLE toku_ft_get_only_existing_ft_handle(FT ft);
 void toku_ft_note_hot_begin(FT_HANDLE ft_h);
 void toku_ft_note_hot_complete(FT_HANDLE ft_h, bool success, MSN msn_at_start_of_hot);
@@ -141,18 +141,18 @@ toku_ft_init(
 int toku_dictionary_redirect_abort(FT old_h, FT new_h, TOKUTXN txn) __attribute__ ((warn_unused_result));
 int toku_dictionary_redirect (const char *dst_fname_in_env, FT_HANDLE old_ft, TOKUTXN txn);
-void toku_reset_root_xid_that_created(FT h, TXNID new_root_xid_that_created);
+void toku_reset_root_xid_that_created(FT ft, TXNID new_root_xid_that_created);
 // Reset the root_xid_that_created field to the given value.
 // This redefines which xid created the dictionary.
-void toku_ft_add_txn_ref(FT h);
-void toku_ft_remove_txn_ref(FT h);
-void toku_calculate_root_offset_pointer ( FT h, CACHEKEY* root_key, uint32_t *roothash);
-void toku_ft_set_new_root_blocknum(FT h, CACHEKEY new_root_key);
-LSN toku_ft_checkpoint_lsn(FT h) __attribute__ ((warn_unused_result));
-void toku_ft_stat64 (FT h, struct ftstat64_s *s);
-void toku_ft_get_fractal_tree_info64 (FT h, struct ftinfo64 *s);
+void toku_ft_add_txn_ref(FT ft);
+void toku_ft_remove_txn_ref(FT ft);
+void toku_calculate_root_offset_pointer (FT ft, CACHEKEY* root_key, uint32_t *roothash);
+void toku_ft_set_new_root_blocknum(FT ft, CACHEKEY new_root_key);
+LSN toku_ft_checkpoint_lsn(FT ft) __attribute__ ((warn_unused_result));
+void toku_ft_stat64 (FT ft, struct ftstat64_s *s);
+void toku_ft_get_fractal_tree_info64 (FT ft, struct ftinfo64 *s);
 int toku_ft_iterate_fractal_tree_block_map(FT ft, int (*iter)(uint64_t,int64_t,int64_t,int64_t,int64_t,void*), void *iter_extra);
 // unconditionally set the descriptor for an open FT. can't do this when
......
@@ -642,7 +642,7 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp,
 return 0;
 }
-int toku_ft_loader_open (/* out */ FTLOADER *blp,
+int toku_ft_loader_open (FTLOADER *blp, /* out */
 CACHETABLE cachetable,
 generate_row_for_put_func g,
 DB *src_db,
@@ -656,9 +656,9 @@ int toku_ft_loader_open (/* out */ FTLOADER *blp,
 uint64_t reserve_memory_size,
 bool compress_intermediates,
 bool allow_puts) {
-// Effect: called by DB_ENV->create_loader to create a brt loader.
+// Effect: called by DB_ENV->create_loader to create an ft loader.
 // Arguments:
-// blp Return the brt loader here.
+// blp Return a ft loader ("bulk loader") here.
 // g The function for generating a row
 // src_db The source database. Needed by g. May be NULL if that's ok with g.
 // N The number of dbs to create.
@@ -2220,16 +2220,16 @@ struct dbout {
 int64_t n_translations_limit;
 struct translation *translation;
 toku_mutex_t mutex;
-FT h;
+FT ft;
 };
-static inline void dbout_init(struct dbout *out, FT h) {
+static inline void dbout_init(struct dbout *out, FT ft) {
 out->fd = -1;
 out->current_off = 0;
 out->n_translations = out->n_translations_limit = 0;
 out->translation = NULL;
 toku_mutex_init(&out->mutex, NULL);
-out->h = h;
+out->ft = ft;
 }
 static inline void dbout_destroy(struct dbout *out) {
@@ -2615,7 +2615,7 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
 {
 invariant(sts.n_subtrees==1);
-out.h->h->root_blocknum = make_blocknum(sts.subtrees[0].block);
+out.ft->h->root_blocknum = make_blocknum(sts.subtrees[0].block);
 toku_free(sts.subtrees); sts.subtrees = NULL;
 // write the descriptor
@@ -3037,7 +3037,7 @@ static int write_translation_table (struct dbout *out, long long *off_of_transla
 static int
 write_header (struct dbout *out, long long translation_location_on_disk, long long translation_size_on_disk) {
 int result = 0;
-size_t size = toku_serialize_ft_size(out->h->h);
+size_t size = toku_serialize_ft_size(out->ft->h);
 size_t alloced_size = roundup_to_multiple(512, size);
 struct wbuf wbuf;
 char *MALLOC_N_ALIGNED(512, alloced_size, buf);
@@ -3045,8 +3045,8 @@ write_header (struct dbout *out, long long translation_location_on_disk, long lo
 result = get_error_errno();
 } else {
 wbuf_init(&wbuf, buf, size);
-out->h->h->on_disk_stats = out->h->in_memory_stats;
-toku_serialize_ft_to_wbuf(&wbuf, out->h->h, translation_location_on_disk, translation_size_on_disk);
+out->ft->h->on_disk_stats = out->ft->in_memory_stats;
+toku_serialize_ft_to_wbuf(&wbuf, out->ft->h, translation_location_on_disk, translation_size_on_disk);
 for (size_t i=size; i<alloced_size; i++) buf[i]=0; // initialize all those unused spots to zero
 if (wbuf.ndone != size)
 result = EINVAL;
......
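write_header in the last two hunks serializes the header into a buffer whose size is rounded up to a multiple of 512 and zero-fills the slack, which is what a sector-aligned write of the header requires. A minimal self-contained sketch of that round-up-and-pad step (illustrative; the real code goes through the loader's wbuf and MALLOC_N_ALIGNED helpers):

    #include <cstdlib>
    #include <cstring>

    static size_t roundup_to_multiple(size_t multiple, size_t size) {
        return ((size + multiple - 1) / multiple) * multiple;
    }

    // Copy `payload` into a 512-byte-aligned buffer whose length is rounded up
    // to a multiple of 512, zeroing the slack, as write_header does before writing.
    // Uses POSIX posix_memalign; the caller frees the result with free().
    static char *make_padded_write_buffer(const void *payload, size_t size, size_t *alloced_size_out) {
        size_t alloced_size = roundup_to_multiple(512, size);
        void *buf = nullptr;
        if (posix_memalign(&buf, 512, alloced_size) != 0) {
            return nullptr;   // caller turns this into an errno-style error
        }
        memcpy(buf, payload, size);
        memset(static_cast<char *>(buf) + size, 0, alloced_size - size);  // zero the unused tail
        *alloced_size_out = alloced_size;
        return static_cast<char *>(buf);
    }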
@@ -337,11 +337,11 @@ void toku_ftnode_clone_partitions(FTNODE node, FTNODE cloned_node) {
 }
 }
-void toku_evict_bn_from_memory(FTNODE node, int childnum, FT h) {
+void toku_evict_bn_from_memory(FTNODE node, int childnum, FT ft) {
 // free the basement node
 assert(!node->dirty);
 BASEMENTNODE bn = BLB(node, childnum);
-toku_ft_decrease_stats(&h->in_memory_stats, bn->stat64_delta);
+toku_ft_decrease_stats(&ft->in_memory_stats, bn->stat64_delta);
 destroy_basement_node(bn);
 set_BNULL(node, childnum);
 BP_STATE(node, childnum) = PT_ON_DISK;
......
@@ -297,7 +297,7 @@ void toku_destroy_ftnode_internals(FTNODE node);
 void toku_ftnode_free (FTNODE *node);
 bool toku_ftnode_fully_in_memory(FTNODE node);
 void toku_ftnode_assert_fully_in_memory(FTNODE node);
-void toku_evict_bn_from_memory(FTNODE node, int childnum, FT h);
+void toku_evict_bn_from_memory(FTNODE node, int childnum, FT ft);
 BASEMENTNODE toku_detach_bn(FTNODE node, int childnum);
 void toku_ftnode_update_disk_stats(FTNODE ftnode, FT ft, bool for_checkpoint);
 void toku_ftnode_clone_partitions(FTNODE node, FTNODE cloned_node);
@@ -345,7 +345,7 @@ struct ft_search;
 struct ftnode_fetch_extra {
 enum ftnode_fetch_type type;
 // needed for reading a node off disk
-FT h;
+FT ft;
 // used in the case where type == ftnode_fetch_subset
 // parameters needed to find out which child needs to be decompressed (so it can be read)
 ft_search *search;
@@ -406,7 +406,7 @@ long toku_bnc_memory_size(NONLEAF_CHILDINFO bnc);
 long toku_bnc_memory_used(NONLEAF_CHILDINFO bnc);
 void toku_bnc_insert_msg(NONLEAF_CHILDINFO bnc, const void *key, ITEMLEN keylen, const void *data, ITEMLEN datalen, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, DESCRIPTOR desc, ft_compare_func cmp);
 void toku_bnc_empty(NONLEAF_CHILDINFO bnc);
-void toku_bnc_flush_to_child(FT h, NONLEAF_CHILDINFO bnc, FTNODE child, TXNID parent_oldest_referenced_xid_known);
+void toku_bnc_flush_to_child(FT ft, NONLEAF_CHILDINFO bnc, FTNODE child, TXNID parent_oldest_referenced_xid_known);
 bool toku_bnc_should_promote(FT ft, NONLEAF_CHILDINFO bnc) __attribute__((const, nonnull));
 bool toku_ftnode_nonleaf_is_gorged(FTNODE node, uint32_t nodesize);
......
@@ -220,9 +220,9 @@ toku_rollback_fcreate (FILENUM filenum,
 return 0;
 }
-int find_ft_from_filenum (const FT &h, const FILENUM &filenum);
-int find_ft_from_filenum (const FT &h, const FILENUM &filenum) {
-FILENUM thisfnum = toku_cachefile_filenum(h->cf);
+int find_ft_from_filenum (const FT &ft, const FILENUM &filenum);
+int find_ft_from_filenum (const FT &ft, const FILENUM &filenum) {
+FILENUM thisfnum = toku_cachefile_filenum(ft->cf);
 if (thisfnum.fileid<filenum.fileid) return -1;
 if (thisfnum.fileid>filenum.fileid) return +1;
 return 0;
@@ -236,9 +236,8 @@ static int do_insertion (enum ft_msg_type type, FILENUM filenum, BYTESTRING key,
 bool reset_root_xid_that_created) {
 int r = 0;
 //printf("%s:%d committing insert %s %s\n", __FILE__, __LINE__, key.data, data.data);
-FT h;
-h = NULL;
-r = txn->open_fts.find_zero<FILENUM, find_ft_from_filenum>(filenum, &h, NULL);
+FT ft = nullptr;
+r = txn->open_fts.find_zero<FILENUM, find_ft_from_filenum>(filenum, &ft, NULL);
 if (r == DB_NOTFOUND) {
 assert(txn->for_recovery);
 r = 0;
@@ -247,7 +246,7 @@ static int do_insertion (enum ft_msg_type type, FILENUM filenum, BYTESTRING key,
 assert(r==0);
 if (oplsn.lsn != 0) { // if we are executing the recovery algorithm
-LSN treelsn = toku_ft_checkpoint_lsn(h);
+LSN treelsn = toku_ft_checkpoint_lsn(ft);
 if (oplsn.lsn <= treelsn.lsn) { // if operation was already applied to tree ...
 r = 0; // ... do not apply it again.
 goto done;
@@ -275,10 +274,10 @@ static int do_insertion (enum ft_msg_type type, FILENUM filenum, BYTESTRING key,
 // no messages above us, we can implicitly promote uxrs based on this xid
 oldest_referenced_xid_estimate,
 !txn->for_recovery);
-toku_ft_root_put_msg(h, &ftmsg, &gc_info);
+toku_ft_root_put_msg(ft, &ftmsg, &gc_info);
 if (reset_root_xid_that_created) {
 TXNID new_root_xid_that_created = xids_get_outermost_xid(xids);
-toku_reset_root_xid_that_created(h, new_root_xid_that_created);
+toku_reset_root_xid_that_created(ft, new_root_xid_that_created);
 }
 }
 done:
@@ -579,15 +578,15 @@ toku_rollback_dictionary_redirect (FILENUM old_filenum,
 CACHEFILE new_cf = NULL;
 r = toku_cachefile_of_filenum(txn->logger->ct, new_filenum, &new_cf);
 assert(r == 0);
-FT CAST_FROM_VOIDP(new_h, toku_cachefile_get_userdata(new_cf));
+FT CAST_FROM_VOIDP(new_ft, toku_cachefile_get_userdata(new_cf));
 CACHEFILE old_cf = NULL;
 r = toku_cachefile_of_filenum(txn->logger->ct, old_filenum, &old_cf);
 assert(r == 0);
-FT CAST_FROM_VOIDP(old_h, toku_cachefile_get_userdata(old_cf));
+FT CAST_FROM_VOIDP(old_ft, toku_cachefile_get_userdata(old_cf));
 //Redirect back from new to old.
-r = toku_dictionary_redirect_abort(old_h, new_h, txn);
+r = toku_dictionary_redirect_abort(old_ft, new_ft, txn);
 assert(r==0);
 }
 return r;
......
...@@ -123,7 +123,7 @@ int toku_rollback_cleaner_callback ( ...@@ -123,7 +123,7 @@ int toku_rollback_cleaner_callback (
void* UU(extraargs) void* UU(extraargs)
); );
static inline CACHETABLE_WRITE_CALLBACK get_write_callbacks_for_rollback_log(FT h) { static inline CACHETABLE_WRITE_CALLBACK get_write_callbacks_for_rollback_log(FT ft) {
CACHETABLE_WRITE_CALLBACK wc; CACHETABLE_WRITE_CALLBACK wc;
wc.flush_callback = toku_rollback_flush_callback; wc.flush_callback = toku_rollback_flush_callback;
wc.pe_est_callback = toku_rollback_pe_est_callback; wc.pe_est_callback = toku_rollback_pe_est_callback;
...@@ -131,6 +131,6 @@ static inline CACHETABLE_WRITE_CALLBACK get_write_callbacks_for_rollback_log(FT ...@@ -131,6 +131,6 @@ static inline CACHETABLE_WRITE_CALLBACK get_write_callbacks_for_rollback_log(FT
wc.cleaner_callback = toku_rollback_cleaner_callback; wc.cleaner_callback = toku_rollback_cleaner_callback;
wc.clone_callback = toku_rollback_clone_callback; wc.clone_callback = toku_rollback_clone_callback;
wc.checkpoint_complete_callback = nullptr; wc.checkpoint_complete_callback = nullptr;
wc.write_extraargs = h; wc.write_extraargs = ft;
return wc; return wc;
} }
...@@ -137,7 +137,7 @@ void *toku_memdup_in_rollback(ROLLBACK_LOG_NODE log, const void *v, size_t len); ...@@ -137,7 +137,7 @@ void *toku_memdup_in_rollback(ROLLBACK_LOG_NODE log, const void *v, size_t len);
// if necessary. // if necessary.
void toku_maybe_spill_rollbacks(TOKUTXN txn, ROLLBACK_LOG_NODE log); void toku_maybe_spill_rollbacks(TOKUTXN txn, ROLLBACK_LOG_NODE log);
void toku_txn_maybe_note_ft (TOKUTXN txn, FT h); void toku_txn_maybe_note_ft (TOKUTXN txn, FT ft);
int toku_logger_txn_rollback_stats(TOKUTXN txn, struct txn_stat *txn_stat); int toku_logger_txn_rollback_stats(TOKUTXN txn, struct txn_stat *txn_stat);
int toku_find_xid_by_xid (const TXNID &xid, const TXNID &xidfind); int toku_find_xid_by_xid (const TXNID &xid, const TXNID &xidfind);
......
...@@ -106,7 +106,7 @@ save_data (ITEMLEN UU(keylen), bytevec UU(key), ITEMLEN vallen, bytevec val, voi ...@@ -106,7 +106,7 @@ save_data (ITEMLEN UU(keylen), bytevec UU(key), ITEMLEN vallen, bytevec val, voi
// Verify that different cursors return different data items when a DBT is initialized to all zeros (no flags) // Verify that different cursors return different data items when a DBT is initialized to all zeros (no flags)
// Note: The BRT test used to implement DBTs with per-cursor allocated space, but there isn't any such thing any more // Note: The ft test used to implement DBTs with per-cursor allocated space, but there isn't any such thing any more
// so this test is a little bit obsolete. // so this test is a little bit obsolete.
static void test_multiple_ft_cursor_dbts(int n) { static void test_multiple_ft_cursor_dbts(int n) {
if (verbose) printf("test_multiple_ft_cursors:%d\n", n); if (verbose) printf("test_multiple_ft_cursors:%d\n", n);
......
...@@ -185,7 +185,7 @@ verify_basement_node_msns(FTNODE node, MSN expected) ...@@ -185,7 +185,7 @@ verify_basement_node_msns(FTNODE node, MSN expected)
} }
// //
// Maximum node size according to the BRT: 1024 (expected node size after split) // Maximum node size according to the FT: 1024 (expected node size after split)
// Maximum basement node size: 256 // Maximum basement node size: 256
// Actual node size before split: 2048 // Actual node size before split: 2048
// Actual basement node size before split: 256 // Actual basement node size before split: 256
...@@ -237,7 +237,7 @@ test_split_on_boundary(void) ...@@ -237,7 +237,7 @@ test_split_on_boundary(void)
} }
// //
// Maximum node size according to the BRT: 1024 (expected node size after split) // Maximum node size according to the FT: 1024 (expected node size after split)
// Maximum basement node size: 256 (except the last) // Maximum basement node size: 256 (except the last)
// Actual node size before split: 4095 // Actual node size before split: 4095
// Actual basement node size before split: 256 (except the last, of size 2K) // Actual basement node size before split: 256 (except the last, of size 2K)
...@@ -302,7 +302,7 @@ test_split_with_everything_on_the_left(void) ...@@ -302,7 +302,7 @@ test_split_with_everything_on_the_left(void)
// //
// Maximum node size according to the BRT: 1024 (expected node size after split) // Maximum node size according to the FT: 1024 (expected node size after split)
// Maximum basement node size: 256 (except the last) // Maximum basement node size: 256 (except the last)
// Actual node size before split: 4095 // Actual node size before split: 4095
// Actual basement node size before split: 256 (except the last, of size 2K) // Actual basement node size before split: 256 (except the last, of size 2K)
...@@ -487,7 +487,7 @@ test_split_at_end(void) ...@@ -487,7 +487,7 @@ test_split_at_end(void)
toku_destroy_ftnode_internals(&sn); toku_destroy_ftnode_internals(&sn);
} }
// Maximum node size according to the BRT: 1024 (expected node size after split) // Maximum node size according to the FT: 1024 (expected node size after split)
// Maximum basement node size: 256 // Maximum basement node size: 256
// Actual node size before split: 2048 // Actual node size before split: 2048
// Actual basement node size before split: 256 // Actual basement node size before split: 256
......
...@@ -224,17 +224,17 @@ static int print_le(const void* key, const uint32_t keylen, const LEAFENTRY &le, ...@@ -224,17 +224,17 @@ static int print_le(const void* key, const uint32_t keylen, const LEAFENTRY &le,
return 0; return 0;
} }
static void dump_node(int fd, BLOCKNUM blocknum, FT h) { static void dump_node(int fd, BLOCKNUM blocknum, FT ft) {
FTNODE n; FTNODE n;
struct ftnode_fetch_extra bfe; struct ftnode_fetch_extra bfe;
FTNODE_DISK_DATA ndd = NULL; FTNODE_DISK_DATA ndd = NULL;
fill_bfe_for_full_read(&bfe, h); fill_bfe_for_full_read(&bfe, ft);
int r = toku_deserialize_ftnode_from (fd, blocknum, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe); int r = toku_deserialize_ftnode_from (fd, blocknum, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe);
assert_zero(r); assert_zero(r);
assert(n!=0); assert(n!=0);
printf("ftnode\n"); printf("ftnode\n");
DISKOFF disksize, diskoffset; DISKOFF disksize, diskoffset;
toku_translate_blocknum_to_offset_size(h->blocktable, blocknum, &diskoffset, &disksize); toku_translate_blocknum_to_offset_size(ft->blocktable, blocknum, &diskoffset, &disksize);
printf(" diskoffset =%" PRId64 "\n", diskoffset); printf(" diskoffset =%" PRId64 "\n", diskoffset);
printf(" disksize =%" PRId64 "\n", disksize); printf(" disksize =%" PRId64 "\n", disksize);
printf(" serialize_size =%u\n", toku_serialize_ftnode_size(n)); printf(" serialize_size =%u\n", toku_serialize_ftnode_size(n));
...@@ -331,14 +331,14 @@ static void dump_node(int fd, BLOCKNUM blocknum, FT h) { ...@@ -331,14 +331,14 @@ static void dump_node(int fd, BLOCKNUM blocknum, FT h) {
toku_free(ndd); toku_free(ndd);
} }
static void dump_block_translation(FT h, uint64_t offset) { static void dump_block_translation(FT ft, uint64_t offset) {
toku_blocknum_dump_translation(h->blocktable, make_blocknum(offset)); toku_blocknum_dump_translation(ft->blocktable, make_blocknum(offset));
} }
static void dump_fragmentation(int UU(f), FT h, int tsv) { static void dump_fragmentation(int UU(f), FT ft, int tsv) {
int64_t used_space; int64_t used_space;
int64_t total_space; int64_t total_space;
toku_blocktable_internal_fragmentation(h->blocktable, &total_space, &used_space); toku_blocktable_internal_fragmentation(ft->blocktable, &total_space, &used_space);
int64_t fragsizes = total_space - used_space; int64_t fragsizes = total_space - used_space;
if (tsv) { if (tsv) {
...@@ -354,7 +354,7 @@ static void dump_fragmentation(int UU(f), FT h, int tsv) { ...@@ -354,7 +354,7 @@ static void dump_fragmentation(int UU(f), FT h, int tsv) {
typedef struct { typedef struct {
int fd; int fd;
FT h; FT ft;
uint64_t blocksizes; uint64_t blocksizes;
uint64_t leafsizes; uint64_t leafsizes;
uint64_t leafblocks; uint64_t leafblocks;
...@@ -365,7 +365,7 @@ static int nodesizes_helper(BLOCKNUM b, int64_t size, int64_t UU(address), void ...@@ -365,7 +365,7 @@ static int nodesizes_helper(BLOCKNUM b, int64_t size, int64_t UU(address), void
FTNODE n; FTNODE n;
FTNODE_DISK_DATA ndd = NULL; FTNODE_DISK_DATA ndd = NULL;
struct ftnode_fetch_extra bfe; struct ftnode_fetch_extra bfe;
fill_bfe_for_full_read(&bfe, info->h); fill_bfe_for_full_read(&bfe, info->ft);
int r = toku_deserialize_ftnode_from(info->fd, b, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe); int r = toku_deserialize_ftnode_from(info->fd, b, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe);
if (r==0) { if (r==0) {
info->blocksizes += size; info->blocksizes += size;
...@@ -379,12 +379,12 @@ static int nodesizes_helper(BLOCKNUM b, int64_t size, int64_t UU(address), void ...@@ -379,12 +379,12 @@ static int nodesizes_helper(BLOCKNUM b, int64_t size, int64_t UU(address), void
return 0; return 0;
} }
static void dump_nodesizes(int fd, FT h) { static void dump_nodesizes(int fd, FT ft) {
frag_help_extra info; frag_help_extra info;
memset(&info, 0, sizeof(info)); memset(&info, 0, sizeof(info));
info.fd = fd; info.fd = fd;
info.h = h; info.ft = ft;
toku_blocktable_iterate(h->blocktable, TRANSLATION_CHECKPOINTED, toku_blocktable_iterate(ft->blocktable, TRANSLATION_CHECKPOINTED,
nodesizes_helper, &info, true, true); nodesizes_helper, &info, true, true);
printf("leafblocks\t%" PRIu64 "\n", info.leafblocks); printf("leafblocks\t%" PRIu64 "\n", info.leafblocks);
printf("blocksizes\t%" PRIu64 "\n", info.blocksizes); printf("blocksizes\t%" PRIu64 "\n", info.blocksizes);
...@@ -402,12 +402,12 @@ static void dump_garbage_stats(int fd, FT ft) { ...@@ -402,12 +402,12 @@ static void dump_garbage_stats(int fd, FT ft) {
typedef struct __dump_node_extra { typedef struct __dump_node_extra {
int fd; int fd;
FT h; FT ft;
} dump_node_extra; } dump_node_extra;
static int dump_node_wrapper(BLOCKNUM b, int64_t UU(size), int64_t UU(address), void *extra) { static int dump_node_wrapper(BLOCKNUM b, int64_t UU(size), int64_t UU(address), void *extra) {
dump_node_extra *CAST_FROM_VOIDP(info, extra); dump_node_extra *CAST_FROM_VOIDP(info, extra);
dump_node(info->fd, b, info->h); dump_node(info->fd, b, info->ft);
return 0; return 0;
} }
...@@ -472,9 +472,9 @@ static void verify_block(unsigned char *cp, uint64_t file_offset, uint64_t size) ...@@ -472,9 +472,9 @@ static void verify_block(unsigned char *cp, uint64_t file_offset, uint64_t size)
printf("offset %u expected %" PRIu64 "\n", offset, size); printf("offset %u expected %" PRIu64 "\n", offset, size);
} }
static void dump_block(int fd, BLOCKNUM blocknum, FT h) { static void dump_block(int fd, BLOCKNUM blocknum, FT ft) {
DISKOFF offset, size; DISKOFF offset, size;
toku_translate_blocknum_to_offset_size(h->blocktable, blocknum, &offset, &size); toku_translate_blocknum_to_offset_size(ft->blocktable, blocknum, &offset, &size);
printf("%" PRId64 " at %" PRId64 " size %" PRId64 "\n", blocknum.b, offset, size); printf("%" PRId64 " at %" PRId64 " size %" PRId64 "\n", blocknum.b, offset, size);
unsigned char *CAST_FROM_VOIDP(vp, toku_malloc(size)); unsigned char *CAST_FROM_VOIDP(vp, toku_malloc(size));
...@@ -698,7 +698,7 @@ int main (int argc, const char *const argv[]) { ...@@ -698,7 +698,7 @@ int main (int argc, const char *const argv[]) {
struct __dump_node_extra info; struct __dump_node_extra info;
info.fd = fd; info.fd = fd;
info.h = ft; info.ft = ft;
toku_blocktable_iterate(ft->blocktable, TRANSLATION_CHECKPOINTED, toku_blocktable_iterate(ft->blocktable, TRANSLATION_CHECKPOINTED,
dump_node_wrapper, &info, true, true); dump_node_wrapper, &info, true, true);
} }
......
...@@ -618,7 +618,7 @@ int remove_txn (const FT &h, const uint32_t UU(idx), TOKUTXN const UU(txn)) ...@@ -618,7 +618,7 @@ int remove_txn (const FT &h, const uint32_t UU(idx), TOKUTXN const UU(txn))
return 0; return 0;
} }
// for every BRT in txn, remove it. // for every ft in txn, remove it.
static void note_txn_closing (TOKUTXN txn) { static void note_txn_closing (TOKUTXN txn) {
txn->open_fts.iterate<struct tokutxn, remove_txn>(txn); txn->open_fts.iterate<struct tokutxn, remove_txn>(txn);
} }
......
...@@ -103,7 +103,7 @@ const int envflags = DB_INIT_MPOOL | ...@@ -103,7 +103,7 @@ const int envflags = DB_INIT_MPOOL |
DB_ENV* env; DB_ENV* env;
unsigned int leaf_hits; unsigned int leaf_hits;
// Custom Update Function for our test BRT. // Custom Update Function for our test FT.
static int static int
update_func(DB* UU(db), update_func(DB* UU(db),
const DBT* key, const DBT* key,
...@@ -266,7 +266,7 @@ test_main(int argc, char * const argv[]) ...@@ -266,7 +266,7 @@ test_main(int argc, char * const argv[])
default_parse_args(argc, argv); default_parse_args(argc, argv);
hot_test_setup(); hot_test_setup();
// Create and Open the Database/BRT // Create and Open the Database/FT
DB *db = NULL; DB *db = NULL;
const unsigned int BIG = 4000000; const unsigned int BIG = 4000000;
const unsigned int SMALL = 10; const unsigned int SMALL = 10;
......