Commit cbab8d8e authored by John Esmet's avatar John Esmet

FT-253 Remove remaining brt / brt_header era names

parent 061724be
......@@ -3,7 +3,6 @@ cmake_policy(SET CMP0012 NEW)
## these tests shouldn't run with valgrind
list(APPEND CTEST_CUSTOM_MEMCHECK_IGNORE
ft/bnc-insert-benchmark
ft/brt-serialize-benchmark
ft/ft_loader-test-extractor-1
ft/ft_loader-test-extractor-2
ft/ft_loader-test-extractor-3
......
......@@ -145,7 +145,7 @@ void block_allocator_alloc_block_at (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t
// Effect: Allocate a block of the specified size at a particular offset.
// Aborts if anything goes wrong.
// The performance of this function may be as bad as Theta(N), where N is the number of blocks currently in use.
// Usage note: To allocate several blocks (e.g., when opening a BRT), use block_allocator_alloc_blocks_at().
// Usage note: To allocate several blocks (e.g., when opening an FT), use block_allocator_alloc_blocks_at().
// Requires: The resulting block may not overlap any other allocated block.
// And the offset must be a multiple of the block alignment.
// Parameters:
......
......@@ -109,8 +109,8 @@ void toku_blocktable_create_new(BLOCK_TABLE *btp);
int toku_blocktable_create_from_buffer(int fd, BLOCK_TABLE *btp, DISKOFF location_on_disk, DISKOFF size_on_disk, unsigned char *translation_buffer);
void toku_blocktable_destroy(BLOCK_TABLE *btp);
void toku_ft_lock(FT h);
void toku_ft_unlock(FT h);
void toku_ft_lock(FT ft);
void toku_ft_unlock(FT ft);
void toku_block_translation_note_start_checkpoint_unlocked(BLOCK_TABLE bt);
void toku_block_translation_note_end_checkpoint(BLOCK_TABLE bt, int fd);
......@@ -118,15 +118,15 @@ void toku_block_translation_note_skipped_checkpoint(BLOCK_TABLE bt);
void toku_maybe_truncate_file_on_open(BLOCK_TABLE bt, int fd);
//Blocknums
void toku_allocate_blocknum(BLOCK_TABLE bt, BLOCKNUM *res, FT h);
void toku_allocate_blocknum_unlocked(BLOCK_TABLE bt, BLOCKNUM *res, FT h);
void toku_free_blocknum(BLOCK_TABLE bt, BLOCKNUM *b, FT h, bool for_checkpoint);
void toku_allocate_blocknum(BLOCK_TABLE bt, BLOCKNUM *res, FT ft);
void toku_allocate_blocknum_unlocked(BLOCK_TABLE bt, BLOCKNUM *res, FT ft);
void toku_free_blocknum(BLOCK_TABLE bt, BLOCKNUM *b, FT ft, bool for_checkpoint);
void toku_verify_blocknum_allocated(BLOCK_TABLE bt, BLOCKNUM b);
void toku_block_verify_no_data_blocks_except_root(BLOCK_TABLE bt, BLOCKNUM root);
void toku_free_unused_blocknums(BLOCK_TABLE bt, BLOCKNUM root);
void toku_block_verify_no_free_blocknums(BLOCK_TABLE bt);
void toku_realloc_descriptor_on_disk(BLOCK_TABLE bt, DISKOFF size, DISKOFF *offset, FT h, int fd);
void toku_realloc_descriptor_on_disk_unlocked(BLOCK_TABLE bt, DISKOFF size, DISKOFF *offset, FT h);
void toku_realloc_descriptor_on_disk(BLOCK_TABLE bt, DISKOFF size, DISKOFF *offset, FT ft, int fd);
void toku_realloc_descriptor_on_disk_unlocked(BLOCK_TABLE bt, DISKOFF size, DISKOFF *offset, FT ft);
void toku_get_descriptor_offset_size(BLOCK_TABLE bt, DISKOFF *offset, DISKOFF *size);
//Blocks and Blocknums
......
......@@ -104,16 +104,16 @@ ftnode_get_key_and_fullhash(
uint32_t* fullhash,
void* extra)
{
FT h = (FT) extra;
FT ft = (FT) extra;
BLOCKNUM name;
toku_allocate_blocknum(h->blocktable, &name, h);
toku_allocate_blocknum(ft->blocktable, &name, ft);
*cachekey = name;
*fullhash = toku_cachetable_hash(h->cf, name);
*fullhash = toku_cachetable_hash(ft->cf, name);
}
void
cachetable_put_empty_node_with_dep_nodes(
FT h,
FT ft,
uint32_t num_dependent_nodes,
FTNODE* dependent_nodes,
BLOCKNUM* name, //output
......@@ -129,12 +129,12 @@ cachetable_put_empty_node_with_dep_nodes(
}
toku_cachetable_put_with_dep_pairs(
h->cf,
ft->cf,
ftnode_get_key_and_fullhash,
new_node,
make_pair_attr(sizeof(FTNODE)),
get_write_callbacks_for_node(h),
h,
get_write_callbacks_for_node(ft),
ft,
num_dependent_nodes,
dependent_pairs,
dependent_dirty_bits,
......@@ -319,7 +319,7 @@ toku_pin_ftnode_for_query(
void
toku_pin_ftnode_with_dep_nodes(
FT h,
FT ft,
BLOCKNUM blocknum,
uint32_t fullhash,
FTNODE_FETCH_EXTRA bfe,
......@@ -338,12 +338,12 @@ toku_pin_ftnode_with_dep_nodes(
}
int r = toku_cachetable_get_and_pin_with_dep_pairs(
h->cf,
ft->cf,
blocknum,
fullhash,
&node_v,
NULL,
get_write_callbacks_for_node(h),
get_write_callbacks_for_node(ft),
toku_ftnode_fetch_callback,
toku_ftnode_pf_req_callback,
toku_ftnode_pf_callback,
......@@ -356,7 +356,7 @@ toku_pin_ftnode_with_dep_nodes(
invariant_zero(r);
FTNODE node = (FTNODE) node_v;
if (lock_type != PL_READ && node->height > 0 && move_messages) {
toku_move_ftnode_messages_to_stale(h, node);
toku_move_ftnode_messages_to_stale(ft, node);
}
*node_p = node;
}
......
......@@ -103,7 +103,7 @@ PATENT RIGHTS GRANT:
*/
void
cachetable_put_empty_node_with_dep_nodes(
FT h,
FT ft,
uint32_t num_dependent_nodes,
FTNODE* dependent_nodes,
BLOCKNUM* name, //output
......@@ -118,7 +118,7 @@ cachetable_put_empty_node_with_dep_nodes(
*/
void
create_new_ftnode_with_dep_nodes(
FT h,
FT ft,
FTNODE *result,
int height,
int n_children,
......@@ -156,7 +156,7 @@ toku_pin_ftnode_for_query(
// Pins an ftnode without dependent pairs
void toku_pin_ftnode(
FT h,
FT ft,
BLOCKNUM blocknum,
uint32_t fullhash,
FTNODE_FETCH_EXTRA bfe,
......@@ -168,7 +168,7 @@ void toku_pin_ftnode(
// Pins an ftnode with dependent pairs
// Unlike toku_pin_ftnode_for_query, this function blocks until the node is pinned.
void toku_pin_ftnode_with_dep_nodes(
FT h,
FT ft,
BLOCKNUM blocknum,
uint32_t fullhash,
FTNODE_FETCH_EXTRA bfe,
......@@ -188,7 +188,7 @@ int toku_maybe_pin_ftnode_clean(FT ft, BLOCKNUM blocknum, uint32_t fullhash, pai
/**
* Effect: Unpin an ftnode.
*/
void toku_unpin_ftnode(FT h, FTNODE node);
void toku_unpin_ftnode(FT ft, FTNODE node);
void toku_unpin_ftnode_read_only(FT ft, FTNODE node);
// Effect: Swaps pair values of two pinned nodes
......
......@@ -115,7 +115,7 @@ typedef struct flusher_advice FLUSHER_ADVICE;
* Cleaner thread merging leaf nodes: follow down to a key
* Hot optimize table: follow down to the right of a key
*/
typedef int (*FA_PICK_CHILD)(FT h, FTNODE parent, void* extra);
typedef int (*FA_PICK_CHILD)(FT ft, FTNODE parent, void* extra);
/**
* Decide whether to call `toku_ft_flush_some_child` on the child if it is
......@@ -139,7 +139,7 @@ typedef bool (*FA_SHOULD_RECURSIVELY_FLUSH)(FTNODE child, void* extra);
* Hot optimize table: just do the merge
*/
typedef void (*FA_MAYBE_MERGE_CHILD)(struct flusher_advice *fa,
FT h,
FT ft,
FTNODE parent,
int childnum,
FTNODE child,
......@@ -172,7 +172,7 @@ typedef void (*FA_UPDATE_STATUS)(FTNODE child, int dirtied, void* extra);
* by `ft_split_child`. If -1 is returned, `ft_split_child` defaults to
* the old behavior.
*/
typedef int (*FA_PICK_CHILD_AFTER_SPLIT)(FT h,
typedef int (*FA_PICK_CHILD_AFTER_SPLIT)(FT ft,
FTNODE node,
int childnuma,
int childnumb,
......@@ -223,14 +223,14 @@ dont_destroy_basement_nodes(void* extra);
void
default_merge_child(struct flusher_advice *fa,
FT h,
FT ft,
FTNODE parent,
int childnum,
FTNODE child,
void* extra);
int
default_pick_child_after_split(FT h,
default_pick_child_after_split(FT ft,
FTNODE parent,
int childnuma,
int childnumb,
......
This diff is collapsed.
......@@ -163,7 +163,7 @@ enum split_mode {
// Given pinned node and pinned child, split child into two
// and update node with information about its new child.
void toku_ft_split_child(
FT h,
FT ft,
FTNODE node,
int childnum,
FTNODE child,
......@@ -189,7 +189,7 @@ void toku_ft_merge_child(
// TODO: Rename toku_ft_leaf_split
void
ftleaf_split(
FT h,
FT ft,
FTNODE node,
FTNODE *nodea,
FTNODE *nodeb,
......@@ -212,7 +212,7 @@ ftleaf_split(
void
// TODO: Rename toku_ft_nonleaf_split
ft_nonleaf_split(
FT h,
FT ft,
FTNODE node,
FTNODE *nodea,
FTNODE *nodeb,
......
......@@ -169,7 +169,7 @@ hot_set_start_key(struct hot_flusher_extra *flusher, const DBT* start)
}
static int
hot_just_pick_child(FT h,
hot_just_pick_child(FT ft,
FTNODE parent,
struct hot_flusher_extra *flusher)
{
......@@ -186,8 +186,8 @@ hot_just_pick_child(FT h,
// Find the pivot boundary.
childnum = toku_ftnode_hot_next_child(parent,
&flusher->highest_pivot_key,
&h->cmp_descriptor,
h->compare_fun);
&ft->cmp_descriptor,
ft->compare_fun);
}
return childnum;
......@@ -209,12 +209,12 @@ hot_update_flusher_keys(FTNODE parent,
// Picks which child toku_ft_flush_some_child will use for flushing and
// recursion.
static int
hot_pick_child(FT h,
hot_pick_child(FT ft,
FTNODE parent,
void *extra)
{
struct hot_flusher_extra *flusher = (struct hot_flusher_extra *) extra;
int childnum = hot_just_pick_child(h, parent, flusher);
int childnum = hot_just_pick_child(ft, parent, flusher);
// Now we determine the percentage of the tree flushed so far.
......@@ -244,14 +244,14 @@ hot_update_status(FTNODE UU(child),
// one to flush into. This gives it a chance to do that, and update the
// keys it maintains.
static int
hot_pick_child_after_split(FT h,
hot_pick_child_after_split(FT ft,
FTNODE parent,
int childnuma,
int childnumb,
void *extra)
{
struct hot_flusher_extra *flusher = (struct hot_flusher_extra *) extra;
int childnum = hot_just_pick_child(h, parent, flusher);
int childnum = hot_just_pick_child(ft, parent, flusher);
assert(childnum == childnuma || childnum == childnumb);
hot_update_flusher_keys(parent, childnum, flusher);
if (parent->height == 1) {
......
......@@ -324,11 +324,11 @@ int toku_serialize_ftnode_to_memory (FTNODE node,
/*out*/ size_t *n_bytes_to_write,
/*out*/ size_t *n_uncompressed_bytes,
/*out*/ char **bytes_to_write);
int toku_serialize_ftnode_to(int fd, BLOCKNUM, FTNODE node, FTNODE_DISK_DATA* ndd, bool do_rebalancing, FT h, bool for_checkpoint);
int toku_serialize_ftnode_to(int fd, BLOCKNUM, FTNODE node, FTNODE_DISK_DATA* ndd, bool do_rebalancing, FT ft, bool for_checkpoint);
int toku_serialize_rollback_log_to (int fd, ROLLBACK_LOG_NODE log, SERIALIZED_ROLLBACK_LOG_NODE serialized_log, bool is_serialized,
FT h, bool for_checkpoint);
FT ft, bool for_checkpoint);
void toku_serialize_rollback_log_to_memory_uncompressed(ROLLBACK_LOG_NODE log, SERIALIZED_ROLLBACK_LOG_NODE serialized);
int toku_deserialize_rollback_log_from (int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE *logp, FT h);
int toku_deserialize_rollback_log_from (int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE *logp, FT ft);
int toku_deserialize_bp_from_disk(FTNODE node, FTNODE_DISK_DATA ndd, int childnum, int fd, struct ftnode_fetch_extra* bfe);
int toku_deserialize_bp_from_compressed(FTNODE node, int childnum, struct ftnode_fetch_extra *bfe);
int toku_deserialize_ftnode_from (int fd, BLOCKNUM off, uint32_t /*fullhash*/, FTNODE *ftnode, FTNODE_DISK_DATA* ndd, struct ftnode_fetch_extra* bfe);
......@@ -353,7 +353,7 @@ deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ft, uint32_t version);
void read_block_from_fd_into_rbuf(
int fd,
BLOCKNUM blocknum,
FT h,
FT ft,
struct rbuf *rb
);
......@@ -492,7 +492,7 @@ int toku_testsetup_insert_to_leaf (FT_HANDLE ft_h, BLOCKNUM, const char *key, in
int toku_testsetup_insert_to_nonleaf (FT_HANDLE ft_h, BLOCKNUM, enum ft_msg_type, const char *key, int keylen, const char *val, int vallen);
void toku_pin_node_with_min_bfe(FTNODE* node, BLOCKNUM b, FT_HANDLE t);
void toku_ft_root_put_msg(FT h, FT_MSG msg, txn_gc_info *gc_info);
void toku_ft_root_put_msg(FT ft, FT_MSG msg, txn_gc_info *gc_info);
void
toku_get_node_for_verify(
......@@ -667,8 +667,8 @@ void toku_ft_get_status(FT_STATUS);
void toku_flusher_thread_set_callback(void (*callback_f)(int, void*), void* extra);
int toku_upgrade_subtree_estimates_to_stat64info(int fd, FT h) __attribute__((nonnull));
int toku_upgrade_msn_from_root_to_header(int fd, FT h) __attribute__((nonnull));
int toku_upgrade_subtree_estimates_to_stat64info(int fd, FT ft) __attribute__((nonnull));
int toku_upgrade_msn_from_root_to_header(int fd, FT ft) __attribute__((nonnull));
// A callback function is invoked with the key, and the data.
// The pointers (to the bytevecs) must not be modified. The data must be copied out before the callback function returns.
......
......@@ -600,7 +600,7 @@ toku_bfe_leftmost_child_wanted(struct ftnode_fetch_extra *bfe, FTNODE node)
} else if (bfe->range_lock_left_key.data == nullptr) {
return -1;
} else {
return toku_ftnode_which_child(node, &bfe->range_lock_left_key, &bfe->h->cmp_descriptor, bfe->h->compare_fun);
return toku_ftnode_which_child(node, &bfe->range_lock_left_key, &bfe->ft->cmp_descriptor, bfe->ft->compare_fun);
}
}
......@@ -613,7 +613,7 @@ toku_bfe_rightmost_child_wanted(struct ftnode_fetch_extra *bfe, FTNODE node)
} else if (bfe->range_lock_right_key.data == nullptr) {
return -1;
} else {
return toku_ftnode_which_child(node, &bfe->range_lock_right_key, &bfe->h->cmp_descriptor, bfe->h->compare_fun);
return toku_ftnode_which_child(node, &bfe->range_lock_right_key, &bfe->ft->cmp_descriptor, bfe->ft->compare_fun);
}
}
......@@ -763,7 +763,7 @@ void toku_ftnode_flush_callback(
bool is_clone
)
{
FT h = (FT) extraargs;
FT ft = (FT) extraargs;
FTNODE ftnode = (FTNODE) ftnode_v;
FTNODE_DISK_DATA* ndd = (FTNODE_DISK_DATA*)disk_data;
assert(ftnode->blocknum.b == blocknum.b);
......@@ -772,14 +772,14 @@ void toku_ftnode_flush_callback(
toku_ftnode_assert_fully_in_memory(ftnode);
if (height > 0 && !is_clone) {
// cloned nodes already had their stale messages moved, see toku_ftnode_clone_callback()
toku_move_ftnode_messages_to_stale(h, ftnode);
toku_move_ftnode_messages_to_stale(ft, ftnode);
} else if (height == 0) {
toku_ftnode_leaf_run_gc(h, ftnode);
toku_ftnode_leaf_run_gc(ft, ftnode);
if (!is_clone) {
toku_ftnode_update_disk_stats(ftnode, h, for_checkpoint);
toku_ftnode_update_disk_stats(ftnode, ft, for_checkpoint);
}
}
int r = toku_serialize_ftnode_to(fd, ftnode->blocknum, ftnode, ndd, !is_clone, h, for_checkpoint);
int r = toku_serialize_ftnode_to(fd, ftnode->blocknum, ftnode, ndd, !is_clone, ft, for_checkpoint);
assert_zero(r);
ftnode->layout_version_read_from_disk = FT_LAYOUT_VERSION;
}
......@@ -800,7 +800,7 @@ void toku_ftnode_flush_callback(
for (int i = 0; i < ftnode->n_children; i++) {
if (BP_STATE(ftnode,i) == PT_AVAIL) {
BASEMENTNODE bn = BLB(ftnode, i);
toku_ft_decrease_stats(&h->in_memory_stats, bn->stat64_delta);
toku_ft_decrease_stats(&ft->in_memory_stats, bn->stat64_delta);
}
}
}
......@@ -1125,11 +1125,11 @@ bool toku_ftnode_pf_req_callback(void* ftnode_pv, void* read_extraargs) {
// we can possibly require is a single basement node
// we find out what basement node the query cares about
// and check if it is available
paranoid_invariant(bfe->h->compare_fun);
paranoid_invariant(bfe->ft->compare_fun);
paranoid_invariant(bfe->search);
bfe->child_to_read = toku_ft_search_which_child(
&bfe->h->cmp_descriptor,
bfe->h->compare_fun,
&bfe->ft->cmp_descriptor,
bfe->ft->compare_fun,
node,
bfe->search
);
......@@ -1154,7 +1154,7 @@ bool toku_ftnode_pf_req_callback(void* ftnode_pv, void* read_extraargs) {
// we can possibly require is a single basement node
// we find out what basement node the query cares about
// and check if it is available
paranoid_invariant(bfe->h->compare_fun);
paranoid_invariant(bfe->ft->compare_fun);
if (node->height == 0) {
int left_child = toku_bfe_leftmost_child_wanted(bfe, node);
int right_child = toku_bfe_rightmost_child_wanted(bfe, node);
......@@ -1342,7 +1342,7 @@ int toku_ftnode_pf_callback(void* ftnode_pv, void* disk_data, void* read_extraar
if (r == TOKUDB_BAD_CHECKSUM) {
fprintf(stderr,
"Checksum failure while reading node partition in file %s.\n",
toku_cachefile_fname_in_env(bfe->h->cf));
toku_cachefile_fname_in_env(bfe->ft->cf));
} else {
fprintf(stderr,
"Error while reading node partition %d\n",
......@@ -1363,9 +1363,9 @@ int toku_msg_leafval_heaviside(DBT const &kdbt, const struct toku_msg_leafval_he
return be.compare_fun(&db, &kdbt, key);
}
void fill_bfe_for_full_read(struct ftnode_fetch_extra *bfe, FT h) {
void fill_bfe_for_full_read(struct ftnode_fetch_extra *bfe, FT ft) {
bfe->type = ftnode_fetch_all;
bfe->h = h;
bfe->ft = ft;
bfe->search = nullptr;
toku_init_dbt(&bfe->range_lock_left_key);
toku_init_dbt(&bfe->range_lock_right_key);
......@@ -1380,12 +1380,12 @@ void fill_bfe_for_full_read(struct ftnode_fetch_extra *bfe, FT h) {
bfe->decompress_time = 0;
}
void fill_bfe_for_keymatch(struct ftnode_fetch_extra *bfe, FT h,
void fill_bfe_for_keymatch(struct ftnode_fetch_extra *bfe, FT ft,
const DBT *left, const DBT *right,
bool disable_prefetching, bool read_all_partitions) {
paranoid_invariant(h->h->type == FT_CURRENT);
paranoid_invariant(ft->h->type == FT_CURRENT);
bfe->type = ftnode_fetch_keymatch;
bfe->h = h;
bfe->ft = ft;
bfe->search = nullptr;
toku_init_dbt(&bfe->range_lock_left_key);
toku_init_dbt(&bfe->range_lock_right_key);
......@@ -1407,13 +1407,13 @@ void fill_bfe_for_keymatch(struct ftnode_fetch_extra *bfe, FT h,
bfe->decompress_time = 0;
}
void fill_bfe_for_subset_read(struct ftnode_fetch_extra *bfe, FT h, ft_search *search,
void fill_bfe_for_subset_read(struct ftnode_fetch_extra *bfe, FT ft, ft_search *search,
const DBT *left, const DBT *right,
bool left_is_neg_infty, bool right_is_pos_infty,
bool disable_prefetching, bool read_all_partitions) {
paranoid_invariant(h->h->type == FT_CURRENT);
paranoid_invariant(ft->h->type == FT_CURRENT);
bfe->type = ftnode_fetch_subset;
bfe->h = h;
bfe->ft = ft;
bfe->search = search;
toku_init_dbt(&bfe->range_lock_left_key);
toku_init_dbt(&bfe->range_lock_right_key);
......@@ -1437,7 +1437,7 @@ void fill_bfe_for_subset_read(struct ftnode_fetch_extra *bfe, FT h, ft_search *s
void fill_bfe_for_min_read(struct ftnode_fetch_extra *bfe, FT ft) {
paranoid_invariant(ft->h->type == FT_CURRENT);
bfe->type = ftnode_fetch_none;
bfe->h = ft;
bfe->ft = ft;
bfe->search = nullptr;
toku_init_dbt(&bfe->range_lock_left_key);
toku_init_dbt(&bfe->range_lock_right_key);
......@@ -1455,7 +1455,7 @@ void fill_bfe_for_min_read(struct ftnode_fetch_extra *bfe, FT ft) {
void fill_bfe_for_prefetch(struct ftnode_fetch_extra *bfe, FT ft, struct ft_cursor *cursor) {
paranoid_invariant(ft->h->type == FT_CURRENT);
bfe->type = ftnode_fetch_prefetch;
bfe->h = ft;
bfe->ft = ft;
bfe->search = nullptr;
toku_init_dbt(&bfe->range_lock_left_key);
toku_init_dbt(&bfe->range_lock_right_key);
......@@ -3175,9 +3175,8 @@ toku_ft_handle_open_with_dict_id(
DICTIONARY_ID
toku_ft_get_dictionary_id(FT_HANDLE ft_handle) {
FT h = ft_handle->ft;
DICTIONARY_ID dict_id = h->dict_id;
return dict_id;
FT ft = ft_handle->ft;
return ft->dict_id;
}
void toku_ft_set_flags(FT_HANDLE ft_handle, unsigned int flags) {
......
......@@ -184,7 +184,7 @@ void toku_ft_handle_close_recovery(FT_HANDLE ft_handle, LSN oplsn);
int
toku_ft_handle_open_with_dict_id(
FT_HANDLE t,
FT_HANDLE ft_h,
const char *fname_in_env,
int is_create,
int only_create,
......
......@@ -472,19 +472,19 @@ int toku_read_ft_and_store_in_cachefile (FT_HANDLE ft_handle, CACHEFILE cf, LSN
// max_acceptable_lsn is the latest acceptable checkpointed version of the file.
{
{
FT h;
if ((h = (FT) toku_cachefile_get_userdata(cf))!=0) {
*header = h;
assert(ft_handle->options.update_fun == h->update_fun);
assert(ft_handle->options.compare_fun == h->compare_fun);
FT ft;
if ((ft = (FT) toku_cachefile_get_userdata(cf))!=0) {
*header = ft;
assert(ft_handle->options.update_fun == ft->update_fun);
assert(ft_handle->options.compare_fun == ft->compare_fun);
return 0;
}
}
FT h = nullptr;
FT ft = nullptr;
int r;
{
int fd = toku_cachefile_get_fd(cf);
r = toku_deserialize_ft_from(fd, max_acceptable_lsn, &h);
r = toku_deserialize_ft_from(fd, max_acceptable_lsn, &ft);
if (r == TOKUDB_BAD_CHECKSUM) {
fprintf(stderr, "Checksum failure while reading header in file %s.\n", toku_cachefile_fname_in_env(cf));
assert(false); // make absolutely sure we crash before doing anything else
......@@ -492,12 +492,12 @@ int toku_read_ft_and_store_in_cachefile (FT_HANDLE ft_handle, CACHEFILE cf, LSN
}
if (r!=0) return r;
// GCC 4.8 seems to get confused by the gotos in the deserialize code and thinks ft is maybe uninitialized.
invariant_notnull(h);
h->cf = cf;
h->compare_fun = ft_handle->options.compare_fun;
h->update_fun = ft_handle->options.update_fun;
invariant_notnull(ft);
ft->cf = cf;
ft->compare_fun = ft_handle->options.compare_fun;
ft->update_fun = ft_handle->options.update_fun;
toku_cachefile_set_userdata(cf,
(void*)h,
reinterpret_cast<void *>(ft),
ft_log_fassociate_during_checkpoint,
ft_close,
ft_free,
......@@ -506,7 +506,7 @@ int toku_read_ft_and_store_in_cachefile (FT_HANDLE ft_handle, CACHEFILE cf, LSN
ft_end_checkpoint,
ft_note_pin_by_checkpoint,
ft_note_unpin_by_checkpoint);
*header = h;
*header = ft;
return 0;
}
......@@ -548,12 +548,12 @@ void toku_ft_evict_from_memory(FT ft, bool oplsn_valid, LSN oplsn) {
}
// Verifies there exists exactly one ft handle and returns it.
FT_HANDLE toku_ft_get_only_existing_ft_handle(FT h) {
FT_HANDLE toku_ft_get_only_existing_ft_handle(FT ft) {
FT_HANDLE ft_handle_ret = NULL;
toku_ft_grab_reflock(h);
assert(toku_list_num_elements_est(&h->live_ft_handles) == 1);
ft_handle_ret = toku_list_struct(toku_list_head(&h->live_ft_handles), struct ft_handle, live_ft_handle_link);
toku_ft_release_reflock(h);
toku_ft_grab_reflock(ft);
assert(toku_list_num_elements_est(&ft->live_ft_handles) == 1);
ft_handle_ret = toku_list_struct(toku_list_head(&ft->live_ft_handles), struct ft_handle, live_ft_handle_link);
toku_ft_release_reflock(ft);
return ft_handle_ret;
}
......@@ -628,27 +628,27 @@ toku_ft_init(FT ft,
// Open an ft for use by redirect. The new ft must have the same dict_id as the old_ft passed in. (FILENUM is assigned by the ft_handle_open() function.)
static int
ft_handle_open_for_redirect(FT_HANDLE *new_ftp, const char *fname_in_env, TOKUTXN txn, FT old_h) {
FT_HANDLE t;
assert(old_h->dict_id.dictid != DICTIONARY_ID_NONE.dictid);
toku_ft_handle_create(&t);
toku_ft_set_bt_compare(t, old_h->compare_fun);
toku_ft_set_update(t, old_h->update_fun);
toku_ft_handle_set_nodesize(t, old_h->h->nodesize);
toku_ft_handle_set_basementnodesize(t, old_h->h->basementnodesize);
toku_ft_handle_set_compression_method(t, old_h->h->compression_method);
toku_ft_handle_set_fanout(t, old_h->h->fanout);
CACHETABLE ct = toku_cachefile_get_cachetable(old_h->cf);
int r = toku_ft_handle_open_with_dict_id(t, fname_in_env, 0, 0, ct, txn, old_h->dict_id);
ft_handle_open_for_redirect(FT_HANDLE *new_ftp, const char *fname_in_env, TOKUTXN txn, FT old_ft) {
FT_HANDLE ft_handle;
assert(old_ft->dict_id.dictid != DICTIONARY_ID_NONE.dictid);
toku_ft_handle_create(&ft_handle);
toku_ft_set_bt_compare(ft_handle, old_ft->compare_fun);
toku_ft_set_update(ft_handle, old_ft->update_fun);
toku_ft_handle_set_nodesize(ft_handle, old_ft->h->nodesize);
toku_ft_handle_set_basementnodesize(ft_handle, old_ft->h->basementnodesize);
toku_ft_handle_set_compression_method(ft_handle, old_ft->h->compression_method);
toku_ft_handle_set_fanout(ft_handle, old_ft->h->fanout);
CACHETABLE ct = toku_cachefile_get_cachetable(old_ft->cf);
int r = toku_ft_handle_open_with_dict_id(ft_handle, fname_in_env, 0, 0, ct, txn, old_ft->dict_id);
if (r != 0) {
goto cleanup;
}
assert(t->ft->dict_id.dictid == old_h->dict_id.dictid);
*new_ftp = t;
assert(ft_handle->ft->dict_id.dictid == old_ft->dict_id.dictid);
*new_ftp = ft_handle;
cleanup:
if (r != 0) {
toku_ft_handle_close(t);
toku_ft_handle_close(ft_handle);
}
return r;
}
......@@ -656,81 +656,81 @@ ft_handle_open_for_redirect(FT_HANDLE *new_ftp, const char *fname_in_env, TOKUTX
// This function performs most of the work to redirect a dictionary to different file.
// It is called for redirect and to abort a redirect. (This function is almost its own inverse.)
static int
dictionary_redirect_internal(const char *dst_fname_in_env, FT src_h, TOKUTXN txn, FT *dst_hp) {
dictionary_redirect_internal(const char *dst_fname_in_env, FT src_ft, TOKUTXN txn, FT *dst_ftp) {
int r;
FILENUM src_filenum = toku_cachefile_filenum(src_h->cf);
FILENUM src_filenum = toku_cachefile_filenum(src_ft->cf);
FILENUM dst_filenum = FILENUM_NONE;
FT dst_h = NULL;
FT dst_ft = NULL;
struct toku_list *list;
// open a dummy ft based off of
// dst_fname_in_env to get the header
// then we will change all the ft's to have
// their headers point to dst_h instead of src_h
// their headers point to dst_ft instead of src_ft
FT_HANDLE tmp_dst_ft = NULL;
r = ft_handle_open_for_redirect(&tmp_dst_ft, dst_fname_in_env, txn, src_h);
r = ft_handle_open_for_redirect(&tmp_dst_ft, dst_fname_in_env, txn, src_ft);
if (r != 0) {
goto cleanup;
}
dst_h = tmp_dst_ft->ft;
dst_ft = tmp_dst_ft->ft;
// some sanity checks on dst_filenum
dst_filenum = toku_cachefile_filenum(dst_h->cf);
dst_filenum = toku_cachefile_filenum(dst_ft->cf);
assert(dst_filenum.fileid!=FILENUM_NONE.fileid);
assert(dst_filenum.fileid!=src_filenum.fileid); //Cannot be same file.
// for each live ft_handle, ft_handle->ft is currently src_h
// for each live ft_handle, ft_handle->ft is currently src_ft
// we want to change it to dummy_dst
toku_ft_grab_reflock(src_h);
while (!toku_list_empty(&src_h->live_ft_handles)) {
list = src_h->live_ft_handles.next;
toku_ft_grab_reflock(src_ft);
while (!toku_list_empty(&src_ft->live_ft_handles)) {
list = src_ft->live_ft_handles.next;
FT_HANDLE src_handle = NULL;
src_handle = toku_list_struct(list, struct ft_handle, live_ft_handle_link);
toku_list_remove(&src_handle->live_ft_handle_link);
toku_ft_note_ft_handle_open(dst_h, src_handle);
toku_ft_note_ft_handle_open(dst_ft, src_handle);
if (src_handle->redirect_callback) {
src_handle->redirect_callback(src_handle, src_handle->redirect_callback_extra);
}
}
assert(dst_h);
// making sure that we are not leaking src_h
assert(toku_ft_needed_unlocked(src_h));
toku_ft_release_reflock(src_h);
assert(dst_ft);
// making sure that we are not leaking src_ft
assert(toku_ft_needed_unlocked(src_ft));
toku_ft_release_reflock(src_ft);
toku_ft_handle_close(tmp_dst_ft);
*dst_hp = dst_h;
*dst_ftp = dst_ft;
cleanup:
return r;
}
//This is the 'abort redirect' function. The redirect of old_h to new_h was done
//and now must be undone, so here we redirect new_h back to old_h.
//This is the 'abort redirect' function. The redirect of old_ft to new_ft was done
//and now must be undone, so here we redirect new_ft back to old_ft.
int
toku_dictionary_redirect_abort(FT old_h, FT new_h, TOKUTXN txn) {
char *old_fname_in_env = toku_cachefile_fname_in_env(old_h->cf);
toku_dictionary_redirect_abort(FT old_ft, FT new_ft, TOKUTXN txn) {
char *old_fname_in_env = toku_cachefile_fname_in_env(old_ft->cf);
int r;
{
FILENUM old_filenum = toku_cachefile_filenum(old_h->cf);
FILENUM new_filenum = toku_cachefile_filenum(new_h->cf);
FILENUM old_filenum = toku_cachefile_filenum(old_ft->cf);
FILENUM new_filenum = toku_cachefile_filenum(new_ft->cf);
assert(old_filenum.fileid!=new_filenum.fileid); //Cannot be same file.
//No living fts in old header.
toku_ft_grab_reflock(old_h);
assert(toku_list_empty(&old_h->live_ft_handles));
toku_ft_release_reflock(old_h);
toku_ft_grab_reflock(old_ft);
assert(toku_list_empty(&old_ft->live_ft_handles));
toku_ft_release_reflock(old_ft);
}
FT dst_h;
// redirect back from new_h to old_h
r = dictionary_redirect_internal(old_fname_in_env, new_h, txn, &dst_h);
FT dst_ft;
// redirect back from new_ft to old_ft
r = dictionary_redirect_internal(old_fname_in_env, new_ft, txn, &dst_ft);
if (r == 0) {
assert(dst_h == old_h);
assert(dst_ft == old_ft);
}
return r;
}
......
......@@ -110,7 +110,7 @@ void toku_ft_grab_reflock(FT ft);
void toku_ft_release_reflock(FT ft);
void toku_ft_create(FT *ftp, FT_OPTIONS options, CACHEFILE cf, TOKUTXN txn);
void toku_ft_free (FT h);
void toku_ft_free (FT ft);
int toku_read_ft_and_store_in_cachefile (FT_HANDLE ft_h, CACHEFILE cf, LSN max_acceptable_lsn, FT *header);
void toku_ft_note_ft_handle_open(FT ft, FT_HANDLE live);
......@@ -122,7 +122,7 @@ bool toku_ft_has_one_reference_unlocked(FT ft);
// will have to read in the ft in a new cachefile and new FT object.
void toku_ft_evict_from_memory(FT ft, bool oplsn_valid, LSN oplsn);
FT_HANDLE toku_ft_get_only_existing_ft_handle(FT h);
FT_HANDLE toku_ft_get_only_existing_ft_handle(FT ft);
void toku_ft_note_hot_begin(FT_HANDLE ft_h);
void toku_ft_note_hot_complete(FT_HANDLE ft_h, bool success, MSN msn_at_start_of_hot);
......@@ -141,18 +141,18 @@ toku_ft_init(
int toku_dictionary_redirect_abort(FT old_h, FT new_h, TOKUTXN txn) __attribute__ ((warn_unused_result));
int toku_dictionary_redirect (const char *dst_fname_in_env, FT_HANDLE old_ft, TOKUTXN txn);
void toku_reset_root_xid_that_created(FT h, TXNID new_root_xid_that_created);
void toku_reset_root_xid_that_created(FT ft, TXNID new_root_xid_that_created);
// Reset the root_xid_that_created field to the given value.
// This redefines which xid created the dictionary.
void toku_ft_add_txn_ref(FT h);
void toku_ft_remove_txn_ref(FT h);
void toku_ft_add_txn_ref(FT ft);
void toku_ft_remove_txn_ref(FT ft);
void toku_calculate_root_offset_pointer ( FT h, CACHEKEY* root_key, uint32_t *roothash);
void toku_ft_set_new_root_blocknum(FT h, CACHEKEY new_root_key);
LSN toku_ft_checkpoint_lsn(FT h) __attribute__ ((warn_unused_result));
void toku_ft_stat64 (FT h, struct ftstat64_s *s);
void toku_ft_get_fractal_tree_info64 (FT h, struct ftinfo64 *s);
void toku_calculate_root_offset_pointer (FT ft, CACHEKEY* root_key, uint32_t *roothash);
void toku_ft_set_new_root_blocknum(FT ft, CACHEKEY new_root_key);
LSN toku_ft_checkpoint_lsn(FT ft) __attribute__ ((warn_unused_result));
void toku_ft_stat64 (FT ft, struct ftstat64_s *s);
void toku_ft_get_fractal_tree_info64 (FT ft, struct ftinfo64 *s);
int toku_ft_iterate_fractal_tree_block_map(FT ft, int (*iter)(uint64_t,int64_t,int64_t,int64_t,int64_t,void*), void *iter_extra);
// unconditionally set the descriptor for an open FT. can't do this when
......
This diff is collapsed.
......@@ -642,7 +642,7 @@ int toku_ft_loader_internal_init (/* out */ FTLOADER *blp,
return 0;
}
int toku_ft_loader_open (/* out */ FTLOADER *blp,
int toku_ft_loader_open (FTLOADER *blp, /* out */
CACHETABLE cachetable,
generate_row_for_put_func g,
DB *src_db,
......@@ -656,9 +656,9 @@ int toku_ft_loader_open (/* out */ FTLOADER *blp,
uint64_t reserve_memory_size,
bool compress_intermediates,
bool allow_puts) {
// Effect: called by DB_ENV->create_loader to create a brt loader.
// Effect: called by DB_ENV->create_loader to create an ft loader.
// Arguments:
// blp Return the brt loader here.
// blp Return an ft loader ("bulk loader") here.
// g The function for generating a row
// src_db The source database. Needed by g. May be NULL if that's ok with g.
// N The number of dbs to create.
......@@ -2220,16 +2220,16 @@ struct dbout {
int64_t n_translations_limit;
struct translation *translation;
toku_mutex_t mutex;
FT h;
FT ft;
};
static inline void dbout_init(struct dbout *out, FT h) {
static inline void dbout_init(struct dbout *out, FT ft) {
out->fd = -1;
out->current_off = 0;
out->n_translations = out->n_translations_limit = 0;
out->translation = NULL;
toku_mutex_init(&out->mutex, NULL);
out->h = h;
out->ft = ft;
}
static inline void dbout_destroy(struct dbout *out) {
......@@ -2615,7 +2615,7 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
{
invariant(sts.n_subtrees==1);
out.h->h->root_blocknum = make_blocknum(sts.subtrees[0].block);
out.ft->h->root_blocknum = make_blocknum(sts.subtrees[0].block);
toku_free(sts.subtrees); sts.subtrees = NULL;
// write the descriptor
......@@ -3037,7 +3037,7 @@ static int write_translation_table (struct dbout *out, long long *off_of_transla
static int
write_header (struct dbout *out, long long translation_location_on_disk, long long translation_size_on_disk) {
int result = 0;
size_t size = toku_serialize_ft_size(out->h->h);
size_t size = toku_serialize_ft_size(out->ft->h);
size_t alloced_size = roundup_to_multiple(512, size);
struct wbuf wbuf;
char *MALLOC_N_ALIGNED(512, alloced_size, buf);
......@@ -3045,8 +3045,8 @@ write_header (struct dbout *out, long long translation_location_on_disk, long lo
result = get_error_errno();
} else {
wbuf_init(&wbuf, buf, size);
out->h->h->on_disk_stats = out->h->in_memory_stats;
toku_serialize_ft_to_wbuf(&wbuf, out->h->h, translation_location_on_disk, translation_size_on_disk);
out->ft->h->on_disk_stats = out->ft->in_memory_stats;
toku_serialize_ft_to_wbuf(&wbuf, out->ft->h, translation_location_on_disk, translation_size_on_disk);
for (size_t i=size; i<alloced_size; i++) buf[i]=0; // initialize all those unused spots to zero
if (wbuf.ndone != size)
result = EINVAL;
......
......@@ -337,11 +337,11 @@ void toku_ftnode_clone_partitions(FTNODE node, FTNODE cloned_node) {
}
}
void toku_evict_bn_from_memory(FTNODE node, int childnum, FT h) {
void toku_evict_bn_from_memory(FTNODE node, int childnum, FT ft) {
// free the basement node
assert(!node->dirty);
BASEMENTNODE bn = BLB(node, childnum);
toku_ft_decrease_stats(&h->in_memory_stats, bn->stat64_delta);
toku_ft_decrease_stats(&ft->in_memory_stats, bn->stat64_delta);
destroy_basement_node(bn);
set_BNULL(node, childnum);
BP_STATE(node, childnum) = PT_ON_DISK;
......
......@@ -297,7 +297,7 @@ void toku_destroy_ftnode_internals(FTNODE node);
void toku_ftnode_free (FTNODE *node);
bool toku_ftnode_fully_in_memory(FTNODE node);
void toku_ftnode_assert_fully_in_memory(FTNODE node);
void toku_evict_bn_from_memory(FTNODE node, int childnum, FT h);
void toku_evict_bn_from_memory(FTNODE node, int childnum, FT ft);
BASEMENTNODE toku_detach_bn(FTNODE node, int childnum);
void toku_ftnode_update_disk_stats(FTNODE ftnode, FT ft, bool for_checkpoint);
void toku_ftnode_clone_partitions(FTNODE node, FTNODE cloned_node);
......@@ -345,7 +345,7 @@ struct ft_search;
struct ftnode_fetch_extra {
enum ftnode_fetch_type type;
// needed for reading a node off disk
FT h;
FT ft;
// used in the case where type == ftnode_fetch_subset
// parameters needed to find out which child needs to be decompressed (so it can be read)
ft_search *search;
......@@ -406,7 +406,7 @@ long toku_bnc_memory_size(NONLEAF_CHILDINFO bnc);
long toku_bnc_memory_used(NONLEAF_CHILDINFO bnc);
void toku_bnc_insert_msg(NONLEAF_CHILDINFO bnc, const void *key, ITEMLEN keylen, const void *data, ITEMLEN datalen, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, DESCRIPTOR desc, ft_compare_func cmp);
void toku_bnc_empty(NONLEAF_CHILDINFO bnc);
void toku_bnc_flush_to_child(FT h, NONLEAF_CHILDINFO bnc, FTNODE child, TXNID parent_oldest_referenced_xid_known);
void toku_bnc_flush_to_child(FT ft, NONLEAF_CHILDINFO bnc, FTNODE child, TXNID parent_oldest_referenced_xid_known);
bool toku_bnc_should_promote(FT ft, NONLEAF_CHILDINFO bnc) __attribute__((const, nonnull));
bool toku_ftnode_nonleaf_is_gorged(FTNODE node, uint32_t nodesize);
......
......@@ -220,9 +220,9 @@ toku_rollback_fcreate (FILENUM filenum,
return 0;
}
int find_ft_from_filenum (const FT &h, const FILENUM &filenum);
int find_ft_from_filenum (const FT &h, const FILENUM &filenum) {
FILENUM thisfnum = toku_cachefile_filenum(h->cf);
int find_ft_from_filenum (const FT &ft, const FILENUM &filenum);
int find_ft_from_filenum (const FT &ft, const FILENUM &filenum) {
FILENUM thisfnum = toku_cachefile_filenum(ft->cf);
if (thisfnum.fileid<filenum.fileid) return -1;
if (thisfnum.fileid>filenum.fileid) return +1;
return 0;
......@@ -236,9 +236,8 @@ static int do_insertion (enum ft_msg_type type, FILENUM filenum, BYTESTRING key,
bool reset_root_xid_that_created) {
int r = 0;
//printf("%s:%d committing insert %s %s\n", __FILE__, __LINE__, key.data, data.data);
FT h;
h = NULL;
r = txn->open_fts.find_zero<FILENUM, find_ft_from_filenum>(filenum, &h, NULL);
FT ft = nullptr;
r = txn->open_fts.find_zero<FILENUM, find_ft_from_filenum>(filenum, &ft, NULL);
if (r == DB_NOTFOUND) {
assert(txn->for_recovery);
r = 0;
......@@ -247,7 +246,7 @@ static int do_insertion (enum ft_msg_type type, FILENUM filenum, BYTESTRING key,
assert(r==0);
if (oplsn.lsn != 0) { // if we are executing the recovery algorithm
LSN treelsn = toku_ft_checkpoint_lsn(h);
LSN treelsn = toku_ft_checkpoint_lsn(ft);
if (oplsn.lsn <= treelsn.lsn) { // if operation was already applied to tree ...
r = 0; // ... do not apply it again.
goto done;
......@@ -275,10 +274,10 @@ static int do_insertion (enum ft_msg_type type, FILENUM filenum, BYTESTRING key,
// no messages above us, we can implicitly promote uxrs based on this xid
oldest_referenced_xid_estimate,
!txn->for_recovery);
toku_ft_root_put_msg(h, &ftmsg, &gc_info);
toku_ft_root_put_msg(ft, &ftmsg, &gc_info);
if (reset_root_xid_that_created) {
TXNID new_root_xid_that_created = xids_get_outermost_xid(xids);
toku_reset_root_xid_that_created(h, new_root_xid_that_created);
toku_reset_root_xid_that_created(ft, new_root_xid_that_created);
}
}
done:
......@@ -579,15 +578,15 @@ toku_rollback_dictionary_redirect (FILENUM old_filenum,
CACHEFILE new_cf = NULL;
r = toku_cachefile_of_filenum(txn->logger->ct, new_filenum, &new_cf);
assert(r == 0);
FT CAST_FROM_VOIDP(new_h, toku_cachefile_get_userdata(new_cf));
FT CAST_FROM_VOIDP(new_ft, toku_cachefile_get_userdata(new_cf));
CACHEFILE old_cf = NULL;
r = toku_cachefile_of_filenum(txn->logger->ct, old_filenum, &old_cf);
assert(r == 0);
FT CAST_FROM_VOIDP(old_h, toku_cachefile_get_userdata(old_cf));
FT CAST_FROM_VOIDP(old_ft, toku_cachefile_get_userdata(old_cf));
//Redirect back from new to old.
r = toku_dictionary_redirect_abort(old_h, new_h, txn);
r = toku_dictionary_redirect_abort(old_ft, new_ft, txn);
assert(r==0);
}
return r;
......
......@@ -123,7 +123,7 @@ int toku_rollback_cleaner_callback (
void* UU(extraargs)
);
static inline CACHETABLE_WRITE_CALLBACK get_write_callbacks_for_rollback_log(FT h) {
static inline CACHETABLE_WRITE_CALLBACK get_write_callbacks_for_rollback_log(FT ft) {
CACHETABLE_WRITE_CALLBACK wc;
wc.flush_callback = toku_rollback_flush_callback;
wc.pe_est_callback = toku_rollback_pe_est_callback;
......@@ -131,6 +131,6 @@ static inline CACHETABLE_WRITE_CALLBACK get_write_callbacks_for_rollback_log(FT
wc.cleaner_callback = toku_rollback_cleaner_callback;
wc.clone_callback = toku_rollback_clone_callback;
wc.checkpoint_complete_callback = nullptr;
wc.write_extraargs = h;
wc.write_extraargs = ft;
return wc;
}
......@@ -137,7 +137,7 @@ void *toku_memdup_in_rollback(ROLLBACK_LOG_NODE log, const void *v, size_t len);
// if necessary.
void toku_maybe_spill_rollbacks(TOKUTXN txn, ROLLBACK_LOG_NODE log);
void toku_txn_maybe_note_ft (TOKUTXN txn, FT h);
void toku_txn_maybe_note_ft (TOKUTXN txn, FT ft);
int toku_logger_txn_rollback_stats(TOKUTXN txn, struct txn_stat *txn_stat);
int toku_find_xid_by_xid (const TXNID &xid, const TXNID &xidfind);
......
......@@ -106,7 +106,7 @@ save_data (ITEMLEN UU(keylen), bytevec UU(key), ITEMLEN vallen, bytevec val, voi
// Verify that different cursors return different data items when a DBT is initialized to all zeros (no flags)
// Note: The BRT test used to implement DBTs with per-cursor allocated space, but there isn't any such thing any more
// Note: The ft test used to implement DBTs with per-cursor allocated space, but there isn't any such thing any more
// so this test is a little bit obsolete.
static void test_multiple_ft_cursor_dbts(int n) {
if (verbose) printf("test_multiple_ft_cursors:%d\n", n);
......
......@@ -185,7 +185,7 @@ verify_basement_node_msns(FTNODE node, MSN expected)
}
//
// Maximum node size according to the BRT: 1024 (expected node size after split)
// Maximum node size according to the FT: 1024 (expected node size after split)
// Maximum basement node size: 256
// Actual node size before split: 2048
// Actual basement node size before split: 256
......@@ -237,7 +237,7 @@ test_split_on_boundary(void)
}
//
// Maximum node size according to the BRT: 1024 (expected node size after split)
// Maximum node size according to the FT: 1024 (expected node size after split)
// Maximum basement node size: 256 (except the last)
// Actual node size before split: 4095
// Actual basement node size before split: 256 (except the last, of size 2K)
......@@ -302,7 +302,7 @@ test_split_with_everything_on_the_left(void)
//
// Maximum node size according to the BRT: 1024 (expected node size after split)
// Maximum node size according to the FT: 1024 (expected node size after split)
// Maximum basement node size: 256 (except the last)
// Actual node size before split: 4095
// Actual basement node size before split: 256 (except the last, of size 2K)
......@@ -487,7 +487,7 @@ test_split_at_end(void)
toku_destroy_ftnode_internals(&sn);
}
// Maximum node size according to the BRT: 1024 (expected node size after split)
// Maximum node size according to the FT: 1024 (expected node size after split)
// Maximum basement node size: 256
// Actual node size before split: 2048
// Actual basement node size before split: 256
......
......@@ -224,17 +224,17 @@ static int print_le(const void* key, const uint32_t keylen, const LEAFENTRY &le,
return 0;
}
static void dump_node(int fd, BLOCKNUM blocknum, FT h) {
static void dump_node(int fd, BLOCKNUM blocknum, FT ft) {
FTNODE n;
struct ftnode_fetch_extra bfe;
FTNODE_DISK_DATA ndd = NULL;
fill_bfe_for_full_read(&bfe, h);
fill_bfe_for_full_read(&bfe, ft);
int r = toku_deserialize_ftnode_from (fd, blocknum, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe);
assert_zero(r);
assert(n!=0);
printf("ftnode\n");
DISKOFF disksize, diskoffset;
toku_translate_blocknum_to_offset_size(h->blocktable, blocknum, &diskoffset, &disksize);
toku_translate_blocknum_to_offset_size(ft->blocktable, blocknum, &diskoffset, &disksize);
printf(" diskoffset =%" PRId64 "\n", diskoffset);
printf(" disksize =%" PRId64 "\n", disksize);
printf(" serialize_size =%u\n", toku_serialize_ftnode_size(n));
......@@ -331,14 +331,14 @@ static void dump_node(int fd, BLOCKNUM blocknum, FT h) {
toku_free(ndd);
}
static void dump_block_translation(FT h, uint64_t offset) {
toku_blocknum_dump_translation(h->blocktable, make_blocknum(offset));
static void dump_block_translation(FT ft, uint64_t offset) {
toku_blocknum_dump_translation(ft->blocktable, make_blocknum(offset));
}
static void dump_fragmentation(int UU(f), FT h, int tsv) {
static void dump_fragmentation(int UU(f), FT ft, int tsv) {
int64_t used_space;
int64_t total_space;
toku_blocktable_internal_fragmentation(h->blocktable, &total_space, &used_space);
toku_blocktable_internal_fragmentation(ft->blocktable, &total_space, &used_space);
int64_t fragsizes = total_space - used_space;
if (tsv) {
......@@ -354,7 +354,7 @@ static void dump_fragmentation(int UU(f), FT h, int tsv) {
typedef struct {
int fd;
FT h;
FT ft;
uint64_t blocksizes;
uint64_t leafsizes;
uint64_t leafblocks;
......@@ -365,7 +365,7 @@ static int nodesizes_helper(BLOCKNUM b, int64_t size, int64_t UU(address), void
FTNODE n;
FTNODE_DISK_DATA ndd = NULL;
struct ftnode_fetch_extra bfe;
fill_bfe_for_full_read(&bfe, info->h);
fill_bfe_for_full_read(&bfe, info->ft);
int r = toku_deserialize_ftnode_from(info->fd, b, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe);
if (r==0) {
info->blocksizes += size;
......@@ -379,12 +379,12 @@ static int nodesizes_helper(BLOCKNUM b, int64_t size, int64_t UU(address), void
return 0;
}
static void dump_nodesizes(int fd, FT h) {
static void dump_nodesizes(int fd, FT ft) {
frag_help_extra info;
memset(&info, 0, sizeof(info));
info.fd = fd;
info.h = h;
toku_blocktable_iterate(h->blocktable, TRANSLATION_CHECKPOINTED,
info.ft = ft;
toku_blocktable_iterate(ft->blocktable, TRANSLATION_CHECKPOINTED,
nodesizes_helper, &info, true, true);
printf("leafblocks\t%" PRIu64 "\n", info.leafblocks);
printf("blocksizes\t%" PRIu64 "\n", info.blocksizes);
......@@ -402,12 +402,12 @@ static void dump_garbage_stats(int fd, FT ft) {
typedef struct __dump_node_extra {
int fd;
FT h;
FT ft;
} dump_node_extra;
static int dump_node_wrapper(BLOCKNUM b, int64_t UU(size), int64_t UU(address), void *extra) {
dump_node_extra *CAST_FROM_VOIDP(info, extra);
dump_node(info->fd, b, info->h);
dump_node(info->fd, b, info->ft);
return 0;
}
......@@ -472,9 +472,9 @@ static void verify_block(unsigned char *cp, uint64_t file_offset, uint64_t size)
printf("offset %u expected %" PRIu64 "\n", offset, size);
}
static void dump_block(int fd, BLOCKNUM blocknum, FT h) {
static void dump_block(int fd, BLOCKNUM blocknum, FT ft) {
DISKOFF offset, size;
toku_translate_blocknum_to_offset_size(h->blocktable, blocknum, &offset, &size);
toku_translate_blocknum_to_offset_size(ft->blocktable, blocknum, &offset, &size);
printf("%" PRId64 " at %" PRId64 " size %" PRId64 "\n", blocknum.b, offset, size);
unsigned char *CAST_FROM_VOIDP(vp, toku_malloc(size));
......@@ -698,7 +698,7 @@ int main (int argc, const char *const argv[]) {
struct __dump_node_extra info;
info.fd = fd;
info.h = ft;
info.ft = ft;
toku_blocktable_iterate(ft->blocktable, TRANSLATION_CHECKPOINTED,
dump_node_wrapper, &info, true, true);
}
......
......@@ -618,7 +618,7 @@ int remove_txn (const FT &h, const uint32_t UU(idx), TOKUTXN const UU(txn))
return 0;
}
// for every BRT in txn, remove it.
// for every ft in txn, remove it.
static void note_txn_closing (TOKUTXN txn) {
txn->open_fts.iterate<struct tokutxn, remove_txn>(txn);
}
......
......@@ -103,7 +103,7 @@ const int envflags = DB_INIT_MPOOL |
DB_ENV* env;
unsigned int leaf_hits;
// Custom Update Function for our test BRT.
// Custom Update Function for our test FT.
static int
update_func(DB* UU(db),
const DBT* key,
......@@ -266,7 +266,7 @@ test_main(int argc, char * const argv[])
default_parse_args(argc, argv);
hot_test_setup();
// Create and Open the Database/BRT
// Create and Open the Database/FT
DB *db = NULL;
const unsigned int BIG = 4000000;
const unsigned int SMALL = 10;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment