Commit 45794eac authored by John Esmet

FT-279 Clean up ftnode_fetch_extra struct and, most importantly, its initialization code
parent 3e8a2988
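In short, the commit replaces the free-standing fill_bfe_*() helpers (and the FTNODE_FETCH_EXTRA pointer typedef) with member initializers on a new ftnode_fetch_extra class. A minimal before/after sketch of the calling convention, using the full-read case from the hunks below (variables such as ft, root, fullhash and root_node are illustrative, not part of this commit):

    // Before: POD struct filled in by a free function, usually passed around as FTNODE_FETCH_EXTRA.
    struct ftnode_fetch_extra bfe;
    fill_bfe_for_full_read(&bfe, ft);

    // After: the object initializes itself, and callers pass a plain ftnode_fetch_extra *.
    ftnode_fetch_extra bfe;
    bfe.create_for_full_read(ft);
    toku_pin_ftnode(ft, root, fullhash, &bfe, PL_WRITE_EXPENSIVE, &root_node, true);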
@@ -209,7 +209,7 @@ toku_pin_ftnode_for_query(
UNLOCKERS unlockers,
ANCESTORS ancestors,
const pivot_bounds &bounds,
- FTNODE_FETCH_EXTRA bfe,
+ ftnode_fetch_extra *bfe,
bool apply_ancestor_messages, // this bool is probably temporary, for #3972, once we know how range query estimates work, will revisit this
FTNODE *node_p,
bool* msgs_applied)
@@ -322,7 +322,7 @@ toku_pin_ftnode_with_dep_nodes(
FT ft,
BLOCKNUM blocknum,
uint32_t fullhash,
- FTNODE_FETCH_EXTRA bfe,
+ ftnode_fetch_extra *bfe,
pair_lock_type lock_type,
uint32_t num_dependent_nodes,
FTNODE *dependent_nodes,
@@ -364,7 +364,7 @@ toku_pin_ftnode_with_dep_nodes(
void toku_pin_ftnode(FT ft,
BLOCKNUM blocknum,
uint32_t fullhash,
- FTNODE_FETCH_EXTRA bfe,
+ ftnode_fetch_extra *bfe,
pair_lock_type lock_type,
FTNODE *node_p,
bool move_messages) {
...
@@ -148,7 +148,7 @@ toku_pin_ftnode_for_query(
UNLOCKERS unlockers,
ANCESTORS ancestors,
const pivot_bounds &bounds,
- FTNODE_FETCH_EXTRA bfe,
+ ftnode_fetch_extra *bfe,
bool apply_ancestor_messages, // this bool is probably temporary, for #3972, once we know how range query estimates work, will revisit this
FTNODE *node_p,
bool* msgs_applied
@@ -159,7 +159,7 @@ void toku_pin_ftnode(
FT ft,
BLOCKNUM blocknum,
uint32_t fullhash,
- FTNODE_FETCH_EXTRA bfe,
+ ftnode_fetch_extra *bfe,
pair_lock_type lock_type,
FTNODE *node_p,
bool move_messages
@@ -171,7 +171,7 @@ void toku_pin_ftnode_with_dep_nodes(
FT ft,
BLOCKNUM blocknum,
uint32_t fullhash,
- FTNODE_FETCH_EXTRA bfe,
+ ftnode_fetch_extra *bfe,
pair_lock_type lock_type,
uint32_t num_dependent_nodes,
FTNODE *dependent_nodes,
...
@@ -491,8 +491,8 @@ ct_maybe_merge_child(struct flusher_advice *fa,
uint32_t fullhash;
CACHEKEY root;
toku_calculate_root_offset_pointer(ft, &root, &fullhash);
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_full_read(&bfe, ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft);
toku_pin_ftnode(ft, root, fullhash, &bfe, PL_WRITE_EXPENSIVE, &root_node, true);
toku_ftnode_assert_fully_in_memory(root_node);
}
@@ -1075,8 +1075,8 @@ ft_split_child(
static void bring_node_fully_into_memory(FTNODE node, FT ft) {
if (!toku_ftnode_fully_in_memory(node)) {
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_full_read(&bfe, ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft);
toku_cachetable_pf_pinned_pair(
node,
toku_ftnode_pf_callback,
@@ -1379,8 +1379,8 @@ ft_merge_child(
FTNODE childa, childb;
{
uint32_t childfullhash = compute_child_fullhash(ft->cf, node, childnuma);
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_full_read(&bfe, ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft);
toku_pin_ftnode_with_dep_nodes(ft, BP_BLOCKNUM(node, childnuma), childfullhash, &bfe, PL_WRITE_EXPENSIVE, 1, &node, &childa, true);
}
// for test
@@ -1390,8 +1390,8 @@ ft_merge_child(
dep_nodes[0] = node;
dep_nodes[1] = childa;
uint32_t childfullhash = compute_child_fullhash(ft->cf, node, childnumb);
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_full_read(&bfe, ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft);
toku_pin_ftnode_with_dep_nodes(ft, BP_BLOCKNUM(node, childnumb), childfullhash, &bfe, PL_WRITE_EXPENSIVE, 2, dep_nodes, &childb, true);
}
@@ -1520,10 +1520,10 @@ void toku_ft_flush_some_child(FT ft, FTNODE parent, struct flusher_advice *fa)
ft->blocktable.verify_blocknum_allocated(targetchild);
uint32_t childfullhash = compute_child_fullhash(ft->cf, parent, childnum);
FTNODE child;
- struct ftnode_fetch_extra bfe;
+ ftnode_fetch_extra bfe;
// Note that we don't read the entire node into memory yet.
// The idea is let's try to do the minimum work before releasing the parent lock
- fill_bfe_for_min_read(&bfe, ft);
+ bfe.create_for_min_read(ft);
toku_pin_ftnode_with_dep_nodes(ft, targetchild, childfullhash, &bfe, PL_WRITE_EXPENSIVE, 1, &parent, &child, true);
// for test
...
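The flusher hunks above all follow the same pin-with-minimal-fetch idea: build a min-read bfe so that no partitions are fetched while the parent is still locked. Consolidated into one hedged sketch (the helper name pin_child_min_read is hypothetical; the calls mirror toku_ft_flush_some_child above):

    // Sketch only: pin a child while fetching no partitions, deferring basement-node
    // I/O until after the parent's lock can be released.
    static void pin_child_min_read(FT ft, FTNODE parent, int childnum, FTNODE *child) {
        uint32_t childfullhash = compute_child_fullhash(ft->cf, parent, childnum);
        ftnode_fetch_extra bfe;
        bfe.create_for_min_read(ft);  // pivots/subtree estimates only, no partitions
        toku_pin_ftnode_with_dep_nodes(ft, BP_BLOCKNUM(parent, childnum), childfullhash,
                                       &bfe, PL_WRITE_EXPENSIVE, 1, &parent, child, true);
    }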
@@ -328,8 +328,8 @@ toku_ft_hot_optimize(FT_HANDLE ft_handle, DBT* left, DBT* right,
// Get root node (the first parent of each successive HOT
// call.)
toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &fullhash);
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_full_read(&bfe, ft_handle->ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft_handle->ft);
toku_pin_ftnode(ft_handle->ft,
(BLOCKNUM) root_key,
fullhash,
...
@@ -298,6 +298,92 @@ struct ft_handle {
PAIR_ATTR make_ftnode_pair_attr(FTNODE node);
PAIR_ATTR make_invalid_pair_attr(void);
//
// Field in ftnode_fetch_extra that tells the
// partial fetch callback what piece of the node
// is needed by the ydb
//
enum ftnode_fetch_type {
ftnode_fetch_none = 1, // no partitions needed.
ftnode_fetch_subset, // some subset of partitions needed
ftnode_fetch_prefetch, // this is part of a prefetch call
ftnode_fetch_all, // every partition is needed
ftnode_fetch_keymatch, // one child is needed if it holds both keys
};
// Info passed to cachetable fetch callbacks to say which parts of a node
// should be fetched (perhaps a subset, perhaps the whole thing, depending
// on operation)
class ftnode_fetch_extra {
public:
// Used when the whole node must be in memory, such as for flushes.
void create_for_full_read(FT ft);
// A subset of children are necessary. Used by point queries.
void create_for_subset_read(FT ft, ft_search *search, const DBT *left, const DBT *right,
bool left_is_neg_infty, bool right_is_pos_infty,
bool disable_prefetching, bool read_all_partitions);
// No partitions are necessary - only pivots and/or subtree estimates.
// Currently used for stat64.
void create_for_min_read(FT ft);
// Used to prefetch partitions that fall within the bounds given by the cursor.
void create_for_prefetch(FT ft, struct ft_cursor *cursor);
// Only a portion of the node (within a keyrange) is required.
// Used by keysrange when the left and right key are in the same basement node.
void create_for_keymatch(FT ft, const DBT *left, const DBT *right,
bool disable_prefetching, bool read_all_partitions);
void destroy(void);
// return: true if a specific childnum is required to be in memory
bool wants_child_available(int childnum) const;
// return: the childnum of the leftmost child that is required to be in memory
int leftmost_child_wanted(FTNODE node) const;
// return: the childnum of the rightmost child that is required to be in memory
int rightmost_child_wanted(FTNODE node) const;
// needed for reading a node off disk
FT ft;
enum ftnode_fetch_type type;
// used in the case where type == ftnode_fetch_subset
// parameters needed to find out which child needs to be decompressed (so it can be read)
ft_search *search;
DBT range_lock_left_key, range_lock_right_key;
bool left_is_neg_infty, right_is_pos_infty;
// states if we should try to aggressively fetch basement nodes
// that are not specifically needed for current query,
// but may be needed for other cursor operations user is doing
// For example, if we have not disabled prefetching,
// and the user is doing a dictionary wide scan, then
// even though a query may only want one basement node,
// we fetch all basement nodes in a leaf node.
bool disable_prefetching;
// this value will be set during the fetch_callback call by toku_ftnode_fetch_callback or toku_ftnode_pf_req_callback
// the callbacks need to evaluate this anyway, so we cache it here so the search code does not reevaluate it
int child_to_read;
// when we read internal nodes, we want to read all the data off disk in one I/O
// then we'll treat it as normal and only decompress the needed partitions etc.
bool read_all_partitions;
// Accounting: How many bytes were read, and how much time did we spend doing I/O?
uint64_t bytes_read;
tokutime_t io_time;
tokutime_t decompress_time;
tokutime_t deserialize_time;
private:
void _create_internal(FT ft_);
};
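A hedged usage sketch for the prefetch variant (mirroring the test changes further below; fd, blocknum, ft and cursor are assumed to be in scope): create_for_prefetch() copies the cursor's range-lock keys into the bfe, so it is paired with destroy() to free them.

    ftnode_fetch_extra bfe;
    bfe.create_for_prefetch(ft, cursor);   // copies the cursor's left/right range-lock keys
    FTNODE node = nullptr;
    FTNODE_DISK_DATA ndd = nullptr;
    int r = toku_deserialize_ftnode_from(fd, blocknum, 0 /*fullhash*/, &node, &ndd, &bfe);
    assert(r == 0);
    bfe.destroy();                         // frees the copied keys (see the prelock tests below)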
// Only exported for tests.
// Cachetable callbacks for ftnodes.
@@ -333,47 +419,6 @@ STAT64INFO_S toku_get_and_clear_basement_stats(FTNODE leafnode);
void toku_verify_or_set_counts(FTNODE);
//
// Helper function to fill a ftnode_fetch_extra with data
// that will tell the fetch callback that the entire node is
// necessary. Used in cases where the entire node
// is required, such as for flushes.
//
void fill_bfe_for_full_read(struct ftnode_fetch_extra *bfe, FT ft);
//
// Helper function to fill a ftnode_fetch_extra with data
// that will tell the fetch callback that an explicit range of children is
// necessary. Used in cases where the portion of the node that is required
// is known in advance, e.g. for keysrange when the left and right key
// are in the same basement node.
//
void fill_bfe_for_keymatch(struct ftnode_fetch_extra *bfe, FT ft,
const DBT *left, const DBT *right,
bool disable_prefetching, bool read_all_partitions);
//
// Helper function to fill a ftnode_fetch_extra with data
// that will tell the fetch callback that some subset of the node
// necessary. Used in cases where some of the node is required
// such as for a point query.
//
void fill_bfe_for_subset_read(struct ftnode_fetch_extra *bfe, FT ft, ft_search *search,
const DBT *left, const DBT *right,
bool left_is_neg_infty, bool right_is_pos_infty,
bool disable_prefetching, bool read_all_partitions);
//
// Helper function to fill a ftnode_fetch_extra with data
// that will tell the fetch callback that no partitions are
// necessary, only the pivots and/or subtree estimates.
// Currently used for stat64.
//
void fill_bfe_for_min_read(struct ftnode_fetch_extra *bfe, FT ft);
void fill_bfe_for_prefetch(struct ftnode_fetch_extra *bfe, FT ft, struct ft_cursor *cursor);
void destroy_bfe_for_prefetch(struct ftnode_fetch_extra *bfe);
// TODO: consider moving this to ft/pivotkeys.cc
class pivot_bounds {
public:
@@ -396,11 +441,6 @@ class pivot_bounds {
const DBT _upper_bound_inclusive;
};
// TODO: move into the ftnode_fetch_extra class
bool toku_bfe_wants_child_available (struct ftnode_fetch_extra* bfe, int childnum);
int toku_bfe_leftmost_child_wanted(struct ftnode_fetch_extra *bfe, FTNODE node);
int toku_bfe_rightmost_child_wanted(struct ftnode_fetch_extra *bfe, FTNODE node);
// allocate a block number
// allocate and initialize a ftnode
// put the ftnode into the cache table
@@ -584,7 +624,7 @@ typedef struct {
TOKU_ENGINE_STATUS_ROW_S status[FT_STATUS_NUM_ROWS];
} FT_STATUS_S, *FT_STATUS;
- void toku_ft_status_update_pivot_fetch_reason(struct ftnode_fetch_extra *bfe);
+ void toku_ft_status_update_pivot_fetch_reason(ftnode_fetch_extra *bfe);
void toku_ft_status_update_flush_reason(FTNODE node, uint64_t uncompressed_bytes_flushed, uint64_t bytes_written, tokutime_t write_time, bool for_checkpoint);
void toku_ft_status_update_serialize_times(FTNODE node, tokutime_t serialize_time, tokutime_t compress_time);
void toku_ft_status_update_deserialize_times(FTNODE node, tokutime_t deserialize_time, tokutime_t decompress_time);
...
@@ -177,8 +177,8 @@ int toku_testsetup_get_sersize(FT_HANDLE ft_handle, BLOCKNUM diskoff) // Return
{
assert(testsetup_initialized);
void *node_v;
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_full_read(&bfe, ft_handle->ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft_handle->ft);
int r = toku_cachetable_get_and_pin(
ft_handle->ft->cf, diskoff,
toku_cachetable_hash(ft_handle->ft->cf, diskoff),
@@ -204,8 +204,8 @@ int toku_testsetup_insert_to_leaf (FT_HANDLE ft_handle, BLOCKNUM blocknum, const
assert(testsetup_initialized);
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_full_read(&bfe, ft_handle->ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft_handle->ft);
r = toku_cachetable_get_and_pin(
ft_handle->ft->cf,
blocknum,
@@ -258,8 +258,8 @@ testhelper_string_key_cmp(DB *UU(e), const DBT *a, const DBT *b)
void
toku_pin_node_with_min_bfe(FTNODE* node, BLOCKNUM b, FT_HANDLE t)
{
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_min_read(&bfe, t->ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(t->ft);
toku_pin_ftnode(
t->ft,
b,
@@ -277,8 +277,8 @@ int toku_testsetup_insert_to_nonleaf (FT_HANDLE ft_handle, BLOCKNUM blocknum, en
assert(testsetup_initialized);
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_full_read(&bfe, ft_handle->ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft_handle->ft);
r = toku_cachetable_get_and_pin(
ft_handle->ft->cf,
blocknum,
...
@@ -288,8 +288,8 @@ toku_get_node_for_verify(
)
{
uint32_t fullhash = toku_cachetable_hash(ft_handle->ft->cf, blocknum);
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_full_read(&bfe, ft_handle->ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft_handle->ft);
toku_pin_ftnode(
ft_handle->ft,
blocknum,
...
@@ -1045,8 +1045,8 @@ garbage_helper(BLOCKNUM blocknum, int64_t UU(size), int64_t UU(address), void *e
struct garbage_helper_extra *CAST_FROM_VOIDP(info, extra);
FTNODE node;
FTNODE_DISK_DATA ndd;
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_full_read(&bfe, info->ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(info->ft);
int fd = toku_cachefile_get_fd(info->ft->cf);
int r = toku_deserialize_ftnode_from(fd, blocknum, 0, &node, &ndd, &bfe);
if (r != 0) {
...
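For reference, garbage_helper above shows the pattern the offline tools further below also use: read a node straight off disk with a full-read bfe instead of pinning it through the cachetable. A minimal sketch based on that hunk (error handling elided; blocknum is assumed to be in scope):

    ftnode_fetch_extra bfe;
    bfe.create_for_full_read(ft);
    int fd = toku_cachefile_get_fd(ft->cf);
    FTNODE node;
    FTNODE_DISK_DATA ndd;
    int r = toku_deserialize_ftnode_from(fd, blocknum, 0 /*fullhash*/, &node, &ndd, &bfe);
    if (r != 0) {
        // handle the error; on success the caller frees node and ndd
        // (toku_ftnode_free / toku_free, as in the tests below)
    }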
@@ -366,73 +366,6 @@ void toku_initialize_empty_ftnode(FTNODE node, BLOCKNUM blocknum, int height, in
int toku_ftnode_which_child(FTNODE node, const DBT *k, const toku::comparator &cmp);
void toku_ftnode_save_ct_pair(CACHEKEY key, void *value_data, PAIR p);
//
// Field in ftnode_fetch_extra that tells the
// partial fetch callback what piece of the node
// is needed by the ydb
//
enum ftnode_fetch_type {
ftnode_fetch_none=1, // no partitions needed.
ftnode_fetch_subset, // some subset of partitions needed
ftnode_fetch_prefetch, // this is part of a prefetch call
ftnode_fetch_all, // every partition is needed
ftnode_fetch_keymatch, // one child is needed if it holds both keys
};
static bool is_valid_ftnode_fetch_type(enum ftnode_fetch_type type) UU();
static bool is_valid_ftnode_fetch_type(enum ftnode_fetch_type type) {
switch (type) {
case ftnode_fetch_none:
case ftnode_fetch_subset:
case ftnode_fetch_prefetch:
case ftnode_fetch_all:
case ftnode_fetch_keymatch:
return true;
default:
return false;
}
}
//
// An extra parameter passed to cachetable functions
// That is used in all types of fetch callbacks.
// The contents help the partial fetch and fetch
// callbacks retrieve the pieces of a node necessary
// for the ensuing operation (flush, query, ...)
//
struct ft_search;
struct ftnode_fetch_extra {
enum ftnode_fetch_type type;
// needed for reading a node off disk
FT ft;
// used in the case where type == ftnode_fetch_subset
// parameters needed to find out which child needs to be decompressed (so it can be read)
ft_search *search;
DBT range_lock_left_key, range_lock_right_key;
bool left_is_neg_infty, right_is_pos_infty;
// states if we should try to aggressively fetch basement nodes
// that are not specifically needed for current query,
// but may be needed for other cursor operations user is doing
// For example, if we have not disabled prefetching,
// and the user is doing a dictionary wide scan, then
// even though a query may only want one basement node,
// we fetch all basement nodes in a leaf node.
bool disable_prefetching;
// this value will be set during the fetch_callback call by toku_ftnode_fetch_callback or toku_ftnode_pf_req_callback
// thi callbacks need to evaluate this anyway, so we cache it here so the search code does not reevaluate it
int child_to_read;
// when we read internal nodes, we want to read all the data off disk in one I/O
// then we'll treat it as normal and only decompress the needed partitions etc.
bool read_all_partitions;
// Accounting: How many bytes were read, and how much time did we spend doing I/O?
uint64_t bytes_read;
tokutime_t io_time;
tokutime_t decompress_time;
tokutime_t deserialize_time;
};
typedef struct ftnode_fetch_extra *FTNODE_FETCH_EXTRA;
//
// TODO: put the heaviside functions into their respective 'struct .*extra;' namespaces
//
...
@@ -108,9 +108,9 @@ int toku_serialize_rollback_log_to(int fd, ROLLBACK_LOG_NODE log, SERIALIZED_ROL
void toku_serialize_rollback_log_to_memory_uncompressed(ROLLBACK_LOG_NODE log, SERIALIZED_ROLLBACK_LOG_NODE serialized);
int toku_deserialize_rollback_log_from(int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE *logp, FT ft);
- int toku_deserialize_bp_from_disk(FTNODE node, FTNODE_DISK_DATA ndd, int childnum, int fd, struct ftnode_fetch_extra *bfe);
- int toku_deserialize_bp_from_compressed(FTNODE node, int childnum, struct ftnode_fetch_extra *bfe);
- int toku_deserialize_ftnode_from(int fd, BLOCKNUM off, uint32_t fullhash, FTNODE *node, FTNODE_DISK_DATA *ndd, struct ftnode_fetch_extra *bfe);
+ int toku_deserialize_bp_from_disk(FTNODE node, FTNODE_DISK_DATA ndd, int childnum, int fd, ftnode_fetch_extra *bfe);
+ int toku_deserialize_bp_from_compressed(FTNODE node, int childnum, ftnode_fetch_extra *bfe);
+ int toku_deserialize_ftnode_from(int fd, BLOCKNUM off, uint32_t fullhash, FTNODE *node, FTNODE_DISK_DATA *ndd, ftnode_fetch_extra *bfe);
// used by nonleaf node partial eviction
void toku_create_compressed_partition_from_available(FTNODE node, int childnum,
...
@@ -114,12 +114,12 @@ test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) {
cursor->right_is_pos_infty = true;
cursor->disable_prefetching = false;
- struct ftnode_fetch_extra bfe;
+ ftnode_fetch_extra bfe;
// quick test to see that we have the right behavior when we set
// disable_prefetching to true
cursor->disable_prefetching = true;
- fill_bfe_for_prefetch(&bfe, ft_h, cursor);
+ bfe.create_for_prefetch( ft_h, cursor);
FTNODE_DISK_DATA ndd = NULL;
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
assert(r==0);
@@ -131,14 +131,14 @@ test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) {
assert(BP_STATE(dn,0) == PT_ON_DISK);
assert(BP_STATE(dn,1) == PT_ON_DISK);
assert(BP_STATE(dn,2) == PT_ON_DISK);
- destroy_bfe_for_prefetch(&bfe);
+ bfe.destroy();
toku_ftnode_free(&dn);
toku_free(ndd);
// now enable prefetching again
cursor->disable_prefetching = false;
- fill_bfe_for_prefetch(&bfe, ft_h, cursor);
+ bfe.create_for_prefetch( ft_h, cursor);
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
assert(r==0);
assert(dn->n_children == 3);
@@ -153,14 +153,14 @@ test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) {
assert(BP_STATE(dn,0) == PT_AVAIL);
assert(BP_STATE(dn,1) == PT_AVAIL);
assert(BP_STATE(dn,2) == PT_AVAIL);
- destroy_bfe_for_prefetch(&bfe);
+ bfe.destroy();
toku_ftnode_free(&dn);
toku_free(ndd);
uint64_t left_key = 150;
toku_fill_dbt(&cursor->range_lock_left_key, &left_key, sizeof(uint64_t));
cursor->left_is_neg_infty = false;
- fill_bfe_for_prefetch(&bfe, ft_h, cursor);
+ bfe.create_for_prefetch( ft_h, cursor);
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
assert(r==0);
assert(dn->n_children == 3);
@@ -175,14 +175,14 @@ test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) {
assert(BP_STATE(dn,0) == PT_ON_DISK);
assert(BP_STATE(dn,1) == PT_AVAIL);
assert(BP_STATE(dn,2) == PT_AVAIL);
- destroy_bfe_for_prefetch(&bfe);
+ bfe.destroy();
toku_ftnode_free(&dn);
toku_free(ndd);
uint64_t right_key = 151;
toku_fill_dbt(&cursor->range_lock_right_key, &right_key, sizeof(uint64_t));
cursor->right_is_pos_infty = false;
- fill_bfe_for_prefetch(&bfe, ft_h, cursor);
+ bfe.create_for_prefetch( ft_h, cursor);
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
assert(r==0);
assert(dn->n_children == 3);
@@ -197,13 +197,13 @@ test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) {
assert(BP_STATE(dn,0) == PT_ON_DISK);
assert(BP_STATE(dn,1) == PT_AVAIL);
assert(BP_STATE(dn,2) == PT_ON_DISK);
- destroy_bfe_for_prefetch(&bfe);
+ bfe.destroy();
toku_ftnode_free(&dn);
toku_free(ndd);
left_key = 100000;
right_key = 100000;
- fill_bfe_for_prefetch(&bfe, ft_h, cursor);
+ bfe.create_for_prefetch( ft_h, cursor);
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
assert(r==0);
assert(dn->n_children == 3);
@@ -218,13 +218,13 @@ test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) {
assert(BP_STATE(dn,0) == PT_ON_DISK);
assert(BP_STATE(dn,1) == PT_ON_DISK);
assert(BP_STATE(dn,2) == PT_AVAIL);
- destroy_bfe_for_prefetch(&bfe);
+ bfe.destroy();
toku_free(ndd);
toku_ftnode_free(&dn);
left_key = 100;
right_key = 100;
- fill_bfe_for_prefetch(&bfe, ft_h, cursor);
+ bfe.create_for_prefetch( ft_h, cursor);
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
assert(r==0);
assert(dn->n_children == 3);
@@ -239,7 +239,7 @@ test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) {
assert(BP_STATE(dn,0) == PT_AVAIL);
assert(BP_STATE(dn,1) == PT_ON_DISK);
assert(BP_STATE(dn,2) == PT_ON_DISK);
- destroy_bfe_for_prefetch(&bfe);
+ bfe.destroy();
toku_ftnode_free(&dn);
toku_free(ndd);
@@ -260,15 +260,14 @@ test_subset_read(int fd, FT_HANDLE UU(ft), FT ft_h) {
cursor->left_is_neg_infty = true;
cursor->right_is_pos_infty = true;
- struct ftnode_fetch_extra bfe;
uint64_t left_key = 150;
uint64_t right_key = 151;
DBT left, right;
toku_fill_dbt(&left, &left_key, sizeof(left_key));
toku_fill_dbt(&right, &right_key, sizeof(right_key));
- fill_bfe_for_subset_read(
- &bfe,
+ ftnode_fetch_extra bfe;
+ bfe.create_for_subset_read(
ft_h,
NULL,
&left,
...
@@ -146,8 +146,8 @@ le_malloc(bn_data* bn, uint32_t idx, const char *key, const char *val)
static void
test1(int fd, FT ft_h, FTNODE *dn) {
int r;
- struct ftnode_fetch_extra bfe_all;
- fill_bfe_for_full_read(&bfe_all, ft_h);
+ ftnode_fetch_extra bfe_all;
+ bfe_all.create_for_full_read(ft_h);
FTNODE_DISK_DATA ndd = NULL;
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &ndd, &bfe_all);
bool is_leaf = ((*dn)->height == 0);
@@ -217,7 +217,6 @@ static int search_cmp(const struct ft_search& UU(so), const DBT* UU(key)) {
static void
test2(int fd, FT ft_h, FTNODE *dn) {
- struct ftnode_fetch_extra bfe_subset;
DBT left, right;
DB dummy_db;
memset(&dummy_db, 0, sizeof(dummy_db));
@@ -225,8 +224,8 @@ test2(int fd, FT ft_h, FTNODE *dn) {
memset(&right, 0, sizeof(right));
ft_search search;
- fill_bfe_for_subset_read(
- &bfe_subset,
+ ftnode_fetch_extra bfe_subset;
+ bfe_subset.create_for_subset_read(
ft_h,
ft_search_init(&search, search_cmp, FT_SEARCH_LEFT, nullptr, nullptr, nullptr),
&left,
@@ -236,6 +235,7 @@ test2(int fd, FT ft_h, FTNODE *dn) {
false,
false
);
FTNODE_DISK_DATA ndd = NULL;
int r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &ndd, &bfe_subset);
assert(r==0);
@@ -270,17 +270,15 @@ test2(int fd, FT ft_h, FTNODE *dn) {
static void
test3_leaf(int fd, FT ft_h, FTNODE *dn) {
- struct ftnode_fetch_extra bfe_min;
DBT left, right;
DB dummy_db;
memset(&dummy_db, 0, sizeof(dummy_db));
memset(&left, 0, sizeof(left));
memset(&right, 0, sizeof(right));
- fill_bfe_for_min_read(
- &bfe_min,
- ft_h
- );
+ ftnode_fetch_extra bfe_min;
+ bfe_min.create_for_min_read(ft_h);
FTNODE_DISK_DATA ndd = NULL;
int r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &ndd, &bfe_min);
assert(r==0);
...
@@ -247,9 +247,9 @@ test_serialize_leaf(int valsize, int nelts, double entropy, int ser_runs, int de
total_start.tv_sec = total_start.tv_usec = 0;
total_end.tv_sec = total_end.tv_usec = 0;
- struct ftnode_fetch_extra bfe;
+ ftnode_fetch_extra bfe;
for (int i = 0; i < deser_runs; i++) {
- fill_bfe_for_full_read(&bfe, ft_h);
+ bfe.create_for_full_read(ft_h);
gettimeofday(&t[0], NULL);
FTNODE_DISK_DATA ndd2 = NULL;
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd2, &bfe);
@@ -392,8 +392,8 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy, int ser_runs, int
dt *= 1000;
printf("serialize nonleaf(ms): %0.05lf (IGNORED RUNS=%d)\n", dt, ser_runs);
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_full_read(&bfe, ft_h);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft_h);
gettimeofday(&t[0], NULL);
FTNODE_DISK_DATA ndd2 = NULL;
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd2, &bfe);
...
@@ -165,14 +165,14 @@ static void
setup_dn(enum ftnode_verify_type bft, int fd, FT ft_h, FTNODE *dn, FTNODE_DISK_DATA* ndd) {
int r;
if (bft == read_all) {
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_full_read(&bfe, ft_h);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft_h);
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, ndd, &bfe);
assert(r==0);
}
else if (bft == read_compressed || bft == read_none) {
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_min_read(&bfe, ft_h);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(ft_h);
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, ndd, &bfe);
assert(r==0);
// assert all bp's are compressed or on disk.
@@ -199,7 +199,7 @@ setup_dn(enum ftnode_verify_type bft, int fd, FT ft_h, FTNODE *dn, FTNODE_DISK_D
// that it is available
// then run partial eviction to get it compressed
PAIR_ATTR attr;
- fill_bfe_for_full_read(&bfe, ft_h);
+ bfe.create_for_full_read(ft_h);
assert(toku_ftnode_pf_req_callback(*dn, &bfe));
r = toku_ftnode_pf_callback(*dn, *ndd, &bfe, fd, &attr);
assert(r==0);
@@ -221,7 +221,7 @@ setup_dn(enum ftnode_verify_type bft, int fd, FT ft_h, FTNODE *dn, FTNODE_DISK_D
}
}
// now decompress them
- fill_bfe_for_full_read(&bfe, ft_h);
+ bfe.create_for_full_read(ft_h);
assert(toku_ftnode_pf_req_callback(*dn, &bfe));
PAIR_ATTR attr;
r = toku_ftnode_pf_callback(*dn, *ndd, &bfe, fd, &attr);
...
@@ -227,8 +227,8 @@ doit (bool after_child_pin) {
);
FTNODE node = NULL;
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_min_read(&bfe, t->ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(t->ft);
toku_pin_ftnode(
t->ft,
node_root,
@@ -282,7 +282,7 @@ doit (bool after_child_pin) {
//
// now pin the root, verify that we have a message in there, and that it is clean
//
- fill_bfe_for_full_read(&bfe, c_ft->ft);
+ bfe.create_for_full_read(c_ft->ft);
toku_pin_ftnode(
c_ft->ft,
node_root,
...
@@ -245,8 +245,8 @@ doit (int state) {
toku_unpin_ftnode(t->ft, node);
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_min_read(&bfe, t->ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(t->ft);
toku_pin_ftnode_with_dep_nodes(
t->ft,
node_root,
@@ -305,7 +305,7 @@ doit (int state) {
//
// now pin the root, verify that the state is what we expect
//
- fill_bfe_for_full_read(&bfe, c_ft->ft);
+ bfe.create_for_full_read(c_ft->ft);
toku_pin_ftnode_with_dep_nodes(
c_ft->ft,
node_root,
...
@@ -265,8 +265,8 @@ doit (int state) {
toku_unpin_ftnode(t->ft, node);
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_min_read(&bfe, t->ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(t->ft);
toku_pin_ftnode(
t->ft,
node_root,
@@ -321,7 +321,7 @@ doit (int state) {
//
// now pin the root, verify that the state is what we expect
//
- fill_bfe_for_full_read(&bfe, c_ft->ft);
+ bfe.create_for_full_read(c_ft->ft);
toku_pin_ftnode(
c_ft->ft,
node_root,
...
@@ -241,8 +241,8 @@ doit (bool after_split) {
);
FTNODE node = NULL;
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_min_read(&bfe, t->ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(t->ft);
toku_pin_ftnode(
t->ft,
node_root,
@@ -297,7 +297,7 @@ doit (bool after_split) {
//
// now pin the root, verify that we have a message in there, and that it is clean
//
- fill_bfe_for_full_read(&bfe, c_ft->ft);
+ bfe.create_for_full_read(c_ft->ft);
toku_pin_ftnode(
c_ft->ft,
node_root,
...
@@ -237,8 +237,8 @@ doit (void) {
// now lock and release the leaf node to make sure it is what we expect it to be.
FTNODE node = NULL;
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_min_read(&bfe, ft->ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(ft->ft);
toku_pin_ftnode_with_dep_nodes(
ft->ft,
node_leaf,
@@ -268,7 +268,7 @@ doit (void) {
// node is in memory and another is
// on disk
//
- fill_bfe_for_min_read(&bfe, ft->ft);
+ bfe.create_for_min_read(ft->ft);
toku_pin_ftnode_with_dep_nodes(
ft->ft,
node_leaf,
@@ -289,7 +289,7 @@ doit (void) {
//
// now let us induce a clean on the internal node
//
- fill_bfe_for_min_read(&bfe, ft->ft);
+ bfe.create_for_min_read(ft->ft);
toku_pin_ftnode_with_dep_nodes(
ft->ft,
node_internal,
@@ -314,7 +314,7 @@ doit (void) {
);
// verify that node_internal's buffer is empty
- fill_bfe_for_min_read(&bfe, ft->ft);
+ bfe.create_for_min_read(ft->ft);
toku_pin_ftnode_with_dep_nodes(
ft->ft,
node_internal,
...
@@ -243,8 +243,8 @@ doit (bool keep_other_bn_in_memory) {
assert_zero(r);
// now lock and release the leaf node to make sure it is what we expect it to be.
FTNODE node = NULL;
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_min_read(&bfe, ft->ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(ft->ft);
toku_pin_ftnode(
ft->ft,
node_leaf,
@@ -280,7 +280,7 @@ doit (bool keep_other_bn_in_memory) {
// but only one should have broadcast message
// applied.
//
- fill_bfe_for_full_read(&bfe, ft->ft);
+ bfe.create_for_full_read(ft->ft);
}
else {
//
@@ -289,7 +289,7 @@ doit (bool keep_other_bn_in_memory) {
// node is in memory and another is
// on disk
//
- fill_bfe_for_min_read(&bfe, ft->ft);
+ bfe.create_for_min_read(ft->ft);
}
toku_pin_ftnode(
ft->ft,
@@ -314,7 +314,7 @@ doit (bool keep_other_bn_in_memory) {
//
// now let us induce a clean on the internal node
//
- fill_bfe_for_min_read(&bfe, ft->ft);
+ bfe.create_for_min_read(ft->ft);
toku_pin_ftnode(
ft->ft,
node_internal,
@@ -337,7 +337,7 @@ doit (bool keep_other_bn_in_memory) {
);
// verify that node_internal's buffer is empty
- fill_bfe_for_min_read(&bfe, ft->ft);
+ bfe.create_for_min_read(ft->ft);
toku_pin_ftnode(
ft->ft,
node_internal,
...
@@ -180,8 +180,8 @@ doit (void) {
// the root, one in each buffer, let's verify this.
FTNODE node = NULL;
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_min_read(&bfe, t->ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(t->ft);
toku_pin_ftnode(
t->ft,
node_root,
@@ -210,7 +210,7 @@ doit (void) {
// at this point, we have should have flushed
// only the middle buffer, let's verify this.
node = NULL;
- fill_bfe_for_min_read(&bfe, t->ft);
+ bfe.create_for_min_read(t->ft);
toku_pin_ftnode(
t->ft,
node_root,
...
@@ -229,8 +229,8 @@ doit (void) {
r = toku_ft_lookup(ft, toku_fill_dbt(&k, "a", 2), lookup_checkf, &pair);
assert(r==0);
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_min_read(&bfe, ft->ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(ft->ft);
toku_pin_ftnode(
ft->ft,
node_internal,
@@ -252,7 +252,7 @@ doit (void) {
);
// verify that node_internal's buffer is empty
- fill_bfe_for_min_read(&bfe, ft->ft);
+ bfe.create_for_min_read(ft->ft);
toku_pin_ftnode(
ft->ft,
node_internal,
...
@@ -167,8 +167,8 @@ static void test_oldest_referenced_xid_gets_propogated(void) {
// first verify the child
FTNODE node = NULL;
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_min_read(&bfe, t->ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_min_read(t->ft);
toku_pin_ftnode(
t->ft,
child_nonleaf_blocknum,
...
@@ -90,7 +90,7 @@ PATENT RIGHTS GRANT:
// it used to be the case that we copied the left and right keys of a
// range to be prelocked but never freed them, this test checks that they
- // are freed (as of this time, this happens in destroy_bfe_for_prefetch)
+ // are freed (as of this time, this happens in ftnode_fetch_extra::destroy())
#include "test.h"
...
@@ -90,7 +90,7 @@ PATENT RIGHTS GRANT:
// it used to be the case that we copied the left and right keys of a
// range to be prelocked but never freed them, this test checks that they
- // are freed (as of this time, this happens in destroy_bfe_for_prefetch)
+ // are freed (as of this time, this happens in ftnode_fetch_extra::destroy())
#include "test.h"
...
@@ -147,8 +147,8 @@ doit (void) {
// then node_internal should be huge
// we pin it and verify that it is not
FTNODE node;
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_full_read(&bfe, t->ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(t->ft);
toku_pin_ftnode(
t->ft,
node_internal,
...
@@ -143,8 +143,8 @@ static void test_split_merge(void) {
BLOCKNUM root_blocknum = ft->h->root_blocknum;
FTNODE root_node;
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_full_read(&bfe, ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft);
toku_pin_ftnode(ft, root_blocknum,
toku_cachetable_hash(ft->cf, ft->h->root_blocknum),
&bfe, PL_WRITE_EXPENSIVE, &root_node, true);
...
@@ -229,9 +229,9 @@ static int print_le(const void* key, const uint32_t keylen, const LEAFENTRY &le,
static void dump_node(int fd, BLOCKNUM blocknum, FT ft) {
FTNODE n;
- struct ftnode_fetch_extra bfe;
- FTNODE_DISK_DATA ndd = NULL;
- fill_bfe_for_full_read(&bfe, ft);
+ FTNODE_DISK_DATA ndd = nullptr;
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(ft);
int r = toku_deserialize_ftnode_from (fd, blocknum, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe);
assert_zero(r);
assert(n!=0);
@@ -366,8 +366,8 @@ static int nodesizes_helper(BLOCKNUM b, int64_t size, int64_t UU(address), void
frag_help_extra *CAST_FROM_VOIDP(info, extra);
FTNODE n;
FTNODE_DISK_DATA ndd = NULL;
- struct ftnode_fetch_extra bfe;
- fill_bfe_for_full_read(&bfe, info->ft);
+ ftnode_fetch_extra bfe;
+ bfe.create_for_full_read(info->ft);
int r = toku_deserialize_ftnode_from(info->fd, b, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe);
if (r==0) {
info->blocksizes += size;
...