Commit bde258f5 authored by John Esmet, committed by Yoni Fogel

refs #5918 break down serialize/compress etc times into leaf vs nonleaf


git-svn-id: file:///svn/toku/tokudb@52331 c7de825b-a66e-492c-adef-691d508d4ae1
parent 73237dae
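In short: the two status-update helpers now take the FTNODE and branch on node->height (0 means leaf), bumping per-leaf or per-nonleaf counters instead of the old combined FT_NODE_* ones. A minimal sketch of the resulting call pattern at a serialization site follows; serialize_node and compress_node are hypothetical stand-ins, while toku_time_now, tokutime_t, and the status helper come from the hunks below.

// Hypothetical call site: time the serialize and compress phases separately,
// then report them against the node so the status layer can split the totals
// by node->height (leaf vs nonleaf).
static void example_report_serialize_times(FTNODE node) {
    tokutime_t t0 = toku_time_now();
    serialize_node(node);   // hypothetical serialize phase
    tokutime_t t1 = toku_time_now();
    compress_node(node);    // hypothetical compress phase
    tokutime_t t2 = toku_time_now();
    toku_ft_status_update_serialize_times(node, t1 - t0, t2 - t1);
}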
@@ -636,8 +636,8 @@ STAT64INFO_S toku_get_and_clear_basement_stats(FTNODE leafnode);
 void toku_ft_status_update_pivot_fetch_reason(struct ftnode_fetch_extra *bfe);
 void toku_ft_status_update_flush_reason(FTNODE node, uint64_t uncompressed_bytes_flushed, uint64_t bytes_written, tokutime_t write_time, bool for_checkpoint);
-void toku_ft_status_update_serialize_times(tokutime_t serialize_time, tokutime_t compress_time);
-void toku_ft_status_update_deserialize_times(tokutime_t deserialize_time, tokutime_t decompress_time);
+void toku_ft_status_update_serialize_times(FTNODE node, tokutime_t serialize_time, tokutime_t compress_time);
+void toku_ft_status_update_deserialize_times(FTNODE node, tokutime_t deserialize_time, tokutime_t decompress_time);
 void toku_ftnode_clone_callback(void* value_data, void** cloned_value_data, PAIR_ATTR* new_attr, bool for_checkpoint, void* write_extraargs);
 void toku_ftnode_checkpoint_complete_callback(void *value_data);
@@ -1044,10 +1044,14 @@ typedef enum {
     FT_NUM_MSG_BUFFER_FETCHED_WRITE,
     FT_BYTES_MSG_BUFFER_FETCHED_WRITE,
     FT_TOKUTIME_MSG_BUFFER_FETCHED_WRITE,
-    FT_NODE_COMPRESS_TOKUTIME, // seconds spent compressing nodes to memory
-    FT_NODE_SERIALIZE_TOKUTIME, // seconds spent serializing nodes to memory
-    FT_NODE_DECOMPRESS_TOKUTIME, // seconds spent decompressing nodes to memory
-    FT_NODE_DESERIALIZE_TOKUTIME, // seconds spent deserializing nodes to memory
+    FT_LEAF_COMPRESS_TOKUTIME, // seconds spent compressing leaf nodes to memory
+    FT_LEAF_SERIALIZE_TOKUTIME, // seconds spent serializing leaf nodes to memory
+    FT_LEAF_DECOMPRESS_TOKUTIME, // seconds spent decompressing leaf nodes to memory
+    FT_LEAF_DESERIALIZE_TOKUTIME, // seconds spent deserializing leaf nodes to memory
+    FT_NONLEAF_COMPRESS_TOKUTIME, // seconds spent compressing nonleaf nodes to memory
+    FT_NONLEAF_SERIALIZE_TOKUTIME, // seconds spent serializing nonleaf nodes to memory
+    FT_NONLEAF_DECOMPRESS_TOKUTIME, // seconds spent decompressing nonleaf nodes to memory
+    FT_NONLEAF_DESERIALIZE_TOKUTIME, // seconds spent deserializing nonleaf nodes to memory
     FT_PRO_NUM_ROOT_SPLIT,
     FT_PRO_NUM_ROOT_H0_INJECT,
     FT_PRO_NUM_ROOT_H1_INJECT,
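Note that splitting the enum is lossless for consumers of the old FT_NODE_* totals: the aggregate is recoverable by summing the leaf and nonleaf counters. A minimal sketch, assuming a STATUS_VALUE-style accessor for reading a status entry (the accessor name is an assumption, not taken from this diff):

// Hypothetical: recover the old aggregate "node compression" total from the
// new split counters. STATUS_VALUE is assumed to return the current value of
// a status entry; the real status plumbing may differ.
static tokutime_t total_node_compress_tokutime(void) {
    return STATUS_VALUE(FT_LEAF_COMPRESS_TOKUTIME) +
           STATUS_VALUE(FT_NONLEAF_COMPRESS_TOKUTIME);
}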
@@ -259,10 +259,14 @@ status_init(void)
     STATUS_INIT(FT_DISK_FLUSH_NONLEAF_TOKUTIME_FOR_CHECKPOINT, TOKUTIME, "nonleaf nodes flushed to disk (for checkpoint) (seconds)");
     // CPU time statistics for [de]serialization and [de]compression.
-    STATUS_INIT(FT_NODE_COMPRESS_TOKUTIME, TOKUTIME, "node compression to memory (seconds)");
-    STATUS_INIT(FT_NODE_SERIALIZE_TOKUTIME, TOKUTIME, "node serialization to memory (seconds)");
-    STATUS_INIT(FT_NODE_DECOMPRESS_TOKUTIME, TOKUTIME, "node decompression to memory (seconds)");
-    STATUS_INIT(FT_NODE_DESERIALIZE_TOKUTIME, TOKUTIME, "node deserialization to memory (seconds)");
+    STATUS_INIT(FT_LEAF_COMPRESS_TOKUTIME, TOKUTIME, "leaf compression to memory (seconds)");
+    STATUS_INIT(FT_LEAF_SERIALIZE_TOKUTIME, TOKUTIME, "leaf serialization to memory (seconds)");
+    STATUS_INIT(FT_LEAF_DECOMPRESS_TOKUTIME, TOKUTIME, "leaf decompression to memory (seconds)");
+    STATUS_INIT(FT_LEAF_DESERIALIZE_TOKUTIME, TOKUTIME, "leaf deserialization to memory (seconds)");
+    STATUS_INIT(FT_NONLEAF_COMPRESS_TOKUTIME, TOKUTIME, "nonleaf compression to memory (seconds)");
+    STATUS_INIT(FT_NONLEAF_SERIALIZE_TOKUTIME, TOKUTIME, "nonleaf serialization to memory (seconds)");
+    STATUS_INIT(FT_NONLEAF_DECOMPRESS_TOKUTIME, TOKUTIME, "nonleaf decompression to memory (seconds)");
+    STATUS_INIT(FT_NONLEAF_DESERIALIZE_TOKUTIME, TOKUTIME, "nonleaf deserialization to memory (seconds)");
     // Promotion statistics.
     STATUS_INIT(FT_PRO_NUM_ROOT_SPLIT, PARCOUNT, "promotion: roots split");
@@ -1229,14 +1233,34 @@ ft_status_update_partial_fetch_reason(
     }
 }
 
-void toku_ft_status_update_serialize_times(tokutime_t serialize_time, tokutime_t compress_time) {
-    STATUS_INC(FT_NODE_SERIALIZE_TOKUTIME, serialize_time);
-    STATUS_INC(FT_NODE_COMPRESS_TOKUTIME, compress_time);
+void toku_ft_status_update_serialize_times(FTNODE node, tokutime_t serialize_time, tokutime_t compress_time) {
+    if (node->height == 0) {
+        STATUS_INC(FT_LEAF_SERIALIZE_TOKUTIME, serialize_time);
+        STATUS_INC(FT_LEAF_COMPRESS_TOKUTIME, compress_time);
+    } else {
+        STATUS_INC(FT_NONLEAF_SERIALIZE_TOKUTIME, serialize_time);
+        STATUS_INC(FT_NONLEAF_COMPRESS_TOKUTIME, compress_time);
+    }
+    static int calls; // leftover debug instrumentation
+    if (calls++ % 10000 == 0) { // print timings for one in every 10000 calls
+        printf("height %d serialize %lf\n", node->height, tokutime_to_seconds(serialize_time));
+        printf("height %d compress %lf\n", node->height, tokutime_to_seconds(compress_time));
+    }
 }
 
-void toku_ft_status_update_deserialize_times(tokutime_t deserialize_time, tokutime_t decompress_time) {
-    STATUS_INC(FT_NODE_DESERIALIZE_TOKUTIME, deserialize_time);
-    STATUS_INC(FT_NODE_DECOMPRESS_TOKUTIME, decompress_time);
+void toku_ft_status_update_deserialize_times(FTNODE node, tokutime_t deserialize_time, tokutime_t decompress_time) {
+    if (node->height == 0) {
+        STATUS_INC(FT_LEAF_DESERIALIZE_TOKUTIME, deserialize_time);
+        STATUS_INC(FT_LEAF_DECOMPRESS_TOKUTIME, decompress_time);
+    } else {
+        STATUS_INC(FT_NONLEAF_DESERIALIZE_TOKUTIME, deserialize_time);
+        STATUS_INC(FT_NONLEAF_DECOMPRESS_TOKUTIME, decompress_time);
+    }
+    static int calls; // leftover debug instrumentation
+    if (calls++ % 10000 == 0) { // print timings for one in every 10000 calls
+        printf("height %d deserialize %lf\n", node->height, tokutime_to_seconds(deserialize_time));
+        printf("height %d decompress %lf\n", node->height, tokutime_to_seconds(decompress_time));
+    }
 }
 
 // callback for partially reading a node
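One caveat with the debug sampling above: the function-local `static int calls` counter is unsynchronized, so concurrent serializers race on it. A thread-safe variant is sketched below; std::atomic is standard C++11, and the helper name is hypothetical.

#include <atomic>
#include <stdio.h>

// Hypothetical thread-safe replacement for the `static int calls` sampling:
// a relaxed atomic increment hands each call a unique ticket, so one in every
// 10000 calls prints even when many threads serialize concurrently.
static void maybe_print_serialize_times(FTNODE node, tokutime_t serialize_time, tokutime_t compress_time) {
    static std::atomic<int> calls(0);
    if (calls.fetch_add(1, std::memory_order_relaxed) % 10000 == 0) {
        printf("height %d serialize %lf\n", node->height, tokutime_to_seconds(serialize_time));
        printf("height %d compress %lf\n", node->height, tokutime_to_seconds(compress_time));
    }
}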
@@ -660,7 +660,7 @@ toku_create_compressed_partition_from_available(
     memset(&st, 0, sizeof(st));
     serialize_and_compress_partition(node, childnum, compression_method, sb, &st);
-    toku_ft_status_update_serialize_times(st.serialize_time, st.compress_time);
+    toku_ft_status_update_serialize_times(node, st.serialize_time, st.compress_time);
     //
     // now we have an sb that would be ready for being written out,
@@ -829,7 +829,7 @@ int toku_serialize_ftnode_to_memory(FTNODE node,
     // update the serialize times, ignore the header for simplicity. we captured all
     // of the partitions' serialize times so that's probably good enough.
-    toku_ft_status_update_serialize_times(st.serialize_time, st.compress_time);
+    toku_ft_status_update_serialize_times(node, st.serialize_time, st.compress_time);
     // now we have compressed each of our pieces into individual sub_blocks,
     // we can put the header and all the subblocks into a single buffer
@@ -1719,8 +1719,8 @@ deserialize_ftnode_header_from_rbuf_if_small_enough (FTNODE *ftnode,
     *ftnode = node;
     r = 0;
-cleanup:
-    toku_ft_status_update_deserialize_times(deserialize_time, decompress_time);
+cleanup:
+    toku_ft_status_update_deserialize_times(node, deserialize_time, decompress_time);
     if (r != 0) {
         if (node) {
             toku_free(*ndd);
@@ -2389,7 +2389,7 @@ deserialize_ftnode_from_rbuf(
     t1 = toku_time_now();
     deserialize_time = (t1 - t0) - decompress_time;
-    toku_ft_status_update_deserialize_times(deserialize_time, decompress_time);
+    toku_ft_status_update_deserialize_times(node, deserialize_time, decompress_time);
     if (r != 0) {
         // NOTE: Right now, callers higher in the stack will assert on
@@ -2467,7 +2467,7 @@ toku_deserialize_bp_from_disk(FTNODE node, FTNODE_DISK_DATA ndd, int childnum, i
     tokutime_t io_time = t1 - t0;
     tokutime_t decompress_time = t2 - t1;
     tokutime_t deserialize_time = t3 - t2;
-    toku_ft_status_update_deserialize_times(deserialize_time, decompress_time);
+    toku_ft_status_update_deserialize_times(node, deserialize_time, decompress_time);
     bfe->bytes_read = rlen;
     bfe->io_time = io_time;
@@ -2507,7 +2507,7 @@ toku_deserialize_bp_from_compressed(FTNODE node, int childnum, struct ftnode_fet
     tokutime_t decompress_time = t1 - t0;
     tokutime_t deserialize_time = t2 - t1;
-    toku_ft_status_update_deserialize_times(deserialize_time, decompress_time);
+    toku_ft_status_update_deserialize_times(node, deserialize_time, decompress_time);
     toku_free(curr_sb->compressed_ptr);
     toku_free(curr_sb);