Commit 66bfdfc9 authored by John Esmet, committed by Yoni Fogel

refs #5779 merge new accounting to main


git-svn-id: file:///svn/toku/tokudb@51141 c7de825b-a66e-492c-adef-691d508d4ae1
parent 17476899
@@ -513,8 +513,6 @@ struct ft_handle {
     struct ft_options options;
 };
 
-// FIXME needs toku prefix
-long ftnode_memory_size (FTNODE node);
 PAIR_ATTR make_ftnode_pair_attr(FTNODE node);
 PAIR_ATTR make_invalid_pair_attr(void);
@@ -973,12 +971,8 @@ typedef enum {
     FT_UPDATES = 0,
     FT_UPDATES_BROADCAST,
     FT_DESCRIPTOR_SET,
-    FT_PARTIAL_EVICTIONS_NONLEAF, // number of nonleaf node partial evictions
-    FT_PARTIAL_EVICTIONS_LEAF, // number of leaf node partial evictions
     FT_MSN_DISCARDS, // how many messages were ignored by leaf because of msn
-    //FT_MAX_WORKDONE, // max workdone value of any buffer
     FT_TOTAL_RETRIES, // total number of search retries due to TRY_AGAIN
-    //FT_MAX_SEARCH_EXCESS_RETRIES, // max number of excess search retries (retries - treeheight) due to TRY_AGAIN
     FT_SEARCH_TRIES_GT_HEIGHT, // number of searches that required more tries than the height of the tree
     FT_SEARCH_TRIES_GT_HEIGHTPLUS3, // number of searches that required more tries than the height of the tree plus three
     FT_DISK_FLUSH_LEAF, // number of leaf nodes flushed to disk, not for checkpoint
@@ -997,6 +991,14 @@ typedef enum {
     FT_DISK_FLUSH_NONLEAF_BYTES_FOR_CHECKPOINT, // number of nonleaf node bytes flushed to disk for checkpoint
     FT_DISK_FLUSH_NONLEAF_UNCOMPRESSED_BYTES_FOR_CHECKPOINT, // number of nonleaf node uncompressed bytes flushed to disk for checkpoint
     FT_DISK_FLUSH_NONLEAF_TOKUTIME_FOR_CHECKPOINT, // time spent flushing nonleaf nodes to disk for checkpoint
+    FT_PARTIAL_EVICTIONS_NONLEAF, // number of nonleaf node partial evictions
+    FT_PARTIAL_EVICTIONS_NONLEAF_BYTES, // number of nonleaf node partial evictions (bytes)
+    FT_PARTIAL_EVICTIONS_LEAF, // number of leaf node partial evictions
+    FT_PARTIAL_EVICTIONS_LEAF_BYTES, // number of leaf node partial evictions (bytes)
+    FT_FULL_EVICTIONS_LEAF, // number of full cachetable evictions on leaf nodes
+    FT_FULL_EVICTIONS_LEAF_BYTES, // number of full cachetable evictions on leaf nodes (bytes)
+    FT_FULL_EVICTIONS_NONLEAF, // number of full cachetable evictions on nonleaf nodes
+    FT_FULL_EVICTIONS_NONLEAF_BYTES, // number of full cachetable evictions on nonleaf nodes (bytes)
     FT_CREATE_LEAF, // number of leaf nodes created
     FT_CREATE_NONLEAF, // number of nonleaf nodes created
     FT_DESTROY_LEAF, // number of leaf nodes destroyed
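A note on the new enum entries: every eviction event counter in this change is paired with a _BYTES counter, so average bytes reclaimed per eviction can be derived by dividing the byte total by the event count. A minimal sketch of that pairing (simplified names; the real code uses the enum entries above with STATUS_INC on partitioned counters):

    #include <atomic>
    #include <cstdint>

    // Hypothetical stand-in for one paired count/bytes statistic.
    struct eviction_stat {
        std::atomic<uint64_t> count{0};
        std::atomic<uint64_t> bytes{0};

        // Record one eviction event that reclaimed `reclaimed` bytes.
        void record(uint64_t reclaimed) {
            count.fetch_add(1, std::memory_order_relaxed);
            bytes.fetch_add(reclaimed, std::memory_order_relaxed);
        }

        // Average bytes per eviction: the ratio the count/bytes pairing enables.
        double avg_bytes() const {
            uint64_t c = count.load(std::memory_order_relaxed);
            return c ? double(bytes.load(std::memory_order_relaxed)) / double(c) : 0.0;
        }
    };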
...
@@ -175,8 +175,6 @@ status_init(void)
     STATUS_INIT(FT_UPDATES, PARCOUNT, "dictionary updates");
     STATUS_INIT(FT_UPDATES_BROADCAST, PARCOUNT, "dictionary broadcast updates");
     STATUS_INIT(FT_DESCRIPTOR_SET, PARCOUNT, "descriptor set");
-    STATUS_INIT(FT_PARTIAL_EVICTIONS_NONLEAF, PARCOUNT, "nonleaf node partial evictions");
-    STATUS_INIT(FT_PARTIAL_EVICTIONS_LEAF, PARCOUNT, "leaf node partial evictions");
     STATUS_INIT(FT_MSN_DISCARDS, PARCOUNT, "messages ignored by leaf due to msn");
     STATUS_INIT(FT_TOTAL_RETRIES, PARCOUNT, "total search retries due to TRY_AGAIN");
     STATUS_INIT(FT_SEARCH_TRIES_GT_HEIGHT, PARCOUNT, "searches requiring more tries than the height of the tree");
@@ -200,6 +198,16 @@ status_init(void)
     STATUS_INIT(FT_NUM_MSG_BUFFER_DECOMPRESSED_PREFETCH, PARCOUNT, "buffers decompressed for prefetch");
     STATUS_INIT(FT_NUM_MSG_BUFFER_DECOMPRESSED_WRITE, PARCOUNT, "buffers decompressed for write");
+
+    // Eviction statistics:
+    STATUS_INIT(FT_FULL_EVICTIONS_LEAF, PARCOUNT, "leaf node full evictions");
+    STATUS_INIT(FT_FULL_EVICTIONS_LEAF_BYTES, PARCOUNT, "leaf node full evictions (bytes)");
+    STATUS_INIT(FT_FULL_EVICTIONS_NONLEAF, PARCOUNT, "nonleaf node full evictions");
+    STATUS_INIT(FT_FULL_EVICTIONS_NONLEAF_BYTES, PARCOUNT, "nonleaf node full evictions (bytes)");
+    STATUS_INIT(FT_PARTIAL_EVICTIONS_LEAF, PARCOUNT, "leaf node partial evictions");
+    STATUS_INIT(FT_PARTIAL_EVICTIONS_LEAF_BYTES, PARCOUNT, "leaf node partial evictions (bytes)");
+    STATUS_INIT(FT_PARTIAL_EVICTIONS_NONLEAF, PARCOUNT, "nonleaf node partial evictions");
+    STATUS_INIT(FT_PARTIAL_EVICTIONS_NONLEAF_BYTES, PARCOUNT, "nonleaf node partial evictions (bytes)");
     // Disk read statistics:
     //
     // Pivots: For queries, prefetching, or writing.
@@ -498,7 +506,7 @@ exit:
     return retval;
 }
 
-long
+static long
 ftnode_memory_size (FTNODE node)
 // Effect: Estimate how much main memory a node requires.
 {
@@ -793,7 +801,7 @@ void toku_ftnode_clone_callback(
 static void ft_leaf_run_gc(FTNODE node, FT ft);
 
-void toku_ftnode_flush_callback (
+void toku_ftnode_flush_callback(
     CACHEFILE UU(cachefile),
     int fd,
     BLOCKNUM nodename,
@@ -827,6 +835,14 @@ void toku_ftnode_flush_callback (
     }
     if (!keep_me) {
         if (!is_clone) {
+            long node_size = ftnode_memory_size(ftnode);
+            if (ftnode->height == 0) {
+                STATUS_INC(FT_FULL_EVICTIONS_LEAF, 1);
+                STATUS_INC(FT_FULL_EVICTIONS_LEAF_BYTES, node_size);
+            } else {
+                STATUS_INC(FT_FULL_EVICTIONS_NONLEAF, 1);
+                STATUS_INC(FT_FULL_EVICTIONS_NONLEAF_BYTES, node_size);
+            }
             toku_free(*disk_data);
         }
         else {
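In the hunk above, a node the cachetable discards (!keep_me) that is not a clone leaves memory in one piece, so the whole ftnode_memory_size() estimate is charged to the full-eviction byte counter, split by height (height 0 means leaf). A self-contained sketch of that branch (node_info and the g_* globals are hypothetical stand-ins for FTNODE and the status counters):

    #include <cstdint>
    #include <cstdio>

    // Stand-ins mirroring what the real code reads via ftnode->height
    // and ftnode_memory_size(ftnode).
    struct node_info { int height; long size; };

    static uint64_t g_full_leaf, g_full_leaf_bytes;
    static uint64_t g_full_nonleaf, g_full_nonleaf_bytes;

    // Leaf vs. nonleaf full-eviction accounting, as in the diff.
    static void account_full_eviction(const node_info &n) {
        if (n.height == 0) {
            g_full_leaf += 1;
            g_full_leaf_bytes += n.size;
        } else {
            g_full_nonleaf += 1;
            g_full_nonleaf_bytes += n.size;
        }
    }

    int main(void) {
        account_full_eviction({ 0, 4096 });   // a leaf eviction
        account_full_eviction({ 2, 16384 });  // a nonleaf eviction
        printf("leaf: %llu evictions / %llu bytes\n",
               (unsigned long long) g_full_leaf,
               (unsigned long long) g_full_leaf_bytes);
        return 0;
    }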
@@ -993,8 +1009,11 @@ int toku_ftnode_pe_callback (void *ftnode_pv, PAIR_ATTR UU(old_attr), PAIR_ATTR*
     for (int i = 0; i < node->n_children; i++) {
         if (BP_STATE(node,i) == PT_AVAIL) {
             if (BP_SHOULD_EVICT(node,i)) {
+                long size_before = ftnode_memory_size(node);
+                compress_internal_node_partition(node, i, ft->h->compression_method);
+                long delta = size_before - ftnode_memory_size(node);
                 STATUS_INC(FT_PARTIAL_EVICTIONS_NONLEAF, 1);
-                cilk_spawn compress_internal_node_partition(node, i, ft->h->compression_method);
+                STATUS_INC(FT_PARTIAL_EVICTIONS_NONLEAF_BYTES, delta);
             }
             else {
                 BP_SWEEP_CLOCK(node,i);
@@ -1004,7 +1023,6 @@ int toku_ftnode_pe_callback (void *ftnode_pv, PAIR_ATTR UU(old_attr), PAIR_ATTR*
                 continue;
             }
         }
-        cilk_sync;
     }
     //
     // partial eviction strategy for basement nodes:
@@ -1015,17 +1033,23 @@ int toku_ftnode_pe_callback (void *ftnode_pv, PAIR_ATTR UU(old_attr), PAIR_ATTR*
     for (int i = 0; i < node->n_children; i++) {
         // Get rid of compressed stuff no matter what.
         if (BP_STATE(node,i) == PT_COMPRESSED) {
-            STATUS_INC(FT_PARTIAL_EVICTIONS_LEAF, 1);
+            long size_before = ftnode_memory_size(node);
             SUB_BLOCK sb = BSB(node, i);
             toku_free(sb->compressed_ptr);
             toku_free(sb);
             set_BNULL(node, i);
             BP_STATE(node,i) = PT_ON_DISK;
+            long delta = size_before - ftnode_memory_size(node);
+            STATUS_INC(FT_PARTIAL_EVICTIONS_LEAF, 1);
+            STATUS_INC(FT_PARTIAL_EVICTIONS_LEAF_BYTES, delta);
         }
         else if (BP_STATE(node,i) == PT_AVAIL) {
             if (BP_SHOULD_EVICT(node,i)) {
-                STATUS_INC(FT_PARTIAL_EVICTIONS_LEAF, 1);
+                long size_before = ftnode_memory_size(node);
                 toku_evict_bn_from_memory(node, i, ft);
+                long delta = size_before - ftnode_memory_size(node);
+                STATUS_INC(FT_PARTIAL_EVICTIONS_LEAF, 1);
+                STATUS_INC(FT_PARTIAL_EVICTIONS_LEAF_BYTES, delta);
             }
             else {
                 BP_SWEEP_CLOCK(node,i);
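Both leaf hunks follow the same measure-evict-measure pattern as the nonleaf case: snapshot ftnode_memory_size() before releasing a partition and record the difference as bytes reclaimed. This is consistent with the cilk_spawn/cilk_sync removal earlier in the callback: the compression has to finish before the size delta can be measured, so it now runs synchronously. A runnable sketch of the pattern (node_t, memory_size, and evict_partition are hypothetical stand-ins for FTNODE and the real calls):

    #include <cstdio>
    #include <vector>

    // Toy stand-in for a node with per-partition in-memory buffers.
    struct node_t {
        std::vector<std::vector<char>> partitions;
    };

    static long memory_size(const node_t &n) {
        long total = (long) sizeof(n);
        for (const auto &p : n.partitions) {
            total += (long) p.capacity();
        }
        return total;
    }

    static void evict_partition(node_t &n, size_t i) {
        // Swap with an empty vector so the allocation is actually released.
        std::vector<char>().swap(n.partitions[i]);
    }

    int main(void) {
        node_t node;
        node.partitions.emplace_back(4096);
        node.partitions.emplace_back(8192);

        // The accounting pattern from the diff: measure, evict, measure,
        // record the delta as the bytes this partial eviction reclaimed.
        long size_before = memory_size(node);
        evict_partition(node, 1);
        long delta = size_before - memory_size(node);
        printf("partial eviction reclaimed %ld bytes\n", delta);
        return 0;
    }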
...