Commit 32b2bac5 authored by Yoni Fogel's avatar Yoni Fogel

closes [t:2408] Merge branch #2408 back to main. Delete dev branch.

git-svn-id: file:///svn/toku/tokudb@18163 c7de825b-a66e-492c-adef-691d508d4ae1
parent a28d0f58
......@@ -290,6 +290,18 @@ struct __toku_dbt {
typedef int (*toku_dbt_upgradef)(DB*,
u_int32_t old_version, const DBT *old_descriptor, const DBT *old_key, const DBT *old_val,
u_int32_t new_version, const DBT *new_descriptor, const DBT *new_key, const DBT *new_val);
//One header is included in 'data'
//One header is included in 'additional for checkpoint'
//Fragmentation report filled in by DB->get_fragmentation().
//A "block" here is a contiguous region in the file.
typedef struct __toku_db_fragmentation {
uint64_t file_size_bytes; //Total file size in bytes
uint64_t data_bytes; //Compressed User Data in bytes
uint64_t data_blocks; //Number of blocks of compressed User Data
uint64_t checkpoint_bytes_additional; //Additional bytes used for checkpoint system
uint64_t checkpoint_blocks_additional; //Additional blocks used for checkpoint system
uint64_t unused_bytes; //Unused space in file
uint64_t unused_blocks; //Number of contiguous regions of unused space
uint64_t largest_unused_block; //Size of largest contiguous unused space
} *TOKU_DB_FRAGMENTATION, TOKU_DB_FRAGMENTATION_S;
struct __toku_db {
struct __toku_db_internal *i;
#define db_struct_i(x) ((x)->i)
......@@ -308,7 +320,8 @@ struct __toku_db {
int (*getf_set)(DB*, DB_TXN*, u_int32_t, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_set without a persistent cursor) */;
int (*getf_get_both)(DB*, DB_TXN*, u_int32_t, DBT*, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_get_both without a persistent cursor) */;
int (*flatten)(DB*, DB_TXN*) /* Flatten a dictionary, similar to (but faster than) a table scan */;
void* __toku_dummy0[19];
int (*get_fragmentation)(DB*,TOKU_DB_FRAGMENTATION);
void* __toku_dummy0[18];
char __toku_dummy1[96];
void *api_internal; /* 32-bit offset=236 size=4, 64=bit offset=376 size=8 */
void* __toku_dummy2[5];
......
......@@ -300,6 +300,18 @@ struct __toku_dbt {
typedef int (*toku_dbt_upgradef)(DB*,
u_int32_t old_version, const DBT *old_descriptor, const DBT *old_key, const DBT *old_val,
u_int32_t new_version, const DBT *new_descriptor, const DBT *new_key, const DBT *new_val);
//One header is included in 'data'
//One header is included in 'additional for checkpoint'
typedef struct __toku_db_fragmentation {
uint64_t file_size_bytes; //Total file size in bytes
uint64_t data_bytes; //Compressed User Data in bytes
uint64_t data_blocks; //Number of blocks of compressed User Data
uint64_t checkpoint_bytes_additional; //Additional bytes used for checkpoint system
uint64_t checkpoint_blocks_additional; //Additional blocks used for checkpoint system
uint64_t unused_bytes; //Unused space in file
uint64_t unused_blocks; //Number of contiguous regions of unused space
uint64_t largest_unused_block; //Size of largest contiguous unused space
} *TOKU_DB_FRAGMENTATION, TOKU_DB_FRAGMENTATION_S;
struct __toku_db {
struct __toku_db_internal *i;
#define db_struct_i(x) ((x)->i)
......@@ -318,7 +330,8 @@ struct __toku_db {
int (*getf_set)(DB*, DB_TXN*, u_int32_t, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_set without a persistent cursor) */;
int (*getf_get_both)(DB*, DB_TXN*, u_int32_t, DBT*, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_get_both without a persistent cursor) */;
int (*flatten)(DB*, DB_TXN*) /* Flatten a dictionary, similar to (but faster than) a table scan */;
void* __toku_dummy0[22];
int (*get_fragmentation)(DB*,TOKU_DB_FRAGMENTATION);
void* __toku_dummy0[21];
char __toku_dummy1[96];
void *api_internal; /* 32-bit offset=248 size=4, 64=bit offset=400 size=8 */
void* __toku_dummy2[5];
......
......@@ -303,6 +303,18 @@ struct __toku_dbt {
typedef int (*toku_dbt_upgradef)(DB*,
u_int32_t old_version, const DBT *old_descriptor, const DBT *old_key, const DBT *old_val,
u_int32_t new_version, const DBT *new_descriptor, const DBT *new_key, const DBT *new_val);
//One header is included in 'data'
//One header is included in 'additional for checkpoint'
typedef struct __toku_db_fragmentation {
uint64_t file_size_bytes; //Total file size in bytes
uint64_t data_bytes; //Compressed User Data in bytes
uint64_t data_blocks; //Number of blocks of compressed User Data
uint64_t checkpoint_bytes_additional; //Additional bytes used for checkpoint system
uint64_t checkpoint_blocks_additional; //Additional blocks used for checkpoint system
uint64_t unused_bytes; //Unused space in file
uint64_t unused_blocks; //Number of contiguous regions of unused space
uint64_t largest_unused_block; //Size of largest contiguous unused space
} *TOKU_DB_FRAGMENTATION, TOKU_DB_FRAGMENTATION_S;
struct __toku_db {
struct __toku_db_internal *i;
#define db_struct_i(x) ((x)->i)
......@@ -321,7 +333,8 @@ struct __toku_db {
int (*getf_set)(DB*, DB_TXN*, u_int32_t, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_set without a persistent cursor) */;
int (*getf_get_both)(DB*, DB_TXN*, u_int32_t, DBT*, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_get_both without a persistent cursor) */;
int (*flatten)(DB*, DB_TXN*) /* Flatten a dictionary, similar to (but faster than) a table scan */;
void* __toku_dummy0[24];
int (*get_fragmentation)(DB*,TOKU_DB_FRAGMENTATION);
void* __toku_dummy0[23];
char __toku_dummy1[96];
void *api_internal; /* 32-bit offset=256 size=4, 64=bit offset=416 size=8 */
void* __toku_dummy2[5];
......
......@@ -303,6 +303,18 @@ struct __toku_dbt {
typedef int (*toku_dbt_upgradef)(DB*,
u_int32_t old_version, const DBT *old_descriptor, const DBT *old_key, const DBT *old_val,
u_int32_t new_version, const DBT *new_descriptor, const DBT *new_key, const DBT *new_val);
//One header is included in 'data'
//One header is included in 'additional for checkpoint'
typedef struct __toku_db_fragmentation {
uint64_t file_size_bytes; //Total file size in bytes
uint64_t data_bytes; //Compressed User Data in bytes
uint64_t data_blocks; //Number of blocks of compressed User Data
uint64_t checkpoint_bytes_additional; //Additional bytes used for checkpoint system
uint64_t checkpoint_blocks_additional; //Additional blocks used for checkpoint system
uint64_t unused_bytes; //Unused space in file
uint64_t unused_blocks; //Number of contiguous regions of unused space
uint64_t largest_unused_block; //Size of largest contiguous unused space
} *TOKU_DB_FRAGMENTATION, TOKU_DB_FRAGMENTATION_S;
struct __toku_db {
struct __toku_db_internal *i;
#define db_struct_i(x) ((x)->i)
......@@ -321,7 +333,8 @@ struct __toku_db {
int (*getf_set)(DB*, DB_TXN*, u_int32_t, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_set without a persistent cursor) */;
int (*getf_get_both)(DB*, DB_TXN*, u_int32_t, DBT*, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_get_both without a persistent cursor) */;
int (*flatten)(DB*, DB_TXN*) /* Flatten a dictionary, similar to (but faster than) a table scan */;
void* __toku_dummy0[27];
int (*get_fragmentation)(DB*,TOKU_DB_FRAGMENTATION);
void* __toku_dummy0[26];
char __toku_dummy1[96];
void *api_internal; /* 32-bit offset=268 size=4, 64=bit offset=440 size=8 */
void* __toku_dummy2[5];
......
......@@ -306,6 +306,18 @@ struct __toku_dbt {
typedef int (*toku_dbt_upgradef)(DB*,
u_int32_t old_version, const DBT *old_descriptor, const DBT *old_key, const DBT *old_val,
u_int32_t new_version, const DBT *new_descriptor, const DBT *new_key, const DBT *new_val);
//One header is included in 'data'
//One header is included in 'additional for checkpoint'
typedef struct __toku_db_fragmentation {
uint64_t file_size_bytes; //Total file size in bytes
uint64_t data_bytes; //Compressed User Data in bytes
uint64_t data_blocks; //Number of blocks of compressed User Data
uint64_t checkpoint_bytes_additional; //Additional bytes used for checkpoint system
uint64_t checkpoint_blocks_additional; //Additional blocks used for checkpoint system
uint64_t unused_bytes; //Unused space in file
uint64_t unused_blocks; //Number of contiguous regions of unused space
uint64_t largest_unused_block; //Size of largest contiguous unused space
} *TOKU_DB_FRAGMENTATION, TOKU_DB_FRAGMENTATION_S;
struct __toku_db {
struct __toku_db_internal *i;
#define db_struct_i(x) ((x)->i)
......@@ -325,7 +337,8 @@ struct __toku_db {
int (*getf_set)(DB*, DB_TXN*, u_int32_t, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_set without a persistent cursor) */;
int (*getf_get_both)(DB*, DB_TXN*, u_int32_t, DBT*, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_get_both without a persistent cursor) */;
int (*flatten)(DB*, DB_TXN*) /* Flatten a dictionary, similar to (but faster than) a table scan */;
void* __toku_dummy1[31];
int (*get_fragmentation)(DB*,TOKU_DB_FRAGMENTATION);
void* __toku_dummy1[30];
char __toku_dummy2[80];
void *api_internal; /* 32-bit offset=276 size=4, 64=bit offset=464 size=8 */
void* __toku_dummy3[5];
......
......@@ -522,6 +522,21 @@ int main (int argc __attribute__((__unused__)), char *argv[] __attribute__((__un
assert(sizeof(db_fields32)==sizeof(db_fields64));
{
//file fragmentation info
//a block is just a contiguous region in a file.
printf("//One header is included in 'data'\n");
printf("//One header is included in 'additional for checkpoint'\n");
printf("typedef struct __toku_db_fragmentation {\n");
printf(" uint64_t file_size_bytes; //Total file size in bytes\n");
printf(" uint64_t data_bytes; //Compressed User Data in bytes\n");
printf(" uint64_t data_blocks; //Number of blocks of compressed User Data\n");
printf(" uint64_t checkpoint_bytes_additional; //Additional bytes used for checkpoint system\n");
printf(" uint64_t checkpoint_blocks_additional; //Additional blocks used for checkpoint system \n");
printf(" uint64_t unused_bytes; //Unused space in file\n");
printf(" uint64_t unused_blocks; //Number of contiguous regions of unused space\n");
printf(" uint64_t largest_unused_block; //Size of largest contiguous unused space\n");
printf("} *TOKU_DB_FRAGMENTATION, TOKU_DB_FRAGMENTATION_S;\n");
const char *extra[]={"int (*key_range64)(DB*, DB_TXN *, DBT *, u_int64_t *less, u_int64_t *equal, u_int64_t *greater, int *is_exact)",
"int (*stat64)(DB *, DB_TXN *, DB_BTREE_STAT64 *)",
"int (*pre_acquire_read_lock)(DB*, DB_TXN*, const DBT*, const DBT*, const DBT*, const DBT*)",
......@@ -535,6 +550,7 @@ int main (int argc __attribute__((__unused__)), char *argv[] __attribute__((__un
"int (*getf_set)(DB*, DB_TXN*, u_int32_t, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_set without a persistent cursor) */",
"int (*getf_get_both)(DB*, DB_TXN*, u_int32_t, DBT*, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_get_both without a persistent cursor) */",
"int (*flatten)(DB*, DB_TXN*) /* Flatten a dictionary, similar to (but faster than) a table scan */",
"int (*get_fragmentation)(DB*,TOKU_DB_FRAGMENTATION)",
NULL};
print_struct("db", 1, db_fields32, db_fields64, sizeof(db_fields32)/sizeof(db_fields32[0]), extra);
}
......
......@@ -276,6 +276,18 @@ struct __toku_dbt {
typedef int (*toku_dbt_upgradef)(DB*,
u_int32_t old_version, const DBT *old_descriptor, const DBT *old_key, const DBT *old_val,
u_int32_t new_version, const DBT *new_descriptor, const DBT *new_key, const DBT *new_val);
//One header is included in 'data'
//One header is included in 'additional for checkpoint'
typedef struct __toku_db_fragmentation {
uint64_t file_size_bytes; //Total file size in bytes
uint64_t data_bytes; //Compressed User Data in bytes
uint64_t data_blocks; //Number of blocks of compressed User Data
uint64_t checkpoint_bytes_additional; //Additional bytes used for checkpoint system
uint64_t checkpoint_blocks_additional; //Additional blocks used for checkpoint system
uint64_t unused_bytes; //Unused space in file
uint64_t unused_blocks; //Number of contiguous regions of unused space
uint64_t largest_unused_block; //Size of largest contiguous unused space
} *TOKU_DB_FRAGMENTATION, TOKU_DB_FRAGMENTATION_S;
struct __toku_db {
struct __toku_db_internal *i;
#define db_struct_i(x) ((x)->i)
......@@ -294,6 +306,7 @@ struct __toku_db {
int (*getf_set)(DB*, DB_TXN*, u_int32_t, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_set without a persistent cursor) */;
int (*getf_get_both)(DB*, DB_TXN*, u_int32_t, DBT*, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_get_both without a persistent cursor) */;
int (*flatten)(DB*, DB_TXN*) /* Flatten a dictionary, similar to (but faster than) a table scan */;
int (*get_fragmentation)(DB*,TOKU_DB_FRAGMENTATION);
void *api_internal;
int (*close) (DB*, u_int32_t);
int (*cursor) (DB *, DB_TXN *, DBC **, u_int32_t);
......
......@@ -276,6 +276,18 @@ struct __toku_dbt {
typedef int (*toku_dbt_upgradef)(DB*,
u_int32_t old_version, const DBT *old_descriptor, const DBT *old_key, const DBT *old_val,
u_int32_t new_version, const DBT *new_descriptor, const DBT *new_key, const DBT *new_val);
//One header is included in 'data'
//One header is included in 'additional for checkpoint'
typedef struct __toku_db_fragmentation {
uint64_t file_size_bytes; //Total file size in bytes
uint64_t data_bytes; //Compressed User Data in bytes
uint64_t data_blocks; //Number of blocks of compressed User Data
uint64_t checkpoint_bytes_additional; //Additional bytes used for checkpoint system
uint64_t checkpoint_blocks_additional; //Additional blocks used for checkpoint system
uint64_t unused_bytes; //Unused space in file
uint64_t unused_blocks; //Number of contiguous regions of unused space
uint64_t largest_unused_block; //Size of largest contiguous unused space
} *TOKU_DB_FRAGMENTATION, TOKU_DB_FRAGMENTATION_S;
struct __toku_db {
struct __toku_db_internal *i;
#define db_struct_i(x) ((x)->i)
......@@ -294,6 +306,7 @@ struct __toku_db {
int (*getf_set)(DB*, DB_TXN*, u_int32_t, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_set without a persistent cursor) */;
int (*getf_get_both)(DB*, DB_TXN*, u_int32_t, DBT*, DBT*, YDB_CALLBACK_FUNCTION, void*) /* same as DBC->c_getf_get_both without a persistent cursor) */;
int (*flatten)(DB*, DB_TXN*) /* Flatten a dictionary, similar to (but faster than) a table scan */;
int (*get_fragmentation)(DB*,TOKU_DB_FRAGMENTATION);
void *api_internal;
int (*close) (DB*, u_int32_t);
int (*cursor) (DB *, DB_TXN *, DBC **, u_int32_t);
......
......@@ -245,3 +245,80 @@ block_allocator_get_nth_block_in_layout_order (BLOCK_ALLOCATOR ba, u_int64_t b,
return 0;
}
}
//Account one contiguous region of unused file space into the report:
//bump the byte and block totals and track the largest region seen.
static void
note_unused_region(TOKU_DB_FRAGMENTATION report, uint64_t free_space) {
    report->unused_bytes += free_space;
    report->unused_blocks++;
    if (free_space > report->largest_unused_block) {
        report->largest_unused_block = free_space;
    }
}

//Fill in the unused_* fields of *report by walking the allocator's block
//array (sorted by offset) and summing the gaps between allocated regions.
//Requires: report->file_size_bytes is filled in
//Requires: report->data_bytes is filled in
//Requires: report->checkpoint_bytes_additional is filled in
void
block_allocator_get_unused_statistics(BLOCK_ALLOCATOR ba, TOKU_DB_FRAGMENTATION report) {
    //Sanity: everything the allocator thinks is in use must already be
    //accounted for as data or checkpoint overhead.
    assert(ba->n_bytes_in_use == report->data_bytes + report->checkpoint_bytes_additional);

    report->unused_bytes = 0;
    report->unused_blocks = 0;
    report->largest_unused_block = 0;
    if (ba->n_blocks > 0) {
        //Gap between the (aligned) reserved prefix and block 0:
        {
            struct blockpair *bp = &ba->blocks_array[0];
            assert(bp->offset >= align(ba->reserve_at_beginning, ba));
            uint64_t free_space = bp->offset - align(ba->reserve_at_beginning, ba);
            if (free_space > 0) {
                note_unused_region(report, free_space);
            }
        }
        //Gaps between consecutive blocks:
        for (u_int64_t blocknum = 0; blocknum + 1 < ba->n_blocks; blocknum++) {
            // Consider the space after blocknum
            struct blockpair *bp = &ba->blocks_array[blocknum];
            uint64_t end_of_this_block = align(bp[0].offset + bp[0].size, ba);
            uint64_t free_space = bp[1].offset - end_of_this_block;
            if (free_space > 0) {
                note_unused_region(report, free_space);
            }
        }
        //Gap between the last block and the end of the file:
        {
            struct blockpair *bp = &ba->blocks_array[ba->n_blocks - 1];
            uint64_t end_of_this_block = align(bp[0].offset + bp[0].size, ba);
            if (end_of_this_block < report->file_size_bytes) {
                uint64_t free_space = report->file_size_bytes - end_of_this_block;
                assert(free_space > 0);
                note_unused_region(report, free_space);
            }
        }
    }
    else {
        //No blocks at all: everything past the (aligned) reserve is unused.
        uint64_t end_of_reserve = align(ba->reserve_at_beginning, ba);
        if (end_of_reserve < report->file_size_bytes) {
            uint64_t free_space = report->file_size_bytes - end_of_reserve;
            assert(free_space > 0);
            note_unused_region(report, free_space);
        }
    }
}
......@@ -116,4 +116,10 @@ block_allocator_get_nth_block_in_layout_order (BLOCK_ALLOCATOR ba, u_int64_t b,
// Return 0 if there is a block that big, return nonzero if b is too big.
// This is probably only useful for tests.
void
block_allocator_get_unused_statistics(BLOCK_ALLOCATOR ba, TOKU_DB_FRAGMENTATION report);
//Requires: report->file_size_bytes is filled in
//Requires: report->data_bytes is filled in
//Requires: report->checkpoint_bytes_additional is filled in
#endif
......@@ -322,11 +322,6 @@ calculate_size_on_disk (struct translation *t) {
return r;
}
//Cache the serialized on-disk size of the translation table itself in the
//size field of its reserved blocknum entry.
static void
translation_update_size_on_disk (struct translation *t) {
t->block_translation[RESERVED_BLOCKNUM_TRANSLATION].size = calculate_size_on_disk(t);
}
// We cannot free the disk space allocated to this blocknum if it is still in use by the given translation table.
static inline BOOL
translation_prevents_freeing(struct translation *t, BLOCKNUM b, struct block_translation_pair *old_pair) {
......@@ -480,7 +475,6 @@ toku_allocate_blocknum_unlocked(BLOCK_TABLE bt, BLOCKNUM *res, struct brt_header
maybe_expand_translation(t); //Ensure a never used blocknums is available
result = t->smallest_never_used_blocknum;
t->smallest_never_used_blocknum.b++;
translation_update_size_on_disk(t);
} else { // reuse a previously used blocknum
result = t->blocknum_freelist_head;
BLOCKNUM next = t->block_translation[result.b].u.next_free_blocknum;
......@@ -832,4 +826,54 @@ toku_get_descriptor_offset_size(BLOCK_TABLE bt, DISKOFF *offset, DISKOFF *size)
unlock_for_blocktable(bt);
}
//Fill in the data_* and checkpoint_*_additional fields of *report from the
//three block-translation tables (current, checkpointed, in-progress), then
//delegate the unused-space accounting to the block allocator.
void
toku_block_table_get_fragmentation_unlocked(BLOCK_TABLE bt, TOKU_DB_FRAGMENTATION report) {
//Requires: blocktable lock is held.
//Requires: report->file_size_bytes is already filled in.
//Count the headers.
//Each of 'data' and 'checkpoint additional' accounts for one header's worth
//of reserved space (see BLOCK_ALLOCATOR_HEADER_RESERVE).
report->data_bytes = BLOCK_ALLOCATOR_HEADER_RESERVE;
report->data_blocks = 1;
report->checkpoint_bytes_additional = BLOCK_ALLOCATOR_HEADER_RESERVE;
report->checkpoint_blocks_additional = 1;
//Every nonempty entry in the current translation is live user data.
struct translation *current = &bt->current;
int64_t i;
for (i = 0; i < current->length_of_array; i++) {
struct block_translation_pair *pair = &current->block_translation[i];
if (pair->size > 0) {
report->data_bytes += pair->size;
report->data_blocks++;
}
}
//A checkpointed entry is additional checkpoint overhead unless the current
//translation maps the same blocknum to the same disk offset, in which case
//its space was already counted above as data.
struct translation *checkpointed = &bt->checkpointed;
for (i = 0; i < checkpointed->length_of_array; i++) {
struct block_translation_pair *pair = &checkpointed->block_translation[i];
if (pair->size > 0 &&
!(i < current->length_of_array &&
current->block_translation[i].size > 0 &&
current->block_translation[i].u.diskoff == pair->u.diskoff)
) {
report->checkpoint_bytes_additional += pair->size;
report->checkpoint_blocks_additional++;
}
}
//Likewise for in-progress entries, except they must be distinct from BOTH
//the current and the checkpointed mapping to avoid double counting.
struct translation *inprogress = &bt->inprogress;
for (i = 0; i < inprogress->length_of_array; i++) {
struct block_translation_pair *pair = &inprogress->block_translation[i];
if (pair->size > 0 &&
!(i < current->length_of_array &&
current->block_translation[i].size > 0 &&
current->block_translation[i].u.diskoff == pair->u.diskoff) &&
!(i < checkpointed->length_of_array &&
checkpointed->block_translation[i].size > 0 &&
checkpointed->block_translation[i].u.diskoff == pair->u.diskoff)
) {
report->checkpoint_bytes_additional += pair->size;
report->checkpoint_blocks_additional++;
}
}
//The allocator computes unused_bytes/unused_blocks/largest_unused_block
//from the gaps between allocated regions.
block_allocator_get_unused_statistics(bt->block_allocator, report);
}
......@@ -61,8 +61,9 @@ enum translation_type {TRANSLATION_NONE=0,
int toku_blocktable_iterate(BLOCK_TABLE bt, enum translation_type type, BLOCKTABLE_CALLBACK f, void *extra, BOOL data_only, BOOL used_only);
void toku_blocktable_internal_fragmentation(BLOCK_TABLE bt, int64_t *total_sizep, int64_t *used_sizep);
//ROOT FIFO (To delete)
u_int64_t toku_block_allocator_allocated_limit(BLOCK_TABLE bt);
void toku_block_table_get_fragmentation_unlocked(BLOCK_TABLE bt, TOKU_DB_FRAGMENTATION report);
//Requires: blocktable lock is held.
//Requires: report->file_size_bytes is already filled in.
#endif
......@@ -5338,3 +5338,22 @@ int toku_brt_remove_now(CACHETABLE ct, DBT* iname_dbt_p, DBT* iname_within_cwd_d
return r;
}
//Fill in *report with fragmentation statistics for this BRT's underlying
//file.  Returns 0 on success, EINVAL if the cachefile is /dev/null, or the
//error from querying the file size.
int
toku_brt_get_fragmentation(BRT brt, TOKU_DB_FRAGMENTATION report) {
    toku_brtheader_lock(brt->h);
    int64_t file_size;
    int result;
    if (toku_cachefile_is_dev_null(brt->cf)) {
        result = EINVAL;
    } else {
        result = toku_os_get_file_size(toku_cachefile_fd(brt->cf), &file_size);
    }
    if (result == 0) {
        report->file_size_bytes = file_size;
        toku_block_table_get_fragmentation_unlocked(brt->h->blocktable, report);
    }
    toku_brtheader_unlock(brt->h);
    return result;
}
......@@ -203,6 +203,8 @@ int toku_brt_note_table_lock (BRT brt, TOKUTXN txn);
int toku_brt_zombie_needed (BRT brt);
int toku_brt_get_fragmentation(BRT brt, TOKU_DB_FRAGMENTATION report);
//TODO: #1485 once we have multiple main threads, restore this code, analyze performance.
#ifndef TOKU_MULTIPLE_MAIN_THREADS
#define TOKU_MULTIPLE_MAIN_THREADS 0
......
......@@ -132,6 +132,7 @@ BDB_DONTRUN_TESTS = \
recover-put-multiple-fdelete-all \
recover-put-multiple-fdelete-some \
recover-split-checkpoint \
progress \
#\ ends prev line
# checkpoint tests depend on this header file,
......
......@@ -25,6 +25,27 @@ const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_PRIVATE;
int ninsert, nread, nread_notfound, nread_failed, ndelete, ndelete_notfound, ndelete_failed;
static TOKU_DB_FRAGMENTATION_S report;
//Ask the open db for its fragmentation report (into the file-global
//'report') and abort the test on any error.
static void
check_fragmentation(void) {
    int rc = db->get_fragmentation(db, &report);
    CKERR(rc);
}
//Print the most recently collected fragmentation report (one labeled line
//per field) to stdout.
static void
print_fragmentation(void) {
printf("Fragmentation:\n");
printf("\tTotal file size in bytes (file_size_bytes): %"PRIu64"\n", report.file_size_bytes);
printf("\tCompressed User Data in bytes (data_bytes): %"PRIu64"\n", report.data_bytes);
printf("\tNumber of blocks of compressed User Data (data_blocks): %"PRIu64"\n", report.data_blocks);
printf("\tAdditional bytes used for checkpoint system (checkpoint_bytes_additional): %"PRIu64"\n", report.checkpoint_bytes_additional);
printf("\tAdditional blocks used for checkpoint system (checkpoint_blocks_additional): %"PRIu64"\n", report.checkpoint_blocks_additional);
printf("\tUnused space in file (unused_bytes): %"PRIu64"\n", report.unused_bytes);
printf("\tNumber of contiguous regions of unused space (unused_blocks): %"PRIu64"\n", report.unused_blocks);
printf("\tSize of largest contiguous unused space (largest_unused_block): %"PRIu64"\n", report.largest_unused_block);
}
static void
close_em (void)
{
......@@ -147,6 +168,9 @@ getsizeM(void) {
int r = toku_stat(path, &buf);
CKERR(r);
int sizeM = (int)buf.st_size >> 20;
check_fragmentation();
if (verbose>1)
print_fragmentation();
return sizeM;
}
......@@ -205,7 +229,11 @@ int test_main (int argc __attribute__((__unused__)), char *argv[] __attribute__(
setup();
if (verbose) print_engine_status(env);
test_filesize();
if (verbose) print_engine_status(env);
if (verbose) {
print_engine_status(env);
}
check_fragmentation();
if (verbose) print_fragmentation();
close_em();
return 0;
}
......@@ -5091,6 +5091,25 @@ static int locked_db_flatten(DB *db, DB_TXN *txn) {
toku_ydb_lock(); int r = autotxn_db_flatten(db, txn); toku_ydb_unlock(); return r;
}
//Unlocked worker for DB->get_fragmentation: reject the call on an unopened
//DB, otherwise delegate to the brt layer.
static int
db_get_fragmentation(DB * db, TOKU_DB_FRAGMENTATION report) {
    HANDLE_PANICKED_DB(db);
    if (!db_opened(db)) {
        return toku_ydb_do_error(db->dbenv, EINVAL, "Fragmentation report available only on open DBs.\n");
    }
    return toku_brt_get_fragmentation(db->i->brt, report);
}
//DB->get_fragmentation entry point: run db_get_fragmentation while holding
//the global ydb lock.
static int
locked_db_get_fragmentation(DB * db, TOKU_DB_FRAGMENTATION report) {
    toku_ydb_lock();
    int ret = db_get_fragmentation(db, report);
    toku_ydb_unlock();
    return ret;
}
static int toku_db_create(DB ** db, DB_ENV * env, u_int32_t flags) {
int r;
......@@ -5134,6 +5153,7 @@ static int toku_db_create(DB ** db, DB_ENV * env, u_int32_t flags) {
SDB(getf_set);
SDB(getf_get_both);
SDB(flatten);
SDB(get_fragmentation);
#undef SDB
result->dbt_pos_infty = toku_db_dbt_pos_infty;
result->dbt_neg_infty = toku_db_dbt_neg_infty;
......@@ -5340,5 +5360,3 @@ ydb_load_inames(DB_ENV * env, DB_TXN * txn, int N, DB * dbs[N], char * new_iname
return rval;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment