Commit 96a2a6fc authored by John Esmet

fixes #134 Add the get/set/change_fanout API

parent e3b21c8f
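
This commit threads a per-dictionary fanout through the public DB API, mirroring the existing compression-method accessors. Below is a minimal usage sketch that is not part of the commit itself: the helper name, file name, and error handling are hypothetical, and it assumes a DB_ENV that has already been opened the usual way. `set_fanout` is only accepted before `DB->open`, `change_fanout` only after.

```c
#include <assert.h>
#include <db.h>  /* TokuDB's generated db.h, which now declares the fanout methods */

/* Hypothetical helper: create a dictionary with a non-default fanout and
 * verify that the value round-trips through get_fanout/change_fanout. */
static void create_with_fanout(DB_ENV *env, const char *fname) {
    DB *db;
    uint32_t fanout;

    int r = db_create(&db, env, 0);
    assert(r == 0);
    r = db->set_fanout(db, 32);        /* only legal before open */
    assert(r == 0);
    r = db->open(db, NULL, fname, NULL, DB_BTREE, DB_CREATE, 0644);
    assert(r == 0);
    r = db->get_fanout(db, &fanout);   /* reads back the fanout for this freshly created dictionary */
    assert(r == 0 && fanout == 32);
    r = db->change_fanout(db, 64);     /* only legal after open */
    assert(r == 0);
    r = db->close(db, 0);
    assert(r == 0);
}
```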
@@ -540,6 +540,9 @@ static void print_db_struct (void) {
     "int (*change_compression_method)(DB*,TOKU_COMPRESSION_METHOD)",
     "int (*get_compression_method)(DB*,TOKU_COMPRESSION_METHOD*)",
     "int (*set_compression_method)(DB*,TOKU_COMPRESSION_METHOD)",
+    "int (*change_fanout)(DB *db, uint32_t fanout)",
+    "int (*get_fanout)(DB *db, uint32_t *fanout)",
+    "int (*set_fanout)(DB *db, uint32_t fanout)",
     "int (*set_indexer)(DB*, DB_INDEXER*)",
     "void (*get_indexer)(DB*, DB_INDEXER**)",
     "int (*verify_with_progress)(DB *, int (*progress_callback)(void *progress_extra, float progress), void *progress_extra, int verbose, int keep_going)",
...
@@ -544,11 +544,13 @@ ct_flusher_advice_init(struct flusher_advice *fa, struct flush_status_update_ext
 // a leaf node that is not entirely in memory. If so, then
 // we cannot be sure if the node is reactive.
 //
-static bool may_node_be_reactive(FTNODE node)
+static bool may_node_be_reactive(FT ft, FTNODE node)
 {
-    if (node->height == 0) return true;
+    if (node->height == 0) {
+        return true;
+    }
     else {
-        return (get_nonleaf_reactivity(node) != RE_STABLE);
+        return (get_nonleaf_reactivity(node, ft->h->fanout) != RE_STABLE);
     }
 }
@@ -1589,7 +1591,7 @@ static void ft_flush_some_child(
     // Let's do a quick check to see if the child may be reactive
     // If the child cannot be reactive, then we can safely unlock
     // the parent before finishing reading in the entire child node.
-    bool may_child_be_reactive = may_node_be_reactive(child);
+    bool may_child_be_reactive = may_node_be_reactive(ft, child);
     paranoid_invariant(child->thisnodename.b!=0);
     //VERIFY_NODE(brt, child);
@@ -1631,7 +1633,7 @@ static void ft_flush_some_child(
     // we wont be splitting/merging child
     // and we have already replaced the bnc
     // for the root with a fresh one
-    enum reactivity child_re = get_node_reactivity(child, ft->h->nodesize);
+    enum reactivity child_re = get_node_reactivity(ft, child);
     if (parent && child_re == RE_STABLE) {
         toku_unpin_ftnode_off_client_thread(ft, parent);
         parent = NULL;
@@ -1661,7 +1663,7 @@ static void ft_flush_some_child(
     // let's get the reactivity of the child again,
     // it is possible that the flush got rid of some values
     // and now the parent is no longer reactive
-    child_re = get_node_reactivity(child, ft->h->nodesize);
+    child_re = get_node_reactivity(ft, child);
     // if the parent has been unpinned above, then
     // this is our only option, even if the child is not stable
     // if the child is not stable, we'll handle it the next
@@ -1971,7 +1973,7 @@ void toku_ft_flush_node_on_background_thread(FT h, FTNODE parent)
     //
     // successfully locked child
     //
-    bool may_child_be_reactive = may_node_be_reactive(child);
+    bool may_child_be_reactive = may_node_be_reactive(h, child);
     if (!may_child_be_reactive) {
         // We're going to unpin the parent, so before we do, we must
         // check to see if we need to blow away the basement nodes to
...
@@ -117,15 +117,10 @@ PATENT RIGHTS GRANT:
 #include <util/omt.h>
 #include "bndata.h"
-#ifndef FT_FANOUT
-#define FT_FANOUT 16
-#endif
-enum { TREE_FANOUT = FT_FANOUT };
 enum { KEY_VALUE_OVERHEAD = 8 }; /* Must store the two lengths. */
-enum { FT_CMD_OVERHEAD = (2 + sizeof(MSN))   // the type plus freshness plus MSN
-};
-enum { FT_DEFAULT_NODE_SIZE = 1 << 22 };
+enum { FT_CMD_OVERHEAD = (2 + sizeof(MSN)) };   // the type plus freshness plus MSN
+enum { FT_DEFAULT_FANOUT = 16 };
+enum { FT_DEFAULT_NODE_SIZE = 4 * 1024 * 1024 };
 enum { FT_DEFAULT_BASEMENT_NODE_SIZE = 128 * 1024 };
 //
@@ -238,12 +233,10 @@ void toku_bnc_flush_to_child(FT h, NONLEAF_CHILDINFO bnc, FTNODE child, TXNID ol
 bool toku_bnc_should_promote(FT ft, NONLEAF_CHILDINFO bnc) __attribute__((const, nonnull));
 bool toku_ft_nonleaf_is_gorged(FTNODE node, uint32_t nodesize);
-enum reactivity get_nonleaf_reactivity (FTNODE node);
-enum reactivity get_node_reactivity (FTNODE node, uint32_t nodesize);
+enum reactivity get_nonleaf_reactivity(FTNODE node, unsigned int fanout);
+enum reactivity get_node_reactivity(FT ft, FTNODE node);
 uint32_t get_leaf_num_entries(FTNODE node);
 // data of an available partition of a leaf ftnode
 struct ftnode_leaf_basement_node {
     bn_data data_buffer;
@@ -336,7 +329,7 @@ struct ftnode {
     int height; /* height is always >= 0.  0 for leaf, >0 for nonleaf. */
     int dirty;
     uint32_t fullhash;
-    int n_children; //for internal nodes, if n_children==TREE_FANOUT+1 then the tree needs to be rebalanced.
+    int n_children; //for internal nodes, if n_children==fanout+1 then the tree needs to be rebalanced.
                     // for leaf nodes, represents number of basement nodes
     unsigned int totalchildkeylens;
     DBT *childkeys; /* Pivot keys.  Child 0's keys are <= childkeys[0].  Child 1's keys are <= childkeys[1].
@@ -509,6 +502,7 @@ struct ft_header {
     unsigned int nodesize;
     unsigned int basementnodesize;
     enum toku_compression_method compression_method;
+    unsigned int fanout;
     // Current Minimum MSN to be used when upgrading pre-MSN BRT's.
     // This is decremented from our currnt MIN_MSN so as not to clash
@@ -590,6 +584,7 @@ struct ft_options {
     unsigned int nodesize;
     unsigned int basementnodesize;
     enum toku_compression_method compression_method;
+    unsigned int fanout;
     unsigned int flags;
     ft_compare_func compare_fun;
     ft_update_func update_fun;
...
@@ -428,21 +428,21 @@ get_leaf_reactivity (FTNODE node, uint32_t nodesize) {
 }
 enum reactivity
-get_nonleaf_reactivity (FTNODE node) {
+get_nonleaf_reactivity(FTNODE node, unsigned int fanout) {
     paranoid_invariant(node->height>0);
     int n_children = node->n_children;
-    if (n_children > TREE_FANOUT) return RE_FISSIBLE;
-    if (n_children*4 < TREE_FANOUT) return RE_FUSIBLE;
+    if (n_children > (int) fanout) return RE_FISSIBLE;
+    if (n_children*4 < (int) fanout) return RE_FUSIBLE;
     return RE_STABLE;
 }
 enum reactivity
-get_node_reactivity (FTNODE node, uint32_t nodesize) {
+get_node_reactivity(FT ft, FTNODE node) {
     toku_assert_entire_node_in_memory(node);
     if (node->height==0)
-        return get_leaf_reactivity(node, nodesize);
+        return get_leaf_reactivity(node, ft->h->nodesize);
     else
-        return get_nonleaf_reactivity(node);
+        return get_nonleaf_reactivity(node, ft->h->fanout);
 }
 unsigned int
@@ -2689,7 +2689,7 @@ static bool process_maybe_reactive_child(FT ft, FTNODE parent, FTNODE child, int
 // true if relocking is needed
 // false otherwise
 {
-    enum reactivity re = get_node_reactivity(child, ft->h->nodesize);
+    enum reactivity re = get_node_reactivity(ft, child);
     enum reactivity newre;
     BLOCKNUM child_blocknum;
     uint32_t child_fullhash;
@@ -2723,7 +2723,7 @@ static bool process_maybe_reactive_child(FT ft, FTNODE parent, FTNODE child, int
     child_blocknum = BP_BLOCKNUM(newparent, childnum);
     child_fullhash = compute_child_fullhash(ft->cf, newparent, childnum);
     toku_pin_ftnode_off_client_thread_batched(ft, child_blocknum, child_fullhash, &bfe, PL_WRITE_CHEAP, 1, &newparent, &newchild);
-    newre = get_node_reactivity(newchild, ft->h->nodesize);
+    newre = get_node_reactivity(ft, newchild);
     if (newre == RE_FISSIBLE) {
         enum split_mode split_mode;
         if (newparent->height == 1 && (loc & LEFT_EXTREME) && childnum == 0) {
@@ -2769,7 +2769,7 @@ static bool process_maybe_reactive_child(FT ft, FTNODE parent, FTNODE child, int
     child_blocknum = BP_BLOCKNUM(newparent, childnum);
     child_fullhash = compute_child_fullhash(ft->cf, newparent, childnum);
     toku_pin_ftnode_off_client_thread_batched(ft, child_blocknum, child_fullhash, &bfe, PL_READ, 1, &newparent, &newchild);
-    newre = get_node_reactivity(newchild, ft->h->nodesize);
+    newre = get_node_reactivity(ft, newchild);
     if (newre == RE_FUSIBLE && newparent->n_children >= 2) {
         toku_unpin_ftnode_read_only(ft, newchild);
         toku_ft_merge_child(ft, newparent, childnum);
@@ -3059,7 +3059,7 @@ void toku_ft_root_put_cmd(
     // injection thread to change lock type back and forth, when only one
     // of them needs to in order to handle the split.  That's not great,
     // but root splits are incredibly rare.
-    enum reactivity re = get_node_reactivity(node, ft->h->nodesize);
+    enum reactivity re = get_node_reactivity(ft, node);
     switch (re) {
     case RE_STABLE:
     case RE_FUSIBLE: // cannot merge anything at the root
@@ -3429,6 +3429,7 @@ int toku_open_ft_handle (const char *fname, int is_create, FT_HANDLE *ft_handle_
     toku_ft_handle_set_nodesize(brt, nodesize);
     toku_ft_handle_set_basementnodesize(brt, basementnodesize);
     toku_ft_handle_set_compression_method(brt, compression_method);
+    toku_ft_handle_set_fanout(brt, 16);
     toku_ft_set_bt_compare(brt, compare_fun);
     int r = toku_ft_handle_open(brt, fname, is_create, only_create, cachetable, txn);
@@ -3516,6 +3517,27 @@ toku_ft_handle_get_compression_method(FT_HANDLE t, enum toku_compression_method
     }
 }
+void
+toku_ft_handle_set_fanout(FT_HANDLE ft_handle, unsigned int fanout)
+{
+    if (ft_handle->ft) {
+        toku_ft_set_fanout(ft_handle->ft, fanout);
+    }
+    else {
+        ft_handle->options.fanout = fanout;
+    }
+}
+
+void
+toku_ft_handle_get_fanout(FT_HANDLE ft_handle, unsigned int *fanout)
+{
+    if (ft_handle->ft) {
+        toku_ft_get_fanout(ft_handle->ft, fanout);
+    }
+    else {
+        *fanout = ft_handle->options.fanout;
+    }
+}
+
 static int
 verify_builtin_comparisons_consistent(FT_HANDLE t, uint32_t flags) {
     if ((flags & TOKU_DB_KEYCMP_BUILTIN) && (t->options.compare_fun != toku_builtin_compare_fun))
@@ -3582,6 +3604,7 @@ toku_ft_handle_inherit_options(FT_HANDLE t, FT ft) {
         .nodesize = ft->h->nodesize,
         .basementnodesize = ft->h->basementnodesize,
         .compression_method = ft->h->compression_method,
+        .fanout = ft->h->fanout,
         .flags = ft->h->flags,
         .compare_fun = ft->compare_fun,
         .update_fun = ft->update_fun
@@ -3937,6 +3960,7 @@ void toku_ft_handle_create(FT_HANDLE *ft_handle_ptr) {
     brt->options.nodesize = FT_DEFAULT_NODE_SIZE;
     brt->options.basementnodesize = FT_DEFAULT_BASEMENT_NODE_SIZE;
     brt->options.compression_method = TOKU_DEFAULT_COMPRESSION_METHOD;
+    brt->options.fanout = FT_DEFAULT_FANOUT;
    brt->options.compare_fun = toku_builtin_compare_fun;
    brt->options.update_fun = NULL;
    *ft_handle_ptr = brt;
...
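
For context on the get_nonleaf_reactivity change above: split/merge decisions for internal nodes are now driven by the fanout stored in the tree header instead of the old compile-time TREE_FANOUT. A standalone sketch of the thresholds, using illustrative names that are not part of the library:

```c
/* Mirrors the thresholds in get_nonleaf_reactivity: a nonleaf node with more
 * children than the fanout should split, and one with fewer than a quarter of
 * the fanout should merge. */
enum example_reactivity { EX_STABLE, EX_FISSIBLE, EX_FUSIBLE };

static enum example_reactivity
example_nonleaf_reactivity(int n_children, unsigned int fanout) {
    if (n_children > (int) fanout)     return EX_FISSIBLE;  /* too wide: split  */
    if (n_children * 4 < (int) fanout) return EX_FUSIBLE;   /* too narrow: merge */
    return EX_STABLE;
}

/* With the default fanout of 16:
 *   example_nonleaf_reactivity(17, 16) == EX_FISSIBLE
 *   example_nonleaf_reactivity(3, 16)  == EX_FUSIBLE
 *   example_nonleaf_reactivity(8, 16)  == EX_STABLE */
```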
@@ -137,6 +137,8 @@ void toku_ft_handle_set_basementnodesize(FT_HANDLE, unsigned int basementnodesiz
 void toku_ft_handle_get_basementnodesize(FT_HANDLE, unsigned int *basementnodesize);
 void toku_ft_handle_set_compression_method(FT_HANDLE, enum toku_compression_method);
 void toku_ft_handle_get_compression_method(FT_HANDLE, enum toku_compression_method *);
+void toku_ft_handle_set_fanout(FT_HANDLE, unsigned int fanout);
+void toku_ft_handle_get_fanout(FT_HANDLE, unsigned int *fanout);
 void toku_ft_set_bt_compare(FT_HANDLE, ft_compare_func);
 ft_compare_func toku_ft_get_bt_compare (FT_HANDLE brt);
...
@@ -404,6 +404,7 @@ int deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version)
         .nodesize = nodesize,
         .basementnodesize = basementnodesize,
         .compression_method = compression_method,
+        .fanout = FT_DEFAULT_FANOUT, // fanout is not serialized, must be set at startup
         .highest_unused_msn_for_upgrade = highest_unused_msn_for_upgrade,
         .max_msn_in_ft = max_msn_in_ft,
         .time_of_last_optimize_begin = time_of_last_optimize_begin,
...
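
Note the comment in the hunk above: fanout is not written to disk in this version, so the deserializer falls back to FT_DEFAULT_FANOUT. An application that wants a non-default fanout on an existing dictionary would therefore re-apply it after each open; a hedged sketch, with a hypothetical helper name and open flags:

```c
/* Sketch: re-apply a non-default fanout after opening an existing dictionary,
 * since the on-disk header does not carry it. */
static int reopen_with_fanout(DB *db, const char *fname, uint32_t desired_fanout) {
    int r = db->open(db, NULL, fname, NULL, DB_BTREE, 0, 0644);
    if (r != 0) {
        return r;
    }
    return db->change_fanout(db, desired_fanout);  /* valid only on an opened DB */
}
```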
@@ -142,7 +142,6 @@ int toku_testsetup_leaf(FT_HANDLE brt, BLOCKNUM *blocknum, int n_children, char
 int toku_testsetup_nonleaf (FT_HANDLE brt, int height, BLOCKNUM *blocknum, int n_children, BLOCKNUM *children, char **keys, int *keylens) {
     FTNODE node;
     assert(testsetup_initialized);
-    assert(n_children<=FT_FANOUT);
     toku_create_new_ftnode(brt, &node, height, n_children);
     int i;
     for (i=0; i<n_children; i++) {
...
@@ -431,6 +431,7 @@ ft_header_create(FT_OPTIONS options, BLOCKNUM root_blocknum, TXNID root_xid_that
         .nodesize = options->nodesize,
         .basementnodesize = options->basementnodesize,
         .compression_method = options->compression_method,
+        .fanout = options->fanout,
         .highest_unused_msn_for_upgrade = { .msn = (MIN_MSN.msn - 1) },
         .max_msn_in_ft = ZERO_MSN,
         .time_of_last_optimize_begin = 0,
@@ -606,13 +607,16 @@ toku_ft_init(FT ft,
              TXNID root_xid_that_created,
              uint32_t target_nodesize,
              uint32_t target_basementnodesize,
-             enum toku_compression_method compression_method)
+             enum toku_compression_method compression_method,
+             uint32_t fanout
+             )
 {
     memset(ft, 0, sizeof *ft);
     struct ft_options options = {
         .nodesize = target_nodesize,
         .basementnodesize = target_basementnodesize,
         .compression_method = compression_method,
+        .fanout = fanout,
         .flags = 0,
         .compare_fun = NULL,
         .update_fun = NULL
@@ -633,6 +637,7 @@ ft_handle_open_for_redirect(FT_HANDLE *new_ftp, const char *fname_in_env, TOKUTX
     toku_ft_handle_set_nodesize(t, old_h->h->nodesize);
     toku_ft_handle_set_basementnodesize(t, old_h->h->basementnodesize);
     toku_ft_handle_set_compression_method(t, old_h->h->compression_method);
+    toku_ft_handle_set_fanout(t, old_h->h->fanout);
     CACHETABLE ct = toku_cachefile_get_cachetable(old_h->cf);
     int r = toku_ft_handle_open_with_dict_id(t, fname_in_env, 0, 0, ct, txn, old_h->dict_id);
     if (r != 0) {
@@ -1022,6 +1027,19 @@ void toku_ft_get_compression_method(FT ft, enum toku_compression_method *methodp
     toku_ft_unlock(ft);
 }
+void toku_ft_set_fanout(FT ft, unsigned int fanout) {
+    toku_ft_lock(ft);
+    ft->h->fanout = fanout;
+    ft->h->dirty = 1;
+    toku_ft_unlock(ft);
+}
+
+void toku_ft_get_fanout(FT ft, unsigned int *fanout) {
+    toku_ft_lock(ft);
+    *fanout = ft->h->fanout;
+    toku_ft_unlock(ft);
+}
+
 // mark the ft as a blackhole. any message injections will be a no op.
 void toku_ft_set_blackhole(FT_HANDLE ft_handle) {
     ft_handle->ft->blackhole = true;
...
@@ -130,13 +130,14 @@ void toku_ft_note_hot_complete(FT_HANDLE brt, bool success, MSN msn_at_start_of_
 void
 toku_ft_init(
-    FT h,
+    FT ft,
     BLOCKNUM root_blocknum_on_disk,
     LSN checkpoint_lsn,
     TXNID root_xid_that_created,
     uint32_t target_nodesize,
     uint32_t target_basementnodesize,
-    enum toku_compression_method compression_method
+    enum toku_compression_method compression_method,
+    uint32_t fanout
     );
 int toku_dictionary_redirect_abort(FT old_h, FT new_h, TOKUTXN txn) __attribute__ ((warn_unused_result));
@@ -186,6 +187,8 @@ void toku_ft_set_basementnodesize(FT ft, unsigned int basementnodesize);
 void toku_ft_get_basementnodesize(FT ft, unsigned int *basementnodesize);
 void toku_ft_set_compression_method(FT ft, enum toku_compression_method method);
 void toku_ft_get_compression_method(FT ft, enum toku_compression_method *methodp);
+void toku_ft_set_fanout(FT ft, unsigned int fanout);
+void toku_ft_get_fanout(FT ft, unsigned int *fanout);
 void toku_node_save_ct_pair(CACHEKEY UU(key), void *value_data, PAIR p);
 // mark the ft as a blackhole. any message injections will be a no op.
...
@@ -288,6 +288,7 @@ struct fractal_thread_args {
     uint32_t target_nodesize;
     uint32_t target_basementnodesize;
     enum toku_compression_method target_compression_method;
+    uint32_t target_fanout;
 };
 void toku_ft_loader_set_n_rows(FTLOADER bl, uint64_t n_rows);
@@ -319,7 +320,8 @@ int toku_loader_write_brt_from_q_in_C (FTLOADER bl,
                                        int which_db,
                                        uint32_t target_nodesize,
                                        uint32_t target_basementnodesize,
-                                       enum toku_compression_method target_compression_method);
+                                       enum toku_compression_method target_compression_method,
+                                       uint32_t fanout);
 int ft_loader_mergesort_row_array (struct row rows[/*n*/], int n, int which_db, DB *dest_db, ft_compare_func, FTLOADER, struct rowset *);
...
@@ -2405,7 +2405,8 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
                                         int which_db,
                                         uint32_t target_nodesize,
                                         uint32_t target_basementnodesize,
-                                        enum toku_compression_method target_compression_method)
+                                        enum toku_compression_method target_compression_method,
+                                        uint32_t target_fanout)
 // Effect: Consume a sequence of rowsets work from a queue, creating a fractal tree.  Closes fd.
 {
     // set the number of fractal tree writer threads so that we can partition memory in the merger
@@ -2434,7 +2435,7 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
     // TODO: (Zardosht/Yoni/Leif), do this code properly
     struct ft ft;
-    toku_ft_init(&ft, (BLOCKNUM){0}, bl->load_lsn, root_xid_that_created, target_nodesize, target_basementnodesize, target_compression_method);
+    toku_ft_init(&ft, (BLOCKNUM){0}, bl->load_lsn, root_xid_that_created, target_nodesize, target_basementnodesize, target_compression_method, target_fanout);
     struct dbout out;
     ZERO_STRUCT(out);
@@ -2680,18 +2681,19 @@ int toku_loader_write_brt_from_q_in_C (FTLOADER bl,
                                        int which_db,
                                        uint32_t target_nodesize,
                                        uint32_t target_basementnodesize,
-                                       enum toku_compression_method target_compression_method)
+                                       enum toku_compression_method target_compression_method,
+                                       uint32_t target_fanout)
 // This is probably only for testing.
 {
     target_nodesize = target_nodesize == 0 ? default_loader_nodesize : target_nodesize;
     target_basementnodesize = target_basementnodesize == 0 ? default_loader_basementnodesize : target_basementnodesize;
-    return toku_loader_write_ft_from_q (bl, descriptor, fd, progress_allocation, q, total_disksize_estimate, which_db, target_nodesize, target_basementnodesize, target_compression_method);
+    return toku_loader_write_ft_from_q (bl, descriptor, fd, progress_allocation, q, total_disksize_estimate, which_db, target_nodesize, target_basementnodesize, target_compression_method, target_fanout);
 }
 static void* fractal_thread (void *ftav) {
     struct fractal_thread_args *fta = (struct fractal_thread_args *)ftav;
-    int r = toku_loader_write_ft_from_q (fta->bl, fta->descriptor, fta->fd, fta->progress_allocation, fta->q, fta->total_disksize_estimate, fta->which_db, fta->target_nodesize, fta->target_basementnodesize, fta->target_compression_method);
+    int r = toku_loader_write_ft_from_q (fta->bl, fta->descriptor, fta->fd, fta->progress_allocation, fta->q, fta->total_disksize_estimate, fta->which_db, fta->target_nodesize, fta->target_basementnodesize, fta->target_compression_method, fta->target_fanout);
     fta->errno_result = r;
     return NULL;
 }
@@ -2727,7 +2729,7 @@ static int loader_do_i (FTLOADER bl,
         r = get_error_errno(); goto error;
     }
-    uint32_t target_nodesize, target_basementnodesize;
+    uint32_t target_nodesize, target_basementnodesize, target_fanout;
     enum toku_compression_method target_compression_method;
     r = dest_db->get_pagesize(dest_db, &target_nodesize);
     invariant_zero(r);
@@ -2735,6 +2737,8 @@ static int loader_do_i (FTLOADER bl,
     invariant_zero(r);
     r = dest_db->get_compression_method(dest_db, &target_compression_method);
     invariant_zero(r);
+    r = dest_db->get_fanout(dest_db, &target_fanout);
+    invariant_zero(r);
     // This structure must stay live until the join below.
     struct fractal_thread_args fta = { bl,
@@ -2748,6 +2752,7 @@ static int loader_do_i (FTLOADER bl,
                                        target_nodesize,
                                        target_basementnodesize,
                                        target_compression_method,
+                                       target_fanout
     };
     r = toku_pthread_create(bl->fractal_threads+which_db, NULL, fractal_thread, (void*)&fta);
...
@@ -420,7 +420,8 @@ test_prefetching(void) {
                  TXNID_NONE,
                  4*1024*1024,
                  128*1024,
-                 TOKU_DEFAULT_COMPRESSION_METHOD);
+                 TOKU_DEFAULT_COMPRESSION_METHOD,
+                 16);
     brt->ft = brt_h;
     toku_blocktable_create_new(&brt_h->blocktable);
     { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
...
@@ -355,7 +355,8 @@ test_serialize_nonleaf(void) {
                  TXNID_NONE,
                  4*1024*1024,
                  128*1024,
-                 TOKU_DEFAULT_COMPRESSION_METHOD);
+                 TOKU_DEFAULT_COMPRESSION_METHOD,
+                 16);
     brt->ft = brt_h;
     toku_blocktable_create_new(&brt_h->blocktable);
@@ -438,7 +439,8 @@ test_serialize_leaf(void) {
                  TXNID_NONE,
                  4*1024*1024,
                  128*1024,
-                 TOKU_DEFAULT_COMPRESSION_METHOD);
+                 TOKU_DEFAULT_COMPRESSION_METHOD,
+                 16);
     brt->ft = brt_h;
     toku_blocktable_create_new(&brt_h->blocktable);
...
@@ -189,7 +189,8 @@ test_serialize_leaf(int valsize, int nelts, double entropy) {
                  TXNID_NONE,
                  4*1024*1024,
                  128*1024,
-                 TOKU_DEFAULT_COMPRESSION_METHOD);
+                 TOKU_DEFAULT_COMPRESSION_METHOD,
+                 16);
     brt->ft = brt_h;
     brt_h->compare_fun = long_key_cmp;
@@ -319,7 +320,8 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy) {
                  TXNID_NONE,
                  4*1024*1024,
                  128*1024,
-                 TOKU_DEFAULT_COMPRESSION_METHOD);
+                 TOKU_DEFAULT_COMPRESSION_METHOD,
+                 16);
     brt->ft = brt_h;
     brt_h->compare_fun = long_key_cmp;
...
@@ -306,7 +306,8 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, bool do_clone) {
                  TXNID_NONE,
                  4*1024*1024,
                  128*1024,
-                 TOKU_DEFAULT_COMPRESSION_METHOD);
+                 TOKU_DEFAULT_COMPRESSION_METHOD,
+                 16);
     brt->ft = brt_h;
     toku_blocktable_create_new(&brt_h->blocktable);
     { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
@@ -449,7 +450,8 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, bool do_clone
                  TXNID_NONE,
                  4*1024*1024,
                  128*1024,
-                 TOKU_DEFAULT_COMPRESSION_METHOD);
+                 TOKU_DEFAULT_COMPRESSION_METHOD,
+                 16);
     brt->ft = brt_h;
     toku_blocktable_create_new(&brt_h->blocktable);
     { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
@@ -586,7 +588,8 @@ test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, bool do_clone) {
                  TXNID_NONE,
                  4*1024*1024,
                  128*1024,
-                 TOKU_DEFAULT_COMPRESSION_METHOD);
+                 TOKU_DEFAULT_COMPRESSION_METHOD,
+                 16);
     brt->ft = brt_h;
     toku_blocktable_create_new(&brt_h->blocktable);
@@ -733,7 +736,8 @@ test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, bool do_clone)
                  TXNID_NONE,
                  4*1024*1024,
                  128*1024,
-                 TOKU_DEFAULT_COMPRESSION_METHOD);
+                 TOKU_DEFAULT_COMPRESSION_METHOD,
+                 16);
     brt->ft = brt_h;
     toku_blocktable_create_new(&brt_h->blocktable);
@@ -881,7 +885,8 @@ test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, bool
                  TXNID_NONE,
                  4*1024*1024,
                  128*1024,
-                 TOKU_DEFAULT_COMPRESSION_METHOD);
+                 TOKU_DEFAULT_COMPRESSION_METHOD,
+                 16);
     brt->ft = brt_h;
     toku_blocktable_create_new(&brt_h->blocktable);
@@ -1009,7 +1014,8 @@ test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type b
                  TXNID_NONE,
                  4*1024*1024,
                  128*1024,
-                 TOKU_DEFAULT_COMPRESSION_METHOD);
+                 TOKU_DEFAULT_COMPRESSION_METHOD,
+                 16);
     brt->ft = brt_h;
     toku_blocktable_create_new(&brt_h->blocktable);
@@ -1134,7 +1140,8 @@ test_serialize_nonleaf(enum ftnode_verify_type bft, bool do_clone) {
                  TXNID_NONE,
                  4*1024*1024,
                  128*1024,
-                 TOKU_DEFAULT_COMPRESSION_METHOD);
+                 TOKU_DEFAULT_COMPRESSION_METHOD,
+                 16);
     brt->ft = brt_h;
     toku_blocktable_create_new(&brt_h->blocktable);
...
@@ -213,7 +213,7 @@ static int write_dbfile (char *tf_template, int n, char *output_name, bool expec
     ft_loader_set_error_function(&bl.error_callback, NULL, NULL);
     ft_loader_set_poll_function(&bl.poll_callback, loader_poll_callback, NULL);
-    result = toku_loader_write_brt_from_q_in_C(&bl, &desc, fd, 1000, q2, size_est, 0, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD);
+    result = toku_loader_write_brt_from_q_in_C(&bl, &desc, fd, 1000, q2, size_est, 0, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD, 16);
     toku_set_func_malloc_only(NULL);
     toku_set_func_realloc_only(NULL);
...
@@ -262,7 +262,7 @@ static void test_write_dbfile (char *tf_template, int n, char *output_name, TXNI
     assert(fd>=0);
     if (verbose) traceit("write to file");
-    r = toku_loader_write_brt_from_q_in_C(&bl, &desc, fd, 1000, q2, size_est, 0, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD);
+    r = toku_loader_write_brt_from_q_in_C(&bl, &desc, fd, 1000, q2, size_est, 0, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD, 16);
     assert(r==0);
     r = queue_destroy(q2);
...
@@ -425,7 +425,7 @@ static void test_merge_files (const char *tf_template, const char *output_name)
     int fd = open(output_name, O_RDWR | O_CREAT | O_BINARY, S_IRWXU|S_IRWXG|S_IRWXO);
     assert(fd>=0);
-    r = toku_loader_write_brt_from_q_in_C(&bl, &desc, fd, 1000, q, size_est, 0, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD);
+    r = toku_loader_write_brt_from_q_in_C(&bl, &desc, fd, 1000, q, size_est, 0, 0, 0, TOKU_DEFAULT_COMPRESSION_METHOD, 16);
     assert(r==0);
     destroy_merge_fileset(&fs);
...
@@ -130,10 +130,10 @@ const char *fname = TOKU_TEST_FILENAME;
 static void
 doit (int ksize __attribute__((__unused__))) {
-    BLOCKNUM cnodes[FT_FANOUT], bnode, anode;
+    BLOCKNUM cnodes[16], bnode, anode;
-    char *keys[FT_FANOUT-1];
-    int keylens[FT_FANOUT-1];
+    char *keys[16-1];
+    int keylens[16-1];
     int i;
     int r;
@@ -144,7 +144,7 @@ doit (int ksize __attribute__((__unused__))) {
     toku_testsetup_initialize();  // must precede any other toku_testsetup calls
-    for (i=0; i<FT_FANOUT; i++) {
+    for (i=0; i<16; i++) {
        r=toku_testsetup_leaf(t, &cnodes[i], 1, NULL, NULL);
        assert(r==0);
        char key[KSIZE+10];
@@ -156,16 +156,16 @@ doit (int ksize __attribute__((__unused__))) {
     }
     // Now we have a bunch of leaves, all of which are with 100 bytes of full.
-    for (i=0; i+1<FT_FANOUT; i++) {
+    for (i=0; i+1<16; i++) {
        char key[TOKU_PSIZE];
        keylens[i]=1+snprintf(key, TOKU_PSIZE, "%08d", (i+1)*10000);
        keys[i]=toku_strdup(key);
     }
-    r = toku_testsetup_nonleaf(t, 1, &bnode, FT_FANOUT, cnodes, keys, keylens);
+    r = toku_testsetup_nonleaf(t, 1, &bnode, 16, cnodes, keys, keylens);
     assert(r==0);
-    for (i=0; i+1<FT_FANOUT; i++) {
+    for (i=0; i+1<16; i++) {
        toku_free(keys[i]);
     }
...
@@ -688,6 +688,29 @@ toku_db_get_compression_method(DB *db, enum toku_compression_method *compression
     return 0;
 }
+static int
+toku_db_change_fanout(DB *db, unsigned int fanout) {
+    HANDLE_PANICKED_DB(db);
+    if (!db_opened(db)) return EINVAL;
+    toku_ft_handle_set_fanout(db->i->ft_handle, fanout);
+    return 0;
+}
+
+static int
+toku_db_set_fanout(DB *db, unsigned int fanout) {
+    HANDLE_PANICKED_DB(db);
+    if (db_opened(db)) return EINVAL;
+    toku_ft_handle_set_fanout(db->i->ft_handle, fanout);
+    return 0;
+}
+
+static int
+toku_db_get_fanout(DB *db, unsigned int *fanout) {
+    HANDLE_PANICKED_DB(db);
+    toku_ft_handle_get_fanout(db->i->ft_handle, fanout);
+    return 0;
+}
+
 static int
 toku_db_get_fractal_tree_info64(DB *db, uint64_t *num_blocks_allocated, uint64_t *num_blocks_in_use, uint64_t *size_allocated, uint64_t *size_in_use) {
     HANDLE_PANICKED_DB(db);
@@ -1034,6 +1057,9 @@ toku_db_create(DB ** db, DB_ENV * env, uint32_t flags) {
     USDB(set_compression_method);
     USDB(get_compression_method);
     USDB(change_compression_method);
+    USDB(set_fanout);
+    USDB(get_fanout);
+    USDB(change_fanout);
     USDB(set_flags);
     USDB(get_flags);
     USDB(fd);
...
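
The ydb-layer wrappers above encode an open-state contract: set_fanout is rejected with EINVAL once the DB is open, and change_fanout is rejected until it is. A small illustrative check, assuming two hypothetical handles in the corresponding states:

```c
#include <assert.h>
#include <errno.h>

/* opened_db has been opened; unopened_db has only been created. */
static void check_fanout_contract(DB *opened_db, DB *unopened_db) {
    assert(opened_db->set_fanout(opened_db, 32) == EINVAL);        /* too late  */
    assert(unopened_db->change_fanout(unopened_db, 32) == EINVAL); /* too early */
    assert(unopened_db->set_fanout(unopened_db, 32) == 0);
    assert(opened_db->change_fanout(opened_db, 32) == 0);
}
```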