Commit db9ef1a5 authored by Yoni Fogel's avatar Yoni Fogel

Refs Tokutek/ft-index#46 Comments, minor renames, reworked...

Refs Tokutek/ft-index#46 Comments, minor renames, reworked bn_data::move_leafentries s.t. splits do not significantly increase memory usage of basement nodes and to be cleaner
parent 7f2e8e72
This diff is collapsed.
...@@ -193,9 +193,11 @@ class bn_data { ...@@ -193,9 +193,11 @@ class bn_data {
void verify_mempool(void); void verify_mempool(void);
// size() of key dmt // size() of key dmt
// TODO(yoni): maybe rename to something like num_klpairs
uint32_t dmt_size(void) const; uint32_t dmt_size(void) const;
// iterate() on key dmt (and associated leafentries) // iterate() on key dmt (and associated leafentries)
// TODO(yoni): rename to iterate
template<typename iterate_extra_t, template<typename iterate_extra_t,
int (*f)(const void * key, const uint32_t keylen, const LEAFENTRY &, const uint32_t, iterate_extra_t *const)> int (*f)(const void * key, const uint32_t keylen, const LEAFENTRY &, const uint32_t, iterate_extra_t *const)>
int dmt_iterate(iterate_extra_t *const iterate_extra) const { int dmt_iterate(iterate_extra_t *const iterate_extra) const {
...@@ -203,6 +205,7 @@ class bn_data { ...@@ -203,6 +205,7 @@ class bn_data {
} }
// iterate_on_range() on key dmt (and associated leafentries) // iterate_on_range() on key dmt (and associated leafentries)
// TODO(yoni): rename to iterate_on_range
template<typename iterate_extra_t, template<typename iterate_extra_t,
int (*f)(const void * key, const uint32_t keylen, const LEAFENTRY &, const uint32_t, iterate_extra_t *const)> int (*f)(const void * key, const uint32_t keylen, const LEAFENTRY &, const uint32_t, iterate_extra_t *const)>
int dmt_iterate_on_range(const uint32_t left, const uint32_t right, iterate_extra_t *const iterate_extra) const { int dmt_iterate_on_range(const uint32_t left, const uint32_t right, iterate_extra_t *const iterate_extra) const {
...@@ -272,10 +275,7 @@ class bn_data { ...@@ -272,10 +275,7 @@ class bn_data {
// Move leafentries (and associated key/keylens) from this basement node to dest_bd // Move leafentries (and associated key/keylens) from this basement node to dest_bd
// Moves indexes [lbi, ube) // Moves indexes [lbi, ube)
__attribute__((__nonnull__)) __attribute__((__nonnull__))
void move_leafentries_to(bn_data* dest_bd, void split_klpairs(bn_data* dest_bd, uint32_t first_index_for_dest);
uint32_t lbi, //lower bound inclusive
uint32_t ube //upper bound exclusive
);
// Destroy this basement node and free memory. // Destroy this basement node and free memory.
void destroy(void); void destroy(void);
...@@ -321,6 +321,8 @@ class bn_data { ...@@ -321,6 +321,8 @@ class bn_data {
// Between calling prepare_to_serialize and actually serializing, the basement node may not be modified // Between calling prepare_to_serialize and actually serializing, the basement node may not be modified
void prepare_to_serialize(void); void prepare_to_serialize(void);
//TODO(yoni): go to serialize_ftnode_partition and move prepare/header/etc (and wbufwriteleafentry) into here and add just one external function: serialize_to_wbuf()
// Serialize the basement node header to a wbuf // Serialize the basement node header to a wbuf
// Requires prepare_to_serialize() to have been called first. // Requires prepare_to_serialize() to have been called first.
void serialize_header(struct wbuf *wb) const; void serialize_header(struct wbuf *wb) const;
...@@ -344,10 +346,10 @@ class bn_data { ...@@ -344,10 +346,10 @@ class bn_data {
+ 0; + 0;
private: private:
// move_leafentry_extra should be a local class in move_leafentries_to, but // split_klpairs_extra should be a local class in split_klpairs, but
// the dmt template parameter for iterate needs linkage, so it has to be a // the dmt template parameter for iterate needs linkage, so it has to be a
// separate class, but we want it to be able to call e.g. add_key // separate class, but we want it to be able to call e.g. add_key
friend class move_leafentry_extra; friend class split_klpairs_extra;
// Allocates space in the mempool. // Allocates space in the mempool.
// If there is insufficient space, the mempool is enlarged and leafentries may be shuffled to reduce fragmentation. // If there is insufficient space, the mempool is enlarged and leafentries may be shuffled to reduce fragmentation.
...@@ -364,7 +366,7 @@ class bn_data { ...@@ -364,7 +366,7 @@ class bn_data {
void add_key(uint32_t keylen); void add_key(uint32_t keylen);
// Note that multiple keys were added (for maintaining disk-size of this basement node) // Note that multiple keys were added (for maintaining disk-size of this basement node)
void add_keys(uint32_t n_keys, uint32_t combined_keylen); void add_keys(uint32_t n_keys, uint32_t combined_klpair_len);
// Note that a key was removed (for maintaining disk-size of this basement node) // Note that a key was removed (for maintaining disk-size of this basement node)
void remove_key(uint32_t keylen); void remove_key(uint32_t keylen);
...@@ -375,7 +377,7 @@ class bn_data { ...@@ -375,7 +377,7 @@ class bn_data {
friend class bndata_bugfix_test; friend class bndata_bugfix_test;
// Get the serialized size of a klpair. // Get the serialized size of a klpair.
// As of Jan 14, 2014, serialized size of a klpair is independent of if this basement node has fixed-length keys. // As of Jan 14, 2014, serialized size of a klpair is independent of whether this basement node has fixed-length keys.
uint32_t klpair_disksize(const uint32_t klpair_len, const klpair_struct *klpair) const; uint32_t klpair_disksize(const uint32_t klpair_len, const klpair_struct *klpair) const;
// The disk/memory size of all keys. (Note that the size of memory for the leafentries is maintained by m_buffer_mempool) // The disk/memory size of all keys. (Note that the size of memory for the leafentries is maintained by m_buffer_mempool)
...@@ -385,6 +387,6 @@ class bn_data { ...@@ -385,6 +387,6 @@ class bn_data {
// all keys will be first followed by all leafentries (both in sorted order) // all keys will be first followed by all leafentries (both in sorted order)
void initialize_from_separate_keys_and_vals(uint32_t num_entries, struct rbuf *rb, uint32_t data_size, uint32_t version, void initialize_from_separate_keys_and_vals(uint32_t num_entries, struct rbuf *rb, uint32_t data_size, uint32_t version,
uint32_t key_data_size, uint32_t val_data_size, bool all_keys_same_length, uint32_t key_data_size, uint32_t val_data_size, bool all_keys_same_length,
uint32_t fixed_key_length); uint32_t fixed_klpair_length);
}; };
...@@ -754,7 +754,8 @@ move_leafentries( ...@@ -754,7 +754,8 @@ move_leafentries(
) )
//Effect: move leafentries in the range [lbi, ube) from src_omt to newly created dest_omt //Effect: move leafentries in the range [lbi, ube) from src_omt to newly created dest_omt
{ {
src_bn->data_buffer.move_leafentries_to(&dest_bn->data_buffer, lbi, ube); invariant(ube == src_bn->data_buffer.dmt_size());
src_bn->data_buffer.split_klpairs(&dest_bn->data_buffer, lbi);
} }
static void ftnode_finalize_split(FTNODE node, FTNODE B, MSN max_msn_applied_to_node) { static void ftnode_finalize_split(FTNODE node, FTNODE B, MSN max_msn_applied_to_node) {
......
...@@ -327,7 +327,7 @@ serialize_ftnode_partition_size (FTNODE node, int i) ...@@ -327,7 +327,7 @@ serialize_ftnode_partition_size (FTNODE node, int i)
return result; return result;
} }
#define FTNODE_PARTITION_OMT_LEAVES 0xaa #define FTNODE_PARTITION_DMT_LEAVES 0xaa
#define FTNODE_PARTITION_FIFO_MSG 0xbb #define FTNODE_PARTITION_FIFO_MSG 0xbb
static void static void
...@@ -374,7 +374,7 @@ serialize_ftnode_partition(FTNODE node, int i, struct sub_block *sb) { ...@@ -374,7 +374,7 @@ serialize_ftnode_partition(FTNODE node, int i, struct sub_block *sb) {
serialize_nonleaf_childinfo(BNC(node, i), &wb); serialize_nonleaf_childinfo(BNC(node, i), &wb);
} }
else { else {
unsigned char ch = FTNODE_PARTITION_OMT_LEAVES; unsigned char ch = FTNODE_PARTITION_DMT_LEAVES;
bn_data* bd = BLB_DATA(node, i); bn_data* bd = BLB_DATA(node, i);
wbuf_nocrc_char(&wb, ch); wbuf_nocrc_char(&wb, ch);
...@@ -1553,7 +1553,7 @@ deserialize_ftnode_partition( ...@@ -1553,7 +1553,7 @@ deserialize_ftnode_partition(
BP_WORKDONE(node, childnum) = 0; BP_WORKDONE(node, childnum) = 0;
} }
else { else {
assert(ch == FTNODE_PARTITION_OMT_LEAVES); assert(ch == FTNODE_PARTITION_DMT_LEAVES);
BLB_SEQINSERT(node, childnum) = 0; BLB_SEQINSERT(node, childnum) = 0;
uint32_t num_entries = rbuf_int(&rb); uint32_t num_entries = rbuf_int(&rb);
// we are now at the first byte of first leafentry // we are now at the first byte of first leafentry
......
...@@ -927,7 +927,7 @@ le_pack(ULE ule, // data to be packed into new leafentry ...@@ -927,7 +927,7 @@ le_pack(ULE ule, // data to be packed into new leafentry
rval = 0; rval = 0;
goto cleanup; goto cleanup;
} }
found_insert:; found_insert:
memsize = le_memsize_from_ule(ule); memsize = le_memsize_from_ule(ule);
LEAFENTRY new_leafentry; LEAFENTRY new_leafentry;
get_space_for_le(data_buffer, idx, keyp, keylen, old_le_size, memsize, &new_leafentry); get_space_for_le(data_buffer, idx, keyp, keylen, old_le_size, memsize, &new_leafentry);
......
...@@ -183,6 +183,7 @@ size_t toku_mempool_get_size(const struct mempool *mp) { ...@@ -183,6 +183,7 @@ size_t toku_mempool_get_size(const struct mempool *mp) {
return mp->size; return mp->size;
} }
// TODO(yoni): unify the toku_mempool_get*_size and toku_mempool_get*_space functions (use either size or space but not both)
size_t toku_mempool_get_frag_size(const struct mempool *mp) { size_t toku_mempool_get_frag_size(const struct mempool *mp) {
return mp->frag_size; return mp->frag_size;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment