Commit 494856ed authored by Yoni Fogel

Refs Tokutek/ft-index#46. Comments, minor renames; reworked bn_data::move_leafentries so that splits do not significantly increase the memory usage of basement nodes, and so the code is cleaner.
parent 78f12006
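
As context for the reworked split (a toy sketch with hypothetical names, not the repository's implementation): the old bn_data::move_leafentries_to() took an explicit [lbi, ube) range, while the new bn_data::split_klpairs() takes only the first index destined for the new basement node, since a split always moves the tail of the key dmt (see the invariant added in move_leafentries below).

// Toy sketch; ToyBasement stands in for bn_data and is illustrative only.
#include <cassert>
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

struct ToyBasement {
    // key/leafentry pairs, kept in sorted order
    std::vector<std::pair<std::string, std::string>> klpairs;

    uint32_t size(void) const { return (uint32_t) klpairs.size(); }

    // New-style split: move the suffix [first_index_for_dest, size()) into dest.
    // The old API also took an upper bound, but callers always passed size().
    void split_klpairs(ToyBasement *dest, uint32_t first_index_for_dest) {
        assert(first_index_for_dest <= size());
        dest->klpairs.assign(klpairs.begin() + first_index_for_dest, klpairs.end());
        klpairs.resize(first_index_for_dest);
    }
};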
@@ -193,9 +193,11 @@ class bn_data {
void verify_mempool(void);
// size() of key dmt
// TODO(yoni): maybe rename to something like num_klpairs
uint32_t dmt_size(void) const;
// iterate() on key dmt (and associated leafentries)
// TODO(yoni): rename to iterate
template<typename iterate_extra_t,
int (*f)(const void * key, const uint32_t keylen, const LEAFENTRY &, const uint32_t, iterate_extra_t *const)>
int dmt_iterate(iterate_extra_t *const iterate_extra) const {
@@ -203,6 +205,7 @@ class bn_data {
}
// iterate_on_range() on key dmt (and associated leafentries)
// TODO(yoni): rename to iterate_on_range
template<typename iterate_extra_t,
int (*f)(const void * key, const uint32_t keylen, const LEAFENTRY &, const uint32_t, iterate_extra_t *const)>
int dmt_iterate_on_range(const uint32_t left, const uint32_t right, iterate_extra_t *const iterate_extra) const {
@@ -272,10 +275,7 @@ class bn_data {
// Move leafentries (and associated key/keylens) from this basement node to dest_bd
// Moves indexes [lbi, ube)
__attribute__((__nonnull__))
void move_leafentries_to(bn_data* dest_bd,
uint32_t lbi, //lower bound inclusive
uint32_t ube //upper bound exclusive
);
void split_klpairs(bn_data* dest_bd, uint32_t first_index_for_dest);
// Destroy this basement node and free memory.
void destroy(void);
@@ -321,6 +321,8 @@ class bn_data {
// Between calling prepare_to_serialize and actually serializing, the basement node may not be modified
void prepare_to_serialize(void);
//TODO(yoni): go to serialize_ftnode_partition and move prepare/header/etc (and wbufwriteleafentry) into here and add just one external function: serialize_to_wbuf()
// Serialize the basement node header to a wbuf
// Requires prepare_to_serialize() to have been called first.
void serialize_header(struct wbuf *wb) const;
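
A minimal usage sketch of the two-step serialization protocol described by the comments above (the wrapper function is hypothetical and assumes the bn_data and wbuf declarations from this header are in scope):

// Hypothetical helper, not the real serialize_ftnode_partition code.
static void serialize_basement_header_sketch(bn_data *bd, struct wbuf *wb) {
    bd->prepare_to_serialize();   // after this call the basement node must not be modified
    bd->serialize_header(wb);     // the header is written to the wbuf first
    // ...the remaining key/leafentry serialization must also finish before any mutation...
}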
@@ -344,10 +346,10 @@ class bn_data {
+ 0;
private:
// move_leafentry_extra should be a local class in move_leafentries_to, but
// split_klpairs_extra should be a local class in split_klpairs, but
// the dmt template parameter for iterate needs linkage, so it has to be a
// separate class, but we want it to be able to call e.g. add_key
friend class move_leafentry_extra;
friend class split_klpairs_extra;
// Allocates space in the mempool.
// If there is insufficient space, the mempool is enlarged and leafentries may be shuffled to reduce fragmentation.
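
The comment above describes an allocate-or-else-grow-and-compact pattern; a rough sketch of that idea (toy names and layout, not the actual bn_data/mempool code) is:

// Toy sketch: hand out space from a bump pointer; if the pool is full, grow it
// (the real code would also compact live leafentries to remove fragmentation).
#include <cstdlib>
#include <cstring>

struct toy_mempool {
    char  *base;
    size_t capacity;   // bytes available in the pool
    size_t used;       // bytes handed out so far, including fragmented holes
    size_t frag;       // bytes wasted by deleted or overwritten leafentries
};

static void *toy_mempool_alloc(struct toy_mempool *mp, size_t sz) {
    if (mp->used + sz > mp->capacity) {
        size_t new_cap = 2 * (mp->used + sz);
        char *new_base = (char *) malloc(new_cap);
        memcpy(new_base, mp->base, mp->used);   // real code would copy only live entries
        free(mp->base);
        mp->base = new_base;
        mp->capacity = new_cap;
    }
    void *p = mp->base + mp->used;
    mp->used += sz;
    return p;
}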
@@ -364,7 +366,7 @@ class bn_data {
void add_key(uint32_t keylen);
// Note that multiple keys were added (for maintaining disk-size of this basement node)
void add_keys(uint32_t n_keys, uint32_t combined_keylen);
void add_keys(uint32_t n_keys, uint32_t combined_klpair_len);
// Note that a key was removed (for maintaining disk-size of this basement node)
void remove_key(uint32_t keylen);
@@ -375,7 +377,7 @@ class bn_data {
friend class bndata_bugfix_test;
// Get the serialized size of a klpair.
// As of Jan 14, 2014, serialized size of a klpair is independent of if this basement node has fixed-length keys.
// As of Jan 14, 2014, serialized size of a klpair is independent of whether this basement node has fixed-length keys.
uint32_t klpair_disksize(const uint32_t klpair_len, const klpair_struct *klpair) const;
// The disk/memory size of all keys. (Note that the size of memory for the leafentries is maintained by m_buffer_mempool)
@@ -385,6 +387,6 @@ class bn_data {
// all keys will be first followed by all leafentries (both in sorted order)
void initialize_from_separate_keys_and_vals(uint32_t num_entries, struct rbuf *rb, uint32_t data_size, uint32_t version,
uint32_t key_data_size, uint32_t val_data_size, bool all_keys_same_length,
uint32_t fixed_key_length);
uint32_t fixed_klpair_length);
};
@@ -754,7 +754,8 @@ move_leafentries(
)
//Effect: move leafentries in the range [lbi, ube) from src_omt to newly created dest_omt
{
src_bn->data_buffer.move_leafentries_to(&dest_bn->data_buffer, lbi, ube);
invariant(ube == src_bn->data_buffer.dmt_size());
src_bn->data_buffer.split_klpairs(&dest_bn->data_buffer, lbi);
}
static void ftnode_finalize_split(FTNODE node, FTNODE B, MSN max_msn_applied_to_node) {
......
@@ -327,7 +327,7 @@ serialize_ftnode_partition_size (FTNODE node, int i)
return result;
}
#define FTNODE_PARTITION_OMT_LEAVES 0xaa
#define FTNODE_PARTITION_DMT_LEAVES 0xaa
#define FTNODE_PARTITION_FIFO_MSG 0xbb
static void
@@ -374,7 +374,7 @@ serialize_ftnode_partition(FTNODE node, int i, struct sub_block *sb) {
serialize_nonleaf_childinfo(BNC(node, i), &wb);
}
else {
unsigned char ch = FTNODE_PARTITION_OMT_LEAVES;
unsigned char ch = FTNODE_PARTITION_DMT_LEAVES;
bn_data* bd = BLB_DATA(node, i);
wbuf_nocrc_char(&wb, ch);
@@ -1553,7 +1553,7 @@ deserialize_ftnode_partition(
BP_WORKDONE(node, childnum) = 0;
}
else {
assert(ch == FTNODE_PARTITION_OMT_LEAVES);
assert(ch == FTNODE_PARTITION_DMT_LEAVES);
BLB_SEQINSERT(node, childnum) = 0;
uint32_t num_entries = rbuf_int(&rb);
// we are now at the first byte of first leafentry
......
@@ -236,14 +236,14 @@ static inline size_t uxr_unpack_length_and_bit(UXR uxr, uint8_t *p);
static inline size_t uxr_unpack_data(UXR uxr, uint8_t *p);
static void get_space_for_le(
bn_data* data_buffer,
bn_data* data_buffer,
uint32_t idx,
void* keyp,
uint32_t keylen,
uint32_t old_le_size,
size_t size,
size_t size,
LEAFENTRY* new_le_space
)
)
{
if (data_buffer == NULL) {
CAST_FROM_VOIDP(*new_le_space, toku_xmalloc(size));
@@ -889,7 +889,7 @@ update_le_status(ULE ule, size_t memsize) {
}
// Purpose is to return a newly allocated leaf entry in packed format, or
// return null if leaf entry should be destroyed (if no transaction records
// return null if leaf entry should be destroyed (if no transaction records
// are for inserts).
// Transaction records in packed le are stored inner to outer (first xr is innermost),
// with some information extracted out of the transaction records into the header.
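
As a rough illustration of the "inner to outer" ordering described above (toy types, not the real ULE/UXR packing code), the records are written to the buffer starting from the innermost, i.e. last, element of the array, mirroring the reversed loop over ule->uxrs further down in this diff:

// Toy sketch: pack records innermost-first.
#include <cstdint>
#include <cstring>
#include <vector>

struct toy_xr {
    uint32_t    len;
    const char *data;
};

static size_t toy_pack_inner_to_outer(const std::vector<toy_xr> &xrs, uint8_t *out) {
    uint8_t *p = out;
    for (size_t i = xrs.size(); i-- > 0; ) {   // walk innermost -> outermost
        memcpy(p, &xrs[i].len, sizeof xrs[i].len);
        p += sizeof xrs[i].len;
        memcpy(p, xrs[i].data, xrs[i].len);
        p += xrs[i].len;
    }
    return (size_t) (p - out);
}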
@@ -927,7 +927,7 @@ le_pack(ULE ule, // data to be packed into new leafentry
rval = 0;
goto cleanup;
}
found_insert:;
found_insert:
memsize = le_memsize_from_ule(ule);
LEAFENTRY new_leafentry;
get_space_for_le(data_buffer, idx, keyp, keylen, old_le_size, memsize, &new_leafentry);
@@ -982,7 +982,7 @@ found_insert:;
for (i = 0; i < ule->num_cuxrs; i++) {
p += uxr_pack_length_and_bit(ule->uxrs + ule->num_cuxrs - 1 - i, p);
}
//pack interesting values inner to outer
if (ule->num_puxrs!=0) {
UXR innermost = ule->uxrs + ule->num_cuxrs + ule->num_puxrs - 1;
@@ -1020,7 +1020,7 @@ found_insert:;
size_t bytes_written;
bytes_written = (size_t)p - (size_t)new_leafentry;
invariant(bytes_written == memsize);
#if ULE_DEBUG
if (omt) { //Disable recursive debugging.
size_t memsize_verify = leafentry_memsize(new_leafentry);
......
@@ -183,6 +183,7 @@ size_t toku_mempool_get_size(const struct mempool *mp) {
return mp->size;
}
// TODO(yoni): unify the toku_mempool_get*_size and toku_mempool_get*_space functions (use either size or space but not both)
size_t toku_mempool_get_frag_size(const struct mempool *mp) {
return mp->frag_size;
}
......