Commit 2b5e4f01 authored by John Esmet

FT-273 Use a comparator object for fractal tree key comparisons when
possible, untangling the dependency between the parameters 'ft->compare_fun' and
'ft->cmp_descriptor' in a sensible way. It is now much less necessary to
create a fake db (good), and some long parameter lists are now shorter
(it's a start).
parent cbab8d8e
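
For orientation before the diff itself: the hunks below introduce a toku::comparator object that bundles the key-compare function with the comparison descriptor, so callers no longer build a FAKE_DB or pass (DESCRIPTOR, ft_compare_func) pairs around. A minimal usage sketch, assuming placeholder names (my_key_cmp, my_desc, key_a, key_b are illustrative, not symbols from this commit):

    // Sketch of the comparator lifecycle introduced in ft/comparator.h below.
    // my_key_cmp is a stand-in ft_compare_func; my_desc is a stand-in DESCRIPTOR.
    toku::comparator cmp;
    cmp.create(my_key_cmp, my_desc);      // allocates an internal fake DB and binds the descriptor
    int c = cmp(key_a, key_b);            // operator() handles +/- infinity, then calls my_key_cmp
    cmp.set_descriptor(other_desc);       // the descriptor can be swapped without re-creating
    cmp.destroy();                        // frees the internal fake DB; must pair with create()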
@@ -95,36 +95,51 @@ PATENT RIGHTS GRANT:
 #include <ft/ybt.h>
 #include <ft/fttypes.h>
+#include <portability/memory.h>

 namespace toku {

 // a comparator object encapsulates the data necessary for
 // comparing two keys in a fractal tree. it further understands
 // that points may be positive or negative infinity.
 class comparator {
 public:
-    void set_descriptor(DESCRIPTOR desc) {
-        m_fake_db.cmp_descriptor = desc;
+    void create(ft_compare_func cmp, DESCRIPTOR desc) {
+        _cmp = cmp;
+        XCALLOC(_fake_db);
+        _fake_db->cmp_descriptor = desc;
     }

-    void create(ft_compare_func cmp, DESCRIPTOR desc) {
-        m_cmp = cmp;
-        memset(&m_fake_db, 0, sizeof(m_fake_db));
-        m_fake_db.cmp_descriptor = desc;
+    void destroy() {
+        toku_free(_fake_db);
+    }
+
+    const DESCRIPTOR_S *get_descriptor() const {
+        return _fake_db->cmp_descriptor;
+    }
+
+    ft_compare_func get_compare_func() const {
+        return _cmp;
+    }
+
+    void set_descriptor(DESCRIPTOR desc) {
+        _fake_db->cmp_descriptor = desc;
     }

-    int compare(const DBT *a, const DBT *b) {
+    int operator()(const DBT *a, const DBT *b) const {
+        // TODO: add an unlikely() compiler note for this branch
         if (toku_dbt_is_infinite(a) || toku_dbt_is_infinite(b)) {
             return toku_dbt_infinite_compare(a, b);
         } else {
-            return m_cmp(&m_fake_db, a, b);
+            // yikes, const sadness here
+            return _cmp(const_cast<DB *>(_fake_db), a, b);
         }
     }

 private:
-    struct __toku_db m_fake_db;
-    ft_compare_func m_cmp;
+    DB *_fake_db;
+    ft_compare_func _cmp;
 };

 } /* namespace toku */
@@ -213,8 +213,7 @@ static int ft_cursor_search(FT_CURSOR cursor, ft_search *search,
 }

 static inline int compare_k_x(FT_HANDLE ft_handle, const DBT *k, const DBT *x) {
-    FAKE_DB(db, &ft_handle->ft->cmp_descriptor);
-    return ft_handle->ft->compare_fun(&db, k, x);
+    return ft_handle->ft->cmp(k, x);
 }

 int toku_ft_cursor_compare_one(const ft_search &UU(search), const DBT *UU(x)) {
@@ -290,11 +289,10 @@ int toku_ft_cursor_last(FT_CURSOR cursor, FT_GET_CALLBACK_FUNCTION getf, void *g
 int toku_ft_cursor_check_restricted_range(FT_CURSOR c, bytevec key, ITEMLEN keylen) {
     if (c->out_of_range_error) {
         FT ft = c->ft_handle->ft;
-        FAKE_DB(db, &ft->cmp_descriptor);
         DBT found_key;
         toku_fill_dbt(&found_key, key, keylen);
-        if ((!c->left_is_neg_infty && c->direction <= 0 && ft->compare_fun(&db, &found_key, &c->range_lock_left_key) < 0) ||
-            (!c->right_is_pos_infty && c->direction >= 0 && ft->compare_fun(&db, &found_key, &c->range_lock_right_key) > 0)) {
+        if ((!c->left_is_neg_infty && c->direction <= 0 && ft->cmp(&found_key, &c->range_lock_left_key) < 0) ||
+            (!c->right_is_pos_infty && c->direction >= 0 && ft->cmp(&found_key, &c->range_lock_right_key) > 0)) {
             invariant(c->out_of_range_error);
             return c->out_of_range_error;
         }
...
@@ -406,13 +406,8 @@ ctm_pick_child(FT ft,
     int childnum;
     if (parent->height == 1 && ctme->is_last_child) {
         childnum = parent->n_children - 1;
-    }
-    else {
-        childnum = toku_ftnode_which_child(
-            parent,
-            &ctme->target_key,
-            &ft->cmp_descriptor,
-            ft->compare_fun);
+    } else {
+        childnum = toku_ftnode_which_child(parent, &ctme->target_key, ft->cmp);
     }
     return childnum;
 }
@@ -1703,9 +1698,8 @@ void toku_bnc_flush_to_child(FT ft, NONLEAF_CHILDINFO bnc, FTNODE child, TXNID p
         flow_deltas[1] = memsize_in_buffer;
     }
     toku_ftnode_put_msg(
-        ft->compare_fun,
+        ft->cmp,
         ft->update_fun,
-        &ft->cmp_descriptor,
         child,
         -1,
         msg,
...
@@ -184,10 +184,7 @@ hot_just_pick_child(FT ft,
         childnum = 0;
     } else {
         // Find the pivot boundary.
-        childnum = toku_ftnode_hot_next_child(parent,
-                                              &flusher->highest_pivot_key,
-                                              &ft->cmp_descriptor,
-                                              ft->compare_fun);
+        childnum = toku_ftnode_hot_next_child(parent, &flusher->highest_pivot_key, ft->cmp);
     }
     return childnum;
@@ -386,8 +383,7 @@ toku_ft_hot_optimize(FT_HANDLE ft_handle, DBT* left, DBT* right,
     else if (right) {
         // if we have flushed past the bounds set for us,
         // set rightmost_leaf_seen so we exit
-        FAKE_DB(db, &ft_handle->ft->cmp_descriptor);
-        int cmp = ft_handle->ft->compare_fun(&db, &flusher.max_current_key, right);
+        int cmp = ft_handle->ft->cmp(&flusher.max_current_key, right);
         if (cmp > 0) {
             flusher.rightmost_leaf_seen = 1;
         }
...
@@ -111,9 +111,9 @@ PATENT RIGHTS GRANT:
 #include "leafentry.h"
 #include "block_table.h"
 #include "compress.h"
-#include <util/mempool.h>
 #include <util/omt.h>
 #include "ft/bndata.h"
+#include "ft/comparator.h"
 #include "ft/rollback.h"
 #include "ft/msg_buffer.h"
@@ -213,15 +213,18 @@ struct ft {
     CACHEFILE cf;
     // unique id for dictionary
     DICTIONARY_ID dict_id;
-    ft_compare_func compare_fun;
-    ft_update_func update_fun;

     // protected by locktree
     DESCRIPTOR_S descriptor;
-    // protected by locktree and user. User
-    // makes sure this is only changed
-    // when no activity on tree
+
+    // protected by locktree and user.
+    // User makes sure this is only changed when no activity on tree
     DESCRIPTOR_S cmp_descriptor;
+    // contains a pointer to cmp_descriptor (above) - their lifetimes are bound
+    toku::comparator cmp;
+
+    // the update function always utilizes the cmp_descriptor, not the regular one
+    ft_update_func update_fun;

     // These are not read-only:
@@ -272,7 +275,7 @@ typedef struct ft *FT;
 // descriptor. We don't bother setting any other fields because
 // the comparison function doesn't need it, and we would like to
 // reduce the CPU work done per comparison.
-#define FAKE_DB(db, desc) struct __toku_db db; do { db.cmp_descriptor = desc; } while (0)
+#define FAKE_DB(db, desc) struct __toku_db db; do { db.cmp_descriptor = const_cast<DESCRIPTOR>(desc); } while (0)

 struct ft_options {
     unsigned int nodesize;
@@ -390,14 +393,14 @@ void toku_serialize_ft_to_wbuf (
     DISKOFF translation_size_on_disk
     );
 int toku_deserialize_ft_from (int fd, LSN max_acceptable_lsn, FT *ft);
-void toku_serialize_descriptor_contents_to_fd(int fd, const DESCRIPTOR desc, DISKOFF offset);
-void toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb, const DESCRIPTOR desc);
+void toku_serialize_descriptor_contents_to_fd(int fd, DESCRIPTOR desc, DISKOFF offset);
+void toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb, DESCRIPTOR desc);

 // append a child node to a parent node
 void toku_ft_nonleaf_append_child(FTNODE node, FTNODE child, const DBT *pivotkey);

 // append a message to a nonleaf node child buffer
-void toku_ft_append_to_child_buffer(ft_compare_func compare_fun, DESCRIPTOR desc, FTNODE node, int childnum, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, const DBT *key, const DBT *val);
+void toku_ft_append_to_child_buffer(const toku::comparator &cmp, FTNODE node, int childnum, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, const DBT *key, const DBT *val);

 STAT64INFO_S toku_get_and_clear_basement_stats(FTNODE leafnode);
...
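
As the new struct ft comment above notes, the embedded comparator and cmp_descriptor have bound lifetimes: the comparator stores a pointer to the descriptor rather than copying it. A sketch of that wiring, mirroring the ft.cc hunks later in this commit (the wrapper function name here is illustrative only):

    // Illustrative only: shows how ft->cmp is pointed at ft->cmp_descriptor,
    // as done in toku_read_ft_and_store_in_cachefile() further down this diff.
    static void wire_comparator(FT ft, ft_compare_func user_cmp) {
        ft->cmp.create(user_cmp, &ft->cmp_descriptor);
        // later, on close: ft->cmp.destroy() runs before the descriptor data is freed
    }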
@@ -110,7 +110,7 @@ int toku_open_ft_handle (const char *fname, int is_create, FT_HANDLE *, int node
 // ANY operations. to update the cmp descriptor after any operations have already happened, all handles
 // and transactions must close and reopen before the change, then you can update the cmp descriptor
 void toku_ft_change_descriptor(FT_HANDLE t, const DBT* old_descriptor, const DBT* new_descriptor, bool do_log, TOKUTXN txn, bool update_cmp_descriptor);
-uint32_t toku_serialize_descriptor_size(const DESCRIPTOR desc);
+uint32_t toku_serialize_descriptor_size(DESCRIPTOR desc);
 void toku_ft_handle_create(FT_HANDLE *ft);
 void toku_ft_set_flags(FT_HANDLE, unsigned int flags);
...
@@ -95,7 +95,7 @@ PATENT RIGHTS GRANT:
 // not version-sensitive because we only serialize a descriptor using the current layout_version
 uint32_t
-toku_serialize_descriptor_size(const DESCRIPTOR desc) {
+toku_serialize_descriptor_size(DESCRIPTOR desc) {
     //Checksum NOT included in this. Checksum only exists in header's version.
     uint32_t size = 4; // four bytes for size of descriptor
     size += desc->dbt.size;
@@ -103,7 +103,7 @@ toku_serialize_descriptor_size(const DESCRIPTOR desc) {
 }

 static uint32_t
-deserialize_descriptor_size(const DESCRIPTOR desc, int layout_version) {
+deserialize_descriptor_size(DESCRIPTOR desc, int layout_version) {
     //Checksum NOT included in this. Checksum only exists in header's version.
     uint32_t size = 4; // four bytes for size of descriptor
     if (layout_version == FT_LAYOUT_VERSION_13)
@@ -112,8 +112,7 @@ deserialize_descriptor_size(const DESCRIPTOR desc, int layout_version) {
     return size;
 }

-void
-toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb, const DESCRIPTOR desc) {
+void toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb, DESCRIPTOR desc) {
     wbuf_bytes(wb, desc->dbt.data, desc->dbt.size);
 }
@@ -121,7 +120,7 @@ toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb, const DESCRIPTOR des
 //descriptor.
 //Descriptors are NOT written during the header checkpoint process.
 void
-toku_serialize_descriptor_contents_to_fd(int fd, const DESCRIPTOR desc, DISKOFF offset) {
+toku_serialize_descriptor_contents_to_fd(int fd, DESCRIPTOR desc, DISKOFF offset) {
     // make the checksum
     int64_t size = toku_serialize_descriptor_size(desc)+4; //4 for checksum
     int64_t size_aligned = roundup_to_multiple(512, size);
@@ -437,7 +436,8 @@ int deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version)
     if (r != 0) {
         goto exit;
     }
-    // copy descriptor to cmp_descriptor for #4541
+    // initialize for svn #4541
+    // TODO: use real dbt function
     ft->cmp_descriptor.dbt.size = ft->descriptor.dbt.size;
     ft->cmp_descriptor.dbt.data = toku_xmemdup(ft->descriptor.dbt.data, ft->descriptor.dbt.size);
     // Version 13 descriptors had an extra 4 bytes that we don't read
...
@@ -225,9 +225,8 @@ int toku_testsetup_insert_to_leaf (FT_HANDLE ft_handle, BLOCKNUM blocknum, const
     static size_t zero_flow_deltas[] = { 0, 0 };
     txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, true);
     toku_ftnode_put_msg(
-        ft_handle->ft->compare_fun,
+        ft_handle->ft->cmp,
         ft_handle->ft->update_fun,
-        &ft_handle->ft->cmp_descriptor,
         node,
         -1,
         &msg,
@@ -293,13 +292,14 @@ int toku_testsetup_insert_to_nonleaf (FT_HANDLE ft_handle, BLOCKNUM blocknum, en
     assert(node->height>0);

     DBT k;
-    int childnum = toku_ftnode_which_child(node,
-                                           toku_fill_dbt(&k, key, keylen),
-                                           &ft_handle->ft->cmp_descriptor, ft_handle->ft->compare_fun);
+    int childnum = toku_ftnode_which_child(node, toku_fill_dbt(&k, key, keylen), ft_handle->ft->cmp);

     XIDS xids_0 = xids_get_root_xids();
     MSN msn = next_dummymsn();
-    toku_bnc_insert_msg(BNC(node, childnum), key, keylen, val, vallen, msgtype, msn, xids_0, true, NULL, testhelper_string_key_cmp);
+    toku::comparator cmp;
+    cmp.create(testhelper_string_key_cmp, nullptr);
+    toku_bnc_insert_msg(BNC(node, childnum), key, keylen, val, vallen, msgtype, msn, xids_0, true, cmp);
+    cmp.destroy();
     // Hack to get the test working. The problem is that this test
     // is directly queueing something in a FIFO instead of
     // using ft APIs.
...
@@ -104,17 +104,13 @@ PATENT RIGHTS GRANT:
 static int
 compare_pairs (FT_HANDLE ft_handle, const DBT *a, const DBT *b) {
-    FAKE_DB(db, &ft_handle->ft->cmp_descriptor);
-    int cmp = ft_handle->ft->compare_fun(&db, a, b);
-    return cmp;
+    return ft_handle->ft->cmp(a, b);
 }

 static int
 compare_pair_to_key (FT_HANDLE ft_handle, const DBT *a, bytevec key, ITEMLEN keylen) {
     DBT y;
-    FAKE_DB(db, &ft_handle->ft->cmp_descriptor);
-    int cmp = ft_handle->ft->compare_fun(&db, a, toku_fill_dbt(&y, key, keylen));
-    return cmp;
+    return ft_handle->ft->cmp(a, toku_fill_dbt(&y, key, keylen));
 }

 static int
@@ -256,11 +252,7 @@ verify_sorted_by_key_msn(FT_HANDLE ft_handle, message_buffer *msg_buffer, const
         int r = mt.fetch(i, &offset);
         assert_zero(r);
         if (i > 0) {
-            struct toku_msg_buffer_key_msn_cmp_extra extra;
-            ZERO_STRUCT(extra);
-            extra.desc = &ft_handle->ft->cmp_descriptor;
-            extra.cmp = ft_handle->ft->compare_fun;
-            extra.msg_buffer = msg_buffer;
+            struct toku_msg_buffer_key_msn_cmp_extra extra(ft_handle->ft->cmp, msg_buffer);
             if (toku_msg_buffer_key_msn_cmp(extra, last_offset, offset) >= 0) {
                 result = TOKUDB_NEEDS_REPAIR;
                 break;
@@ -274,13 +266,7 @@ verify_sorted_by_key_msn(FT_HANDLE ft_handle, message_buffer *msg_buffer, const
 template<typename count_omt_t>
 static int
 count_eq_key_msn(FT_HANDLE ft_handle, message_buffer *msg_buffer, const count_omt_t &mt, const DBT *key, MSN msn) {
-    struct toku_msg_buffer_key_msn_heaviside_extra extra;
-    ZERO_STRUCT(extra);
-    extra.desc = &ft_handle->ft->cmp_descriptor;
-    extra.cmp = ft_handle->ft->compare_fun;
-    extra.msg_buffer = msg_buffer;
-    extra.key = key;
-    extra.msn = msn;
+    struct toku_msg_buffer_key_msn_heaviside_extra extra(ft_handle->ft->cmp, msg_buffer, key, msn);
     int r = mt.template find_zero<struct toku_msg_buffer_key_msn_heaviside_extra, toku_msg_buffer_key_msn_heaviside>(extra, nullptr, nullptr);
     int count;
     if (r == 0) {
...
@@ -119,7 +119,10 @@ ft_destroy(FT ft) {
     //cannot destroy since it is still in use by CURRENT
     assert(ft->h->type == FT_CURRENT);
     toku_blocktable_destroy(&ft->blocktable);
+    ft->cmp.destroy();
+    // TODO: use real dbt function
     if (ft->descriptor.dbt.data) toku_free(ft->descriptor.dbt.data);
+    // TODO: use real dbt function
     if (ft->cmp_descriptor.dbt.data) toku_free(ft->cmp_descriptor.dbt.data);
     toku_ft_destroy_reflock(ft);
     toku_free(ft->h);
@@ -384,7 +387,7 @@ static void ft_init(FT ft, FT_OPTIONS options, CACHEFILE cf) {
     toku_list_init(&ft->live_ft_handles);

-    ft->compare_fun = options->compare_fun;
+    ft->cmp.create(options->compare_fun, &ft->descriptor);
     ft->update_fun = options->update_fun;

     if (ft->cf != NULL) {
@@ -449,9 +452,6 @@ void toku_ft_create(FT *ftp, FT_OPTIONS options, CACHEFILE cf, TOKUTXN txn) {
     invariant(ftp);
     FT XCALLOC(ft);
-    memset(&ft->descriptor, 0, sizeof(ft->descriptor));
-    memset(&ft->cmp_descriptor, 0, sizeof(ft->cmp_descriptor));
-
     ft->h = ft_header_create(options, make_blocknum(0), (txn ? txn->txnid.parent_id64: TXNID_NONE));

     toku_ft_init_reflock(ft);
@@ -471,31 +471,27 @@ int toku_read_ft_and_store_in_cachefile (FT_HANDLE ft_handle, CACHEFILE cf, LSN
 // If the cachefile has not been initialized, then don't modify anything.
 // max_acceptable_lsn is the latest acceptable checkpointed version of the file.
 {
-    {
-        FT ft;
-        if ((ft = (FT) toku_cachefile_get_userdata(cf))!=0) {
-            *header = ft;
-            assert(ft_handle->options.update_fun == ft->update_fun);
-            assert(ft_handle->options.compare_fun == ft->compare_fun);
-            return 0;
-        }
-    }
-    FT ft = nullptr;
-    int r;
-    {
-        int fd = toku_cachefile_get_fd(cf);
-        r = toku_deserialize_ft_from(fd, max_acceptable_lsn, &ft);
-        if (r == TOKUDB_BAD_CHECKSUM) {
-            fprintf(stderr, "Checksum failure while reading header in file %s.\n", toku_cachefile_fname_in_env(cf));
-            assert(false); // make absolutely sure we crash before doing anything else
-        }
-    }
-    if (r!=0) return r;
-    // GCC 4.8 seems to get confused by the gotos in the deserialize code and think h is maybe uninitialized.
+    FT ft = nullptr;
+    if ((ft = (FT) toku_cachefile_get_userdata(cf)) != nullptr) {
+        *header = ft;
+        assert(ft_handle->options.update_fun == ft->update_fun);
+        return 0;
+    }
+
+    int fd = toku_cachefile_get_fd(cf);
+    int r = toku_deserialize_ft_from(fd, max_acceptable_lsn, &ft);
+    if (r == TOKUDB_BAD_CHECKSUM) {
+        fprintf(stderr, "Checksum failure while reading header in file %s.\n", toku_cachefile_fname_in_env(cf));
+        assert(false); // make absolutely sure we crash before doing anything else
+    } else if (r != 0) {
+        return r;
+    }
+
     invariant_notnull(ft);
-    ft->cf = cf;
-    ft->compare_fun = ft_handle->options.compare_fun;
+    // intuitively, the comparator points to the FT's cmp descriptor
+    ft->cmp.create(ft_handle->options.compare_fun, &ft->cmp_descriptor);
     ft->update_fun = ft_handle->options.update_fun;
+    ft->cf = cf;
     toku_cachefile_set_userdata(cf,
                                 reinterpret_cast<void *>(ft),
                                 ft_log_fassociate_during_checkpoint,
@@ -632,7 +628,7 @@ ft_handle_open_for_redirect(FT_HANDLE *new_ftp, const char *fname_in_env, TOKUTX
     FT_HANDLE ft_handle;
     assert(old_ft->dict_id.dictid != DICTIONARY_ID_NONE.dictid);
     toku_ft_handle_create(&ft_handle);
-    toku_ft_set_bt_compare(ft_handle, old_ft->compare_fun);
+    toku_ft_set_bt_compare(ft_handle, old_ft->cmp.get_compare_func());
     toku_ft_set_update(ft_handle, old_ft->update_fun);
     toku_ft_handle_set_nodesize(ft_handle, old_ft->h->nodesize);
     toku_ft_handle_set_basementnodesize(ft_handle, old_ft->h->basementnodesize);
@@ -890,7 +886,7 @@ int toku_ft_iterate_fractal_tree_block_map(FT ft, int (*iter)(uint64_t,int64_t,i
 }

 void
-toku_ft_update_descriptor(FT ft, DESCRIPTOR d)
+toku_ft_update_descriptor(FT ft, DESCRIPTOR desc)
 // Effect: Changes the descriptor in a tree (log the change, make sure it makes it to disk eventually).
 // requires: the ft is fully user-opened with a valid cachefile.
 // descriptor updates cannot happen in parallel for an FT
@@ -898,7 +894,7 @@ toku_ft_update_descriptor(FT ft, DESCRIPTOR d)
 {
     assert(ft->cf);
     int fd = toku_cachefile_get_fd(ft->cf);
-    toku_ft_update_descriptor_with_fd(ft, d, fd);
+    toku_ft_update_descriptor_with_fd(ft, desc, fd);
 }

 // upadate the descriptor for an ft and serialize it using
@@ -907,27 +903,30 @@ toku_ft_update_descriptor(FT ft, DESCRIPTOR d)
 // update a descriptor before the ft is fully opened and has
 // a valid cachefile.
 void
-toku_ft_update_descriptor_with_fd(FT ft, DESCRIPTOR d, int fd) {
+toku_ft_update_descriptor_with_fd(FT ft, DESCRIPTOR desc, int fd) {
     // the checksum is four bytes, so that's where the magic number comes from
     // make space for the new descriptor and write it out to disk
     DISKOFF offset, size;
-    size = toku_serialize_descriptor_size(d) + 4;
+    size = toku_serialize_descriptor_size(desc) + 4;
     toku_realloc_descriptor_on_disk(ft->blocktable, size, &offset, ft, fd);
-    toku_serialize_descriptor_contents_to_fd(fd, d, offset);
+    toku_serialize_descriptor_contents_to_fd(fd, desc, offset);

     // cleanup the old descriptor and set the in-memory descriptor to the new one
+    // TODO: use real dbt function
     if (ft->descriptor.dbt.data) {
         toku_free(ft->descriptor.dbt.data);
     }
-    ft->descriptor.dbt.size = d->dbt.size;
-    ft->descriptor.dbt.data = toku_memdup(d->dbt.data, d->dbt.size);
+    // TODO: use real dbt function
+    ft->descriptor.dbt.size = desc->dbt.size;
+    ft->descriptor.dbt.data = toku_memdup(desc->dbt.data, desc->dbt.size);
 }

-void
-toku_ft_update_cmp_descriptor(FT ft) {
+void toku_ft_update_cmp_descriptor(FT ft) {
+    // TODO: use real dbt function
     if (ft->cmp_descriptor.dbt.data != NULL) {
         toku_free(ft->cmp_descriptor.dbt.data);
     }
+    // TODO: use real dbt function
     ft->cmp_descriptor.dbt.size = ft->descriptor.dbt.size;
     ft->cmp_descriptor.dbt.data = toku_xmemdup(
         ft->descriptor.dbt.data,
@@ -935,13 +934,11 @@ toku_ft_update_cmp_descriptor(FT ft) {
         );
 }

-DESCRIPTOR
-toku_ft_get_descriptor(FT_HANDLE ft_handle) {
+DESCRIPTOR toku_ft_get_descriptor(FT_HANDLE ft_handle) {
     return &ft_handle->ft->descriptor;
 }

-DESCRIPTOR
-toku_ft_get_cmp_descriptor(FT_HANDLE ft_handle) {
+DESCRIPTOR toku_ft_get_cmp_descriptor(FT_HANDLE ft_handle) {
     return &ft_handle->ft->cmp_descriptor;
 }
...
@@ -159,11 +159,11 @@ int toku_ft_iterate_fractal_tree_block_map(FT ft, int (*iter)(uint64_t,int64_t,i
 // any operation has already occurred on the ft.
 // see toku_ft_change_descriptor(), which is the transactional version
 // used by the ydb layer. it better describes the client contract.
-void toku_ft_update_descriptor(FT ft, DESCRIPTOR d);
+void toku_ft_update_descriptor(FT ft, DESCRIPTOR desc);
 // use this version if the FT is not fully user-opened with a valid cachefile.
 // this is a clean hack to get deserialization code to update a descriptor
 // while the FT and cf are in the process of opening, for upgrade purposes
-void toku_ft_update_descriptor_with_fd(FT ft, DESCRIPTOR d, int fd);
+void toku_ft_update_descriptor_with_fd(FT ft, DESCRIPTOR desc, int fd);
 void toku_ft_update_cmp_descriptor(FT ft);

 // get the descriptor for a ft. safe to read as long as clients honor the
...
@@ -873,19 +873,16 @@ toku_serialize_ftnode_to (int fd, BLOCKNUM blocknum, FTNODE node, FTNODE_DISK_DA
 }

 static void
-deserialize_child_buffer_v26(NONLEAF_CHILDINFO bnc, struct rbuf *rbuf,
-                             DESCRIPTOR desc, ft_compare_func cmp) {
+deserialize_child_buffer_v26(NONLEAF_CHILDINFO bnc, struct rbuf *rbuf, const toku::comparator &cmp) {
     int r;
     int n_in_this_buffer = rbuf_int(rbuf);
     int32_t *fresh_offsets = NULL, *stale_offsets = NULL;
     int32_t *broadcast_offsets = NULL;
     int nfresh = 0, nstale = 0;
     int nbroadcast_offsets = 0;
-    if (cmp) {
     XMALLOC_N(n_in_this_buffer, stale_offsets);
     XMALLOC_N(n_in_this_buffer, fresh_offsets);
     XMALLOC_N(n_in_this_buffer, broadcast_offsets);
-    }
     bnc->msg_buffer.resize(rbuf->size + 64);
     for (int i = 0; i < n_in_this_buffer; i++) {
         bytevec key; ITEMLEN keylen;
@@ -900,7 +897,6 @@ deserialize_child_buffer_v26(NONLEAF_CHILDINFO bnc, struct rbuf *rbuf,
         rbuf_bytes(rbuf, &key, &keylen); /* Returns a pointer into the rbuf. */
         rbuf_bytes(rbuf, &val, &vallen);
         int32_t *dest;
-        if (cmp) {
         if (ft_msg_type_applies_once(type)) {
             if (is_fresh) {
                 dest = &fresh_offsets[nfresh];
@@ -915,9 +911,6 @@ deserialize_child_buffer_v26(NONLEAF_CHILDINFO bnc, struct rbuf *rbuf,
         } else {
             abort();
         }
-        } else {
-            dest = NULL;
-        }
         // TODO: Function to parse stuff out of an rbuf into an FT_MSG
         DBT k, v;
         FT_MSG_S msg = {
@@ -929,8 +922,7 @@ deserialize_child_buffer_v26(NONLEAF_CHILDINFO bnc, struct rbuf *rbuf,
     }
     invariant(rbuf->ndone == rbuf->size);

-    if (cmp) {
-        struct toku_msg_buffer_key_msn_cmp_extra extra = { .desc = desc, .cmp = cmp, .msg_buffer = &bnc->msg_buffer };
+    struct toku_msg_buffer_key_msn_cmp_extra extra = { .cmp = cmp, .msg_buffer = &bnc->msg_buffer };
     r = toku::sort<int32_t, const struct toku_msg_buffer_key_msn_cmp_extra, toku_msg_buffer_key_msn_cmp>::mergesort_r(fresh_offsets, nfresh, extra);
     assert_zero(r);
     bnc->fresh_message_tree.destroy();
@@ -941,7 +933,6 @@ deserialize_child_buffer_v26(NONLEAF_CHILDINFO bnc, struct rbuf *rbuf,
     bnc->stale_message_tree.create_steal_sorted_array(&stale_offsets, nstale, n_in_this_buffer);
     bnc->broadcast_list.destroy();
     bnc->broadcast_list.create_steal_sorted_array(&broadcast_offsets, nbroadcast_offsets, n_in_this_buffer);
-    }
 }

 // effect: deserialize a single message from rbuf and enqueue the result into the given message buffer
@@ -1305,8 +1296,7 @@ update_bfe_using_ftnode(FTNODE node, struct ftnode_fetch_extra *bfe)
         // we find out what basement node the query cares about
         // and check if it is available
         bfe->child_to_read = toku_ft_search_which_child(
-            &bfe->ft->cmp_descriptor,
-            bfe->ft->compare_fun,
+            bfe->ft->cmp,
             node,
             bfe->search
             );
@@ -1316,7 +1306,6 @@ update_bfe_using_ftnode(FTNODE node, struct ftnode_fetch_extra *bfe)
         // we can possibly require is a single basement node
         // we find out what basement node the query cares about
         // and check if it is available
-        paranoid_invariant(bfe->ft->compare_fun);
         if (node->height == 0) {
             int left_child = toku_bfe_leftmost_child_wanted(bfe, node);
             int right_child = toku_bfe_rightmost_child_wanted(bfe, node);
@@ -1398,8 +1387,7 @@ deserialize_ftnode_partition(
     struct sub_block *sb,
     FTNODE node,
     int childnum, // which partition to deserialize
-    DESCRIPTOR desc,
-    ft_compare_func cmp
+    const toku::comparator &cmp
     )
 {
     int r = 0;
@@ -1421,7 +1409,7 @@ deserialize_ftnode_partition(
         NONLEAF_CHILDINFO bnc = BNC(node, childnum);
         if (node->layout_version_read_from_disk <= FT_LAYOUT_VERSION_26) {
             // Layout version <= 26 did not serialize sorted message trees to disk.
-            deserialize_child_buffer_v26(bnc, &rb, desc, cmp);
+            deserialize_child_buffer_v26(bnc, &rb, cmp);
         } else {
             deserialize_child_buffer(bnc, &rb);
         }
@@ -1444,7 +1432,7 @@ deserialize_ftnode_partition(
 static int
 decompress_and_deserialize_worker(struct rbuf curr_rbuf, struct sub_block curr_sb, FTNODE node, int child,
-                                  DESCRIPTOR desc, ft_compare_func cmp, tokutime_t *decompress_time)
+                                  const toku::comparator &cmp, tokutime_t *decompress_time)
 {
     int r = 0;
     tokutime_t t0 = toku_time_now();
@@ -1452,7 +1440,7 @@ decompress_and_deserialize_worker(struct rbuf curr_rbuf, struct sub_block curr_s
     tokutime_t t1 = toku_time_now();
     if (r == 0) {
         // at this point, sb->uncompressed_ptr stores the serialized node partition
-        r = deserialize_ftnode_partition(&curr_sb, node, child, desc, cmp);
+        r = deserialize_ftnode_partition(&curr_sb, node, child, cmp);
     }
     *decompress_time = t1 - t0;
@@ -1772,11 +1760,9 @@ deserialize_and_upgrade_internal_node(FTNODE node,
     int nfresh = 0;
     int nbroadcast_offsets = 0;
-    if (bfe->ft->compare_fun) {
-        XMALLOC_N(n_in_this_buffer, fresh_offsets);
-        // We skip 'stale' offsets for upgraded nodes.
-        XMALLOC_N(n_in_this_buffer, broadcast_offsets);
-    }
+    // We skip 'stale' offsets for upgraded nodes.
+    XMALLOC_N(n_in_this_buffer, fresh_offsets);
+    XMALLOC_N(n_in_this_buffer, broadcast_offsets);

     // Atomically decrement the header's MSN count by the number
     // of messages in the buffer.
@@ -1800,7 +1786,6 @@ deserialize_and_upgrade_internal_node(FTNODE node,
         // <CER> can we factor this out?
         int32_t *dest;
-        if (bfe->ft->compare_fun) {
         if (ft_msg_type_applies_once(type)) {
             dest = &fresh_offsets[nfresh];
             nfresh++;
@@ -1810,9 +1795,6 @@ deserialize_and_upgrade_internal_node(FTNODE node,
         } else {
             abort();
         }
-        } else {
-            dest = NULL;
-        }

        // Increment our MSN, the last message should have the
        // newest/highest MSN. See above for a full explanation.
@@ -1827,9 +1809,7 @@ deserialize_and_upgrade_internal_node(FTNODE node,
         xids_destroy(&xids);
     }

-    if (bfe->ft->compare_fun) {
-        struct toku_msg_buffer_key_msn_cmp_extra extra = { .desc = &bfe->ft->cmp_descriptor,
-                                                           .cmp = bfe->ft->compare_fun,
+    struct toku_msg_buffer_key_msn_cmp_extra extra = { .cmp = bfe->ft->cmp,
                                                        .msg_buffer = &bnc->msg_buffer };
     typedef toku::sort<int32_t, const struct toku_msg_buffer_key_msn_cmp_extra, toku_msg_buffer_key_msn_cmp> key_msn_sort;
     r = key_msn_sort::mergesort_r(fresh_offsets, nfresh, extra);
@@ -1839,7 +1819,6 @@ deserialize_and_upgrade_internal_node(FTNODE node,
     bnc->broadcast_list.destroy();
     bnc->broadcast_list.create_steal_sorted_array(&broadcast_offsets, nbroadcast_offsets, n_in_this_buffer);
     }
-    }

     // Assign the highest msn from our upgrade message buffers
     node->max_msn_applied_to_node_on_disk = highest_msn;
@@ -2259,7 +2238,7 @@ deserialize_ftnode_from_rbuf(
             // case where we read and decompress the partition
             tokutime_t partition_decompress_time;
             r = decompress_and_deserialize_worker(curr_rbuf, curr_sb, node, i,
-                                                  &bfe->ft->cmp_descriptor, bfe->ft->compare_fun, &partition_decompress_time);
+                                                  bfe->ft->cmp, &partition_decompress_time);
             decompress_time += partition_decompress_time;
             if (r != 0) {
                 goto cleanup;
@@ -2365,7 +2344,7 @@ toku_deserialize_bp_from_disk(FTNODE node, FTNODE_DISK_DATA ndd, int childnum, i
     // deserialize
     tokutime_t t2 = toku_time_now();
-    r = deserialize_ftnode_partition(&curr_sb, node, childnum, &bfe->ft->cmp_descriptor, bfe->ft->compare_fun);
+    r = deserialize_ftnode_partition(&curr_sb, node, childnum, bfe->ft->cmp);
     tokutime_t t3 = toku_time_now();
@@ -2409,7 +2388,7 @@ toku_deserialize_bp_from_compressed(FTNODE node, int childnum, struct ftnode_fet
     tokutime_t t1 = toku_time_now();
-    r = deserialize_ftnode_partition(curr_sb, node, childnum, &bfe->ft->cmp_descriptor, bfe->ft->compare_fun);
+    r = deserialize_ftnode_partition(curr_sb, node, childnum, bfe->ft->cmp);
     tokutime_t t2 = toku_time_now();
...
@@ -101,10 +101,6 @@ PATENT RIGHTS GRANT:
 // A LE_CURSOR is good for scanning a FT from beginning to end. Useful for hot indexing.
 struct le_cursor {
-    // TODO: remove DBs from the ft layer comparison function
-    // so this is never necessary
-    // use a fake db for comparisons.
-    struct __toku_db fake_db;
     FT_CURSOR ft_cursor;
     bool neg_infinity; // true when the le cursor is positioned at -infinity (initial setting)
     bool pos_infinity; // true when the le cursor is positioned at +infinity (when _next returns DB_NOTFOUND)
@@ -124,8 +120,6 @@ toku_le_cursor_create(LE_CURSOR *le_cursor_result, FT_HANDLE ft_handle, TOKUTXN
         toku_ft_cursor_set_leaf_mode(le_cursor->ft_cursor);
         le_cursor->neg_infinity = false;
         le_cursor->pos_infinity = true;
-        // zero out the fake DB. this is a rare operation so it's not too slow.
-        memset(&le_cursor->fake_db, 0, sizeof(le_cursor->fake_db));
     }
 }
@@ -170,13 +164,9 @@ toku_le_cursor_is_key_greater_or_equal(LE_CURSOR le_cursor, const DBT *key) {
     } else if (le_cursor->pos_infinity) {
         result = false; // all keys are less than +infinity
     } else {
-        // get the comparison function and descriptor from the cursor's ft
-        FT_HANDLE ft_handle = le_cursor->ft_cursor->ft_handle;
-        ft_compare_func keycompare = toku_ft_get_bt_compare(ft_handle);
-        le_cursor->fake_db.cmp_descriptor = toku_ft_get_cmp_descriptor(ft_handle);
+        FT ft = le_cursor->ft_cursor->ft_handle->ft;
         // get the current position from the cursor and compare it to the given key.
-        DBT *cursor_key = &le_cursor->ft_cursor->key;
-        int r = keycompare(&le_cursor->fake_db, cursor_key, key);
+        int r = ft->cmp(&le_cursor->ft_cursor->key, key);
         if (r <= 0) {
             result = true; // key is right of the cursor key
         } else {
...
@@ -88,6 +88,7 @@ PATENT RIGHTS GRANT:
 #pragma once

+#include "ft/comparator.h"
 #include "ft/cachetable.h"
 #include "ft/bndata.h"
 #include "ft/fttypes.h"
@@ -305,7 +306,7 @@ void toku_ftnode_clone_partitions(FTNODE node, FTNODE cloned_node);
 void toku_initialize_empty_ftnode(FTNODE node, BLOCKNUM blocknum, int height, int num_children,
                                   int layout_version, unsigned int flags);

-int toku_ftnode_which_child(FTNODE node, const DBT *k, DESCRIPTOR desc, ft_compare_func cmp);
+int toku_ftnode_which_child(FTNODE node, const DBT *k, const toku::comparator &cmp);

 //
 // Field in ftnode_fetch_extra that tells the
@@ -378,25 +379,31 @@ typedef struct ftnode_fetch_extra *FTNODE_FETCH_EXTRA;
 // TODO: put the heaviside functions into their respective 'struct .*extra;' namespaces
 //
 struct toku_msg_buffer_key_msn_heaviside_extra {
-    DESCRIPTOR desc;
-    ft_compare_func cmp;
+    const toku::comparator &cmp;
     message_buffer *msg_buffer;
     const DBT *key;
     MSN msn;
+    toku_msg_buffer_key_msn_heaviside_extra(const toku::comparator &c, message_buffer *mb, const DBT *k, MSN m) :
+        cmp(c), msg_buffer(mb), key(k), msn(m) {
+    }
 };
 int toku_msg_buffer_key_msn_heaviside(const int32_t &v, const struct toku_msg_buffer_key_msn_heaviside_extra &extra);

 struct toku_msg_buffer_key_msn_cmp_extra {
-    DESCRIPTOR desc;
-    ft_compare_func cmp;
+    const toku::comparator &cmp;
     message_buffer *msg_buffer;
+    toku_msg_buffer_key_msn_cmp_extra(const toku::comparator &c, message_buffer *mb) :
+        cmp(c), msg_buffer(mb) {
+    }
 };
 int toku_msg_buffer_key_msn_cmp(const struct toku_msg_buffer_key_msn_cmp_extra &extrap, const int &a, const int &b);

 struct toku_msg_leafval_heaviside_extra {
-    ft_compare_func compare_fun;
-    DESCRIPTOR desc;
-    DBT const * const key;
+    const toku::comparator &cmp;
+    DBT const *const key;
+    toku_msg_leafval_heaviside_extra(const toku::comparator &c, const DBT *k) :
+        cmp(c), key(k) {
+    }
 };
 int toku_msg_leafval_heaviside(DBT const &kdbt, const struct toku_msg_leafval_heaviside_extra &be);
@@ -404,7 +411,7 @@ unsigned int toku_bnc_nbytesinbuf(NONLEAF_CHILDINFO bnc);
 int toku_bnc_n_entries(NONLEAF_CHILDINFO bnc);
 long toku_bnc_memory_size(NONLEAF_CHILDINFO bnc);
 long toku_bnc_memory_used(NONLEAF_CHILDINFO bnc);
-void toku_bnc_insert_msg(NONLEAF_CHILDINFO bnc, const void *key, ITEMLEN keylen, const void *data, ITEMLEN datalen, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, DESCRIPTOR desc, ft_compare_func cmp);
+void toku_bnc_insert_msg(NONLEAF_CHILDINFO bnc, const void *key, ITEMLEN keylen, const void *data, ITEMLEN datalen, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, const toku::comparator &cmp);
 void toku_bnc_empty(NONLEAF_CHILDINFO bnc);
 void toku_bnc_flush_to_child(FT ft, NONLEAF_CHILDINFO bnc, FTNODE child, TXNID parent_oldest_referenced_xid_known);
 bool toku_bnc_should_promote(FT ft, NONLEAF_CHILDINFO bnc) __attribute__((const, nonnull));
@@ -435,11 +442,10 @@ enum reactivity toku_ftnode_get_leaf_reactivity(FTNODE node, uint32_t nodesize);
  * If k is equal to some pivot, then we return the next (to the right)
  * childnum.
  */
-int toku_ftnode_hot_next_child(FTNODE node, const DBT *k,
-                               DESCRIPTOR desc, ft_compare_func cmp);
+int toku_ftnode_hot_next_child(FTNODE node, const DBT *k, const toku::comparator &cmp);

-void toku_ftnode_put_msg(ft_compare_func compare_fun, ft_update_func update_fun,
-                         DESCRIPTOR desc, FTNODE node, int target_childnum,
+void toku_ftnode_put_msg(const toku::comparator &cmp, ft_update_func update_fun,
+                         FTNODE node, int target_childnum,
                          FT_MSG msg, bool is_fresh, txn_gc_info *gc_info,
                          size_t flow_deltas[], STAT64INFO stats_to_update);
@@ -447,12 +453,12 @@ void toku_ft_bn_apply_msg_once(BASEMENTNODE bn, const FT_MSG msg, uint32_t idx,
                                uint32_t le_keylen, LEAFENTRY le, txn_gc_info *gc_info,
                                uint64_t *workdonep, STAT64INFO stats_to_update);

-void toku_ft_bn_apply_msg(ft_compare_func compare_fun, ft_update_func update_fun,
-                          DESCRIPTOR desc, BASEMENTNODE bn, FT_MSG msg, txn_gc_info *gc_info,
+void toku_ft_bn_apply_msg(const toku::comparator &cmp, ft_update_func update_fun,
+                          BASEMENTNODE bn, FT_MSG msg, txn_gc_info *gc_info,
                           uint64_t *workdone, STAT64INFO stats_to_update);

-void toku_ft_leaf_apply_msg(ft_compare_func compare_fun, ft_update_func update_fun,
-                            DESCRIPTOR desc, FTNODE node, int target_childnum,
+void toku_ft_leaf_apply_msg(const toku::comparator &cmp, ft_update_func update_fun,
+                            FTNODE node, int target_childnum,
                             FT_MSG msg, txn_gc_info *gc_info,
                             uint64_t *workdone, STAT64INFO stats_to_update);
@@ -487,7 +493,7 @@ bool toku_ft_leaf_needs_ancestors_messages(FT ft, FTNODE node, ANCESTORS ancesto
 void toku_ft_bn_update_max_msn(FTNODE node, MSN max_msn_applied, int child_to_read);

 struct ft_search;
-int toku_ft_search_which_child(DESCRIPTOR desc, ft_compare_func cmp, FTNODE node, ft_search *search);
+int toku_ft_search_which_child(const toku::comparator &cmp, FTNODE node, ft_search *search);

 //
 // internal node inline functions
...
@@ -137,6 +137,9 @@ run_test(unsigned long eltsize, unsigned long nodesize, unsigned long repeat)
     struct timeval t[2];
     gettimeofday(&t[0], NULL);
+
+    toku::comparator cmp;
+    cmp.create(long_key_cmp, nullptr);
     for (unsigned int i = 0; i < repeat; ++i) {
         bnc = toku_create_empty_nl();
         for (; toku_bnc_nbytesinbuf(bnc) <= nodesize; ++cur) {
@@ -144,7 +147,7 @@ run_test(unsigned long eltsize, unsigned long nodesize, unsigned long repeat)
                                 &keys[cur % 1024], sizeof keys[cur % 1024],
                                 vals[cur % 1024], eltsize - (sizeof keys[cur % 1024]),
                                 FT_NONE, next_dummymsn(), xids_123, true,
-                                NULL, long_key_cmp); assert_zero(r);
+                                cmp); assert_zero(r);
         }
         nbytesinserted += toku_bnc_nbytesinbuf(bnc);
         destroy_nonleaf_childinfo(bnc);
@@ -157,6 +160,8 @@ run_test(unsigned long eltsize, unsigned long nodesize, unsigned long repeat)
     long long unsigned eltrate = (long) (cur / dt);
     printf("%0.03lf MB/sec\n", mbrate);
     printf("%llu elts/sec\n", eltrate);
+
+    cmp.destroy();
 }

 int
...
@@ -112,14 +112,16 @@ static void test_desc(void) {
     // create with d1, make sure it gets used
     cmp.create(magic_compare, &d1);
     expected_desc = &d1;
-    c = cmp.compare(&dbt_a, &dbt_b);
+    c = cmp(&dbt_a, &dbt_b);
     invariant(c == MAGIC);

     // set desc to d2, make sure it gets used
     cmp.set_descriptor(&d2);
     expected_desc = &d2;
-    c = cmp.compare(&dbt_a, &dbt_b);
+    c = cmp(&dbt_a, &dbt_b);
     invariant(c == MAGIC);
+
+    cmp.destroy();
 }

 static int dont_compare_me_bro(DB *db, const DBT *a, const DBT *b) {
@@ -137,20 +139,22 @@ static void test_infinity(void) {
     // should never be called and thus the dbt never actually read.
     DBT arbitrary_dbt;

-    c = cmp.compare(&arbitrary_dbt, toku_dbt_positive_infinity());
+    c = cmp(&arbitrary_dbt, toku_dbt_positive_infinity());
     invariant(c < 0);
-    c = cmp.compare(toku_dbt_negative_infinity(), &arbitrary_dbt);
+    c = cmp(toku_dbt_negative_infinity(), &arbitrary_dbt);
     invariant(c < 0);

-    c = cmp.compare(toku_dbt_positive_infinity(), &arbitrary_dbt);
+    c = cmp(toku_dbt_positive_infinity(), &arbitrary_dbt);
     invariant(c > 0);
-    c = cmp.compare(&arbitrary_dbt, toku_dbt_negative_infinity());
+    c = cmp(&arbitrary_dbt, toku_dbt_negative_infinity());
     invariant(c > 0);

-    c = cmp.compare(toku_dbt_negative_infinity(), toku_dbt_negative_infinity());
+    c = cmp(toku_dbt_negative_infinity(), toku_dbt_negative_infinity());
     invariant(c == 0);
-    c = cmp.compare(toku_dbt_positive_infinity(), toku_dbt_positive_infinity());
+    c = cmp(toku_dbt_positive_infinity(), toku_dbt_positive_infinity());
     invariant(c == 0);
+
+    cmp.destroy();
 }

 int main(void) {
...
...@@ -103,7 +103,6 @@ int64_key_cmp (DB *db UU(), const DBT *a, const DBT *b) { ...@@ -103,7 +103,6 @@ int64_key_cmp (DB *db UU(), const DBT *a, const DBT *b) {
static void static void
test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) { test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) {
int r; int r;
ft_h->compare_fun = int64_key_cmp;
FT_CURSOR XMALLOC(cursor); FT_CURSOR XMALLOC(cursor);
FTNODE dn = NULL; FTNODE dn = NULL;
PAIR_ATTR attr; PAIR_ATTR attr;
...@@ -250,7 +249,6 @@ test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) { ...@@ -250,7 +249,6 @@ test_prefetch_read(int fd, FT_HANDLE UU(ft), FT ft_h) {
static void static void
test_subset_read(int fd, FT_HANDLE UU(ft), FT ft_h) { test_subset_read(int fd, FT_HANDLE UU(ft), FT ft_h) {
int r; int r;
ft_h->compare_fun = int64_key_cmp;
FT_CURSOR XMALLOC(cursor); FT_CURSOR XMALLOC(cursor);
FTNODE dn = NULL; FTNODE dn = NULL;
FTNODE_DISK_DATA ndd = NULL; FTNODE_DISK_DATA ndd = NULL;
...@@ -422,6 +420,7 @@ test_prefetching(void) { ...@@ -422,6 +420,7 @@ test_prefetching(void) {
128*1024, 128*1024,
TOKU_DEFAULT_COMPRESSION_METHOD, TOKU_DEFAULT_COMPRESSION_METHOD,
16); 16);
ft_h->cmp.create(int64_key_cmp, nullptr);
ft->ft = ft_h; ft->ft = ft_h;
toku_blocktable_create_new(&ft_h->blocktable); toku_blocktable_create_new(&ft_h->blocktable);
{ int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
...@@ -453,6 +452,7 @@ test_prefetching(void) { ...@@ -453,6 +452,7 @@ test_prefetching(void) {
toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&ft_h->blocktable); toku_blocktable_destroy(&ft_h->blocktable);
ft_h->cmp.destroy();
toku_free(ft_h->h); toku_free(ft_h->h);
toku_free(ft_h); toku_free(ft_h);
toku_free(ft); toku_free(ft);
......
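In the serialization tests, the compare function is no longer assigned to ft_h->compare_fun; the FT's embedded comparator is created during setup and destroyed during teardown instead. A minimal sketch of that pairing, with hypothetical helper names:

    // Hypothetical helpers illustrating the setup/teardown pattern above.
    static void setup_test_comparator(FT ft_h) {
        ft_h->cmp.create(int64_key_cmp, nullptr);   // replaces: ft_h->compare_fun = int64_key_cmp
    }
    static void teardown_test_comparator(FT ft_h) {
        ft_h->cmp.destroy();                        // must run before toku_free(ft_h)
    }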
...@@ -147,7 +147,6 @@ static void ...@@ -147,7 +147,6 @@ static void
test1(int fd, FT ft_h, FTNODE *dn) { test1(int fd, FT ft_h, FTNODE *dn) {
int r; int r;
struct ftnode_fetch_extra bfe_all; struct ftnode_fetch_extra bfe_all;
ft_h->compare_fun = string_key_cmp;
fill_bfe_for_full_read(&bfe_all, ft_h); fill_bfe_for_full_read(&bfe_all, ft_h);
FTNODE_DISK_DATA ndd = NULL; FTNODE_DISK_DATA ndd = NULL;
r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &ndd, &bfe_all); r = toku_deserialize_ftnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &ndd, &bfe_all);
...@@ -226,7 +225,6 @@ test2(int fd, FT ft_h, FTNODE *dn) { ...@@ -226,7 +225,6 @@ test2(int fd, FT ft_h, FTNODE *dn) {
memset(&right, 0, sizeof(right)); memset(&right, 0, sizeof(right));
ft_search search; ft_search search;
ft_h->compare_fun = string_key_cmp;
fill_bfe_for_subset_read( fill_bfe_for_subset_read(
&bfe_subset, &bfe_subset,
ft_h, ft_h,
...@@ -279,7 +277,6 @@ test3_leaf(int fd, FT ft_h, FTNODE *dn) { ...@@ -279,7 +277,6 @@ test3_leaf(int fd, FT ft_h, FTNODE *dn) {
memset(&left, 0, sizeof(left)); memset(&left, 0, sizeof(left));
memset(&right, 0, sizeof(right)); memset(&right, 0, sizeof(right));
ft_h->compare_fun = string_key_cmp;
fill_bfe_for_min_read( fill_bfe_for_min_read(
&bfe_min, &bfe_min,
ft_h ft_h
...@@ -335,13 +332,18 @@ test_serialize_nonleaf(void) { ...@@ -335,13 +332,18 @@ test_serialize_nonleaf(void) {
r = xids_create_child(xids_123, &xids_234, (TXNID)234); r = xids_create_child(xids_123, &xids_234, (TXNID)234);
CKERR(r); CKERR(r);
toku_bnc_insert_msg(BNC(&sn, 0), "a", 2, "aval", 5, FT_NONE, next_dummymsn(), xids_0, true, NULL, string_key_cmp); toku::comparator cmp;
toku_bnc_insert_msg(BNC(&sn, 0), "b", 2, "bval", 5, FT_NONE, next_dummymsn(), xids_123, false, NULL, string_key_cmp); cmp.create(string_key_cmp, nullptr);
toku_bnc_insert_msg(BNC(&sn, 1), "x", 2, "xval", 5, FT_NONE, next_dummymsn(), xids_234, true, NULL, string_key_cmp);
toku_bnc_insert_msg(BNC(&sn, 0), "a", 2, "aval", 5, FT_NONE, next_dummymsn(), xids_0, true, cmp);
toku_bnc_insert_msg(BNC(&sn, 0), "b", 2, "bval", 5, FT_NONE, next_dummymsn(), xids_123, false, cmp);
toku_bnc_insert_msg(BNC(&sn, 1), "x", 2, "xval", 5, FT_NONE, next_dummymsn(), xids_234, true, cmp);
//Cleanup: //Cleanup:
xids_destroy(&xids_0); xids_destroy(&xids_0);
xids_destroy(&xids_123); xids_destroy(&xids_123);
xids_destroy(&xids_234); xids_destroy(&xids_234);
cmp.destroy();
FT_HANDLE XMALLOC(ft); FT_HANDLE XMALLOC(ft);
FT XCALLOC(ft_h); FT XCALLOC(ft_h);
...@@ -353,6 +355,7 @@ test_serialize_nonleaf(void) { ...@@ -353,6 +355,7 @@ test_serialize_nonleaf(void) {
128*1024, 128*1024,
TOKU_DEFAULT_COMPRESSION_METHOD, TOKU_DEFAULT_COMPRESSION_METHOD,
16); 16);
ft_h->cmp.create(string_key_cmp, nullptr);
ft->ft = ft_h; ft->ft = ft_h;
toku_blocktable_create_new(&ft_h->blocktable); toku_blocktable_create_new(&ft_h->blocktable);
...@@ -387,6 +390,7 @@ test_serialize_nonleaf(void) { ...@@ -387,6 +390,7 @@ test_serialize_nonleaf(void) {
toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&ft_h->blocktable); toku_blocktable_destroy(&ft_h->blocktable);
toku_free(ft_h->h); toku_free(ft_h->h);
ft_h->cmp.destroy();
toku_free(ft_h); toku_free(ft_h);
toku_free(ft); toku_free(ft);
......
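toku_bnc_insert_msg() now takes a comparator by reference in place of the old trailing (NULL, compare_fun) pair, so the test builds one toku::comparator up front, reuses it for every insert, and destroys it afterwards. A condensed sketch of the new call shape, reusing the arguments from the hunk above:

    toku::comparator cmp;
    cmp.create(string_key_cmp, nullptr);
    // was: toku_bnc_insert_msg(..., xids_0, true, NULL, string_key_cmp);
    toku_bnc_insert_msg(BNC(&sn, 0), "a", 2, "aval", 5,
                        FT_NONE, next_dummymsn(), xids_0, true, cmp);
    cmp.destroy();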
...@@ -195,9 +195,9 @@ test_serialize_leaf(int valsize, int nelts, double entropy, int ser_runs, int de ...@@ -195,9 +195,9 @@ test_serialize_leaf(int valsize, int nelts, double entropy, int ser_runs, int de
128*1024, 128*1024,
TOKU_DEFAULT_COMPRESSION_METHOD, TOKU_DEFAULT_COMPRESSION_METHOD,
16); 16);
ft_h->cmp.create(long_key_cmp, nullptr);
ft->ft = ft_h; ft->ft = ft_h;
ft_h->compare_fun = long_key_cmp;
toku_blocktable_create_new(&ft_h->blocktable); toku_blocktable_create_new(&ft_h->blocktable);
{ int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
//Want to use block #20 //Want to use block #20
...@@ -279,6 +279,7 @@ test_serialize_leaf(int valsize, int nelts, double entropy, int ser_runs, int de ...@@ -279,6 +279,7 @@ test_serialize_leaf(int valsize, int nelts, double entropy, int ser_runs, int de
toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&ft_h->blocktable); toku_blocktable_destroy(&ft_h->blocktable);
ft_h->cmp.destroy();
toku_free(ft_h->h); toku_free(ft_h->h);
toku_free(ft_h); toku_free(ft_h);
toku_free(ft); toku_free(ft);
...@@ -317,6 +318,8 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy, int ser_runs, int ...@@ -317,6 +318,8 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy, int ser_runs, int
XIDS xids_123; XIDS xids_123;
r = xids_create_child(xids_0, &xids_123, (TXNID)123); r = xids_create_child(xids_0, &xids_123, (TXNID)123);
CKERR(r); CKERR(r);
toku::comparator cmp;
cmp.create(long_key_cmp, nullptr);
int nperchild = nelts / 8; int nperchild = nelts / 8;
for (int ck = 0; ck < sn.n_children; ++ck) { for (int ck = 0; ck < sn.n_children; ++ck) {
long k; long k;
...@@ -332,7 +335,7 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy, int ser_runs, int ...@@ -332,7 +335,7 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy, int ser_runs, int
} }
memset(&buf[c], 0, valsize - c); memset(&buf[c], 0, valsize - c);
toku_bnc_insert_msg(bnc, &k, sizeof k, buf, valsize, FT_NONE, next_dummymsn(), xids_123, true, NULL, long_key_cmp); toku_bnc_insert_msg(bnc, &k, sizeof k, buf, valsize, FT_NONE, next_dummymsn(), xids_123, true, cmp);
} }
if (ck < 7) { if (ck < 7) {
DBT pivotkey; DBT pivotkey;
...@@ -343,6 +346,7 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy, int ser_runs, int ...@@ -343,6 +346,7 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy, int ser_runs, int
//Cleanup: //Cleanup:
xids_destroy(&xids_0); xids_destroy(&xids_0);
xids_destroy(&xids_123); xids_destroy(&xids_123);
cmp.destroy();
FT_HANDLE XMALLOC(ft); FT_HANDLE XMALLOC(ft);
FT XCALLOC(ft_h); FT XCALLOC(ft_h);
...@@ -354,9 +358,9 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy, int ser_runs, int ...@@ -354,9 +358,9 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy, int ser_runs, int
128*1024, 128*1024,
TOKU_DEFAULT_COMPRESSION_METHOD, TOKU_DEFAULT_COMPRESSION_METHOD,
16); 16);
ft_h->cmp.create(long_key_cmp, nullptr);
ft->ft = ft_h; ft->ft = ft_h;
ft_h->compare_fun = long_key_cmp;
toku_blocktable_create_new(&ft_h->blocktable); toku_blocktable_create_new(&ft_h->blocktable);
{ int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); } { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
//Want to use block #20 //Want to use block #20
...@@ -411,10 +415,12 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy, int ser_runs, int ...@@ -411,10 +415,12 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy, int ser_runs, int
toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&ft_h->blocktable); toku_blocktable_destroy(&ft_h->blocktable);
toku_free(ft_h->h); toku_free(ft_h->h);
ft_h->cmp.destroy();
toku_free(ft_h); toku_free(ft_h);
toku_free(ft); toku_free(ft);
toku_free(ndd); toku_free(ndd);
toku_free(ndd2); toku_free(ndd2);
cmp.destroy();
r = close(fd); assert(r != -1); r = close(fd); assert(r != -1);
} }
......
...@@ -164,7 +164,6 @@ string_key_cmp(DB *UU(e), const DBT *a, const DBT *b) ...@@ -164,7 +164,6 @@ string_key_cmp(DB *UU(e), const DBT *a, const DBT *b)
static void static void
setup_dn(enum ftnode_verify_type bft, int fd, FT ft_h, FTNODE *dn, FTNODE_DISK_DATA* ndd) { setup_dn(enum ftnode_verify_type bft, int fd, FT ft_h, FTNODE *dn, FTNODE_DISK_DATA* ndd) {
int r; int r;
ft_h->compare_fun = string_key_cmp;
if (bft == read_all) { if (bft == read_all) {
struct ftnode_fetch_extra bfe; struct ftnode_fetch_extra bfe;
fill_bfe_for_full_read(&bfe, ft_h); fill_bfe_for_full_read(&bfe, ft_h);
...@@ -1050,13 +1049,18 @@ test_serialize_nonleaf(enum ftnode_verify_type bft, bool do_clone) { ...@@ -1050,13 +1049,18 @@ test_serialize_nonleaf(enum ftnode_verify_type bft, bool do_clone) {
r = xids_create_child(xids_123, &xids_234, (TXNID)234); r = xids_create_child(xids_123, &xids_234, (TXNID)234);
CKERR(r); CKERR(r);
toku_bnc_insert_msg(BNC(&sn, 0), "a", 2, "aval", 5, FT_NONE, next_dummymsn(), xids_0, true, NULL, string_key_cmp); toku::comparator cmp;
toku_bnc_insert_msg(BNC(&sn, 0), "b", 2, "bval", 5, FT_NONE, next_dummymsn(), xids_123, false, NULL, string_key_cmp); cmp.create(string_key_cmp, nullptr);
toku_bnc_insert_msg(BNC(&sn, 1), "x", 2, "xval", 5, FT_NONE, next_dummymsn(), xids_234, true, NULL, string_key_cmp);
toku_bnc_insert_msg(BNC(&sn, 0), "a", 2, "aval", 5, FT_NONE, next_dummymsn(), xids_0, true, cmp);
toku_bnc_insert_msg(BNC(&sn, 0), "b", 2, "bval", 5, FT_NONE, next_dummymsn(), xids_123, false, cmp);
toku_bnc_insert_msg(BNC(&sn, 1), "x", 2, "xval", 5, FT_NONE, next_dummymsn(), xids_234, true, cmp);
//Cleanup: //Cleanup:
xids_destroy(&xids_0); xids_destroy(&xids_0);
xids_destroy(&xids_123); xids_destroy(&xids_123);
xids_destroy(&xids_234); xids_destroy(&xids_234);
cmp.destroy();
FT_HANDLE XMALLOC(ft); FT_HANDLE XMALLOC(ft);
FT XCALLOC(ft_h); FT XCALLOC(ft_h);
...@@ -1068,6 +1072,7 @@ test_serialize_nonleaf(enum ftnode_verify_type bft, bool do_clone) { ...@@ -1068,6 +1072,7 @@ test_serialize_nonleaf(enum ftnode_verify_type bft, bool do_clone) {
128*1024, 128*1024,
TOKU_DEFAULT_COMPRESSION_METHOD, TOKU_DEFAULT_COMPRESSION_METHOD,
16); 16);
ft_h->cmp.create(string_key_cmp, nullptr);
ft->ft = ft_h; ft->ft = ft_h;
toku_blocktable_create_new(&ft_h->blocktable); toku_blocktable_create_new(&ft_h->blocktable);
...@@ -1125,6 +1130,7 @@ test_serialize_nonleaf(enum ftnode_verify_type bft, bool do_clone) { ...@@ -1125,6 +1130,7 @@ test_serialize_nonleaf(enum ftnode_verify_type bft, bool do_clone) {
toku_free(ft); toku_free(ft);
toku_free(src_ndd); toku_free(src_ndd);
toku_free(dest_ndd); toku_free(dest_ndd);
cmp.destroy();
r = close(fd); assert(r != -1); r = close(fd); assert(r != -1);
} }
......
...@@ -152,7 +152,7 @@ insert_into_child_buffer(FT_HANDLE ft, FTNODE node, int childnum, int minkey, in ...@@ -152,7 +152,7 @@ insert_into_child_buffer(FT_HANDLE ft, FTNODE node, int childnum, int minkey, in
unsigned int key = htonl(val); unsigned int key = htonl(val);
DBT thekey; toku_fill_dbt(&thekey, &key, sizeof key); DBT thekey; toku_fill_dbt(&thekey, &key, sizeof key);
DBT theval; toku_fill_dbt(&theval, &val, sizeof val); DBT theval; toku_fill_dbt(&theval, &val, sizeof val);
toku_ft_append_to_child_buffer(ft->ft->compare_fun, NULL, node, childnum, FT_INSERT, msn, xids_get_root_xids(), true, &thekey, &theval); toku_ft_append_to_child_buffer(ft->ft->cmp, node, childnum, FT_INSERT, msn, xids_get_root_xids(), true, &thekey, &theval);
node->max_msn_applied_to_node_on_disk = msn; node->max_msn_applied_to_node_on_disk = msn;
} }
} }
......
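toku_ft_append_to_child_buffer() gets the same treatment: the leading (compare_fun, DESCRIPTOR) pair collapses into the tree's comparator. A sketch of the before/after call, with the arguments taken from the hunk above:

    // before: toku_ft_append_to_child_buffer(ft->ft->compare_fun, NULL, node, childnum, ...);
    toku_ft_append_to_child_buffer(ft->ft->cmp, node, childnum, FT_INSERT,
                                   msn, xids_get_root_xids(), true, &thekey, &theval);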
...@@ -134,7 +134,7 @@ append_leaf(FT_HANDLE ft, FTNODE leafnode, void *key, uint32_t keylen, void *val ...@@ -134,7 +134,7 @@ append_leaf(FT_HANDLE ft, FTNODE leafnode, void *key, uint32_t keylen, void *val
FT_MSG_S msg = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &theval }} }; FT_MSG_S msg = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &theval }} };
txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false); txn_gc_info gc_info(nullptr, TXNID_NONE, TXNID_NONE, false);
toku_ft_leaf_apply_msg(ft->ft->compare_fun, ft->ft->update_fun, &ft->ft->cmp_descriptor, leafnode, -1, &msg, &gc_info, nullptr, nullptr); toku_ft_leaf_apply_msg(ft->ft->cmp, ft->ft->update_fun, leafnode, -1, &msg, &gc_info, nullptr, nullptr);
{ {
int r = toku_ft_lookup(ft, &thekey, lookup_checkf, &pair); int r = toku_ft_lookup(ft, &thekey, lookup_checkf, &pair);
assert(r==0); assert(r==0);
...@@ -142,7 +142,7 @@ append_leaf(FT_HANDLE ft, FTNODE leafnode, void *key, uint32_t keylen, void *val ...@@ -142,7 +142,7 @@ append_leaf(FT_HANDLE ft, FTNODE leafnode, void *key, uint32_t keylen, void *val
} }
FT_MSG_S badmsg = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &badval }} }; FT_MSG_S badmsg = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &badval }} };
toku_ft_leaf_apply_msg(ft->ft->compare_fun, ft->ft->update_fun, &ft->ft->cmp_descriptor, leafnode, -1, &badmsg, &gc_info, nullptr, nullptr); toku_ft_leaf_apply_msg(ft->ft->cmp, ft->ft->update_fun, leafnode, -1, &badmsg, &gc_info, nullptr, nullptr);
// message should be rejected for duplicate msn, row should still have original val // message should be rejected for duplicate msn, row should still have original val
{ {
...@@ -155,7 +155,7 @@ append_leaf(FT_HANDLE ft, FTNODE leafnode, void *key, uint32_t keylen, void *val ...@@ -155,7 +155,7 @@ append_leaf(FT_HANDLE ft, FTNODE leafnode, void *key, uint32_t keylen, void *val
msn = next_dummymsn(); msn = next_dummymsn();
ft->ft->h->max_msn_in_ft = msn; ft->ft->h->max_msn_in_ft = msn;
FT_MSG_S msg2 = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &val2 }} }; FT_MSG_S msg2 = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &val2 }} };
toku_ft_leaf_apply_msg(ft->ft->compare_fun, ft->ft->update_fun, &ft->ft->cmp_descriptor, leafnode, -1, &msg2, &gc_info, nullptr, nullptr); toku_ft_leaf_apply_msg(ft->ft->cmp, ft->ft->update_fun, leafnode, -1, &msg2, &gc_info, nullptr, nullptr);
// message should be accepted, val should have new value // message should be accepted, val should have new value
{ {
...@@ -167,7 +167,7 @@ append_leaf(FT_HANDLE ft, FTNODE leafnode, void *key, uint32_t keylen, void *val ...@@ -167,7 +167,7 @@ append_leaf(FT_HANDLE ft, FTNODE leafnode, void *key, uint32_t keylen, void *val
// now verify that message with lesser (older) msn is rejected // now verify that message with lesser (older) msn is rejected
msn.msn = msn.msn - 10; msn.msn = msn.msn - 10;
FT_MSG_S msg3 = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &badval } }}; FT_MSG_S msg3 = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &badval } }};
toku_ft_leaf_apply_msg(ft->ft->compare_fun, ft->ft->update_fun, &ft->ft->cmp_descriptor, leafnode, -1, &msg3, &gc_info, nullptr, nullptr); toku_ft_leaf_apply_msg(ft->ft->cmp, ft->ft->update_fun, leafnode, -1, &msg3, &gc_info, nullptr, nullptr);
// message should be rejected, val should still have value in pair2 // message should be rejected, val should still have value in pair2
{ {
......
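toku_ft_leaf_apply_msg() likewise drops its separate &ft->cmp_descriptor argument, since the comparator already carries the descriptor. A sketch of the new call, reusing the message and gc_info built in the test above:

    // before: toku_ft_leaf_apply_msg(ft->ft->compare_fun, ft->ft->update_fun,
    //                                &ft->ft->cmp_descriptor, leafnode, -1, &msg, ...);
    toku_ft_leaf_apply_msg(ft->ft->cmp, ft->ft->update_fun,
                           leafnode, -1, &msg, &gc_info, nullptr, nullptr);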
...@@ -156,7 +156,7 @@ insert_into_child_buffer(FT_HANDLE ft, FTNODE node, int childnum, int minkey, in ...@@ -156,7 +156,7 @@ insert_into_child_buffer(FT_HANDLE ft, FTNODE node, int childnum, int minkey, in
unsigned int key = htonl(val); unsigned int key = htonl(val);
DBT thekey; toku_fill_dbt(&thekey, &key, sizeof key); DBT thekey; toku_fill_dbt(&thekey, &key, sizeof key);
DBT theval; toku_fill_dbt(&theval, &val, sizeof val); DBT theval; toku_fill_dbt(&theval, &val, sizeof val);
toku_ft_append_to_child_buffer(ft->ft->compare_fun, NULL, node, childnum, FT_INSERT, msn, xids_get_root_xids(), true, &thekey, &theval); toku_ft_append_to_child_buffer(ft->ft->cmp, node, childnum, FT_INSERT, msn, xids_get_root_xids(), true, &thekey, &theval);
// Create bad tree (don't do following): // Create bad tree (don't do following):
// node->max_msn_applied_to_node = msn; // node->max_msn_applied_to_node = msn;
......
...@@ -144,7 +144,7 @@ insert_into_child_buffer(FT_HANDLE ft, FTNODE node, int childnum, int minkey, in ...@@ -144,7 +144,7 @@ insert_into_child_buffer(FT_HANDLE ft, FTNODE node, int childnum, int minkey, in
DBT thekey; toku_fill_dbt(&thekey, &key, sizeof key); DBT thekey; toku_fill_dbt(&thekey, &key, sizeof key);
DBT theval; toku_fill_dbt(&theval, &val, sizeof val); DBT theval; toku_fill_dbt(&theval, &val, sizeof val);
MSN msn = next_dummymsn(); MSN msn = next_dummymsn();
toku_ft_append_to_child_buffer(ft->ft->compare_fun, NULL, node, childnum, FT_INSERT, msn, xids_get_root_xids(), true, &thekey, &theval); toku_ft_append_to_child_buffer(ft->ft->cmp, node, childnum, FT_INSERT, msn, xids_get_root_xids(), true, &thekey, &theval);
} }
} }
......
...@@ -281,3 +281,16 @@ ...@@ -281,3 +281,16 @@
fun:_dl_start fun:_dl_start
obj:/lib/x86_64-linux-gnu/ld-2.17.so obj:/lib/x86_64-linux-gnu/ld-2.17.so
} }
{
<ld_is_not_clean_on_arch_linux_june_2014>
Memcheck:Leak
match-leak-kinds: reachable
fun:calloc
obj:/usr/lib/libdl-2.19.so
fun:dlsym
fun:_Z19toku_memory_startupv
fun:call_init.part.0
fun:_dl_init
obj:/usr/lib/ld-2.19.so
}
...@@ -91,7 +91,7 @@ PATENT RIGHTS GRANT: ...@@ -91,7 +91,7 @@ PATENT RIGHTS GRANT:
#include <toku_assert.h> #include <toku_assert.h>
void concurrent_tree::create(comparator *cmp) { void concurrent_tree::create(const comparator *cmp) {
// start with an empty root node. we do this instead of // start with an empty root node. we do this instead of
// setting m_root to null so there's always a root to lock // setting m_root to null so there's always a root to lock
m_root.create_root(cmp); m_root.create_root(cmp);
......
...@@ -173,7 +173,7 @@ class concurrent_tree { ...@@ -173,7 +173,7 @@ class concurrent_tree {
}; };
// effect: initialize the tree to an empty state // effect: initialize the tree to an empty state
void create(comparator *cmp); void create(const comparator *cmp);
// effect: destroy the tree. // effect: destroy the tree.
// requires: tree is empty // requires: tree is empty
......
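concurrent_tree::create() now takes a const comparator pointer, so the only requirement on callers is that the comparator outlive the tree. A sketch of the expected usage, assuming a caller-owned comparator and a placeholder compare function:

    toku::comparator cmp;
    cmp.create(my_compare_fn, nullptr);   // my_compare_fn is a placeholder ft_compare_func
    concurrent_tree tree;
    tree.create(&cmp);                    // the tree only reads through the const pointer
    // ... acquire and release locked keyranges ...
    tree.destroy();                       // requires: tree is empty
    cmp.destroy();                        // safe only after the tree is gone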
...@@ -129,13 +129,13 @@ void keyrange::create_copy(const keyrange &range) { ...@@ -129,13 +129,13 @@ void keyrange::create_copy(const keyrange &range) {
// extend this keyrange by choosing the leftmost and rightmost // extend this keyrange by choosing the leftmost and rightmost
// endpoints between this range and the given. replaced keys // endpoints between this range and the given. replaced keys
// in this range are freed and inherited keys are copied. // in this range are freed and inherited keys are copied.
void keyrange::extend(comparator *cmp, const keyrange &range) { void keyrange::extend(const comparator &cmp, const keyrange &range) {
const DBT *range_left = range.get_left_key(); const DBT *range_left = range.get_left_key();
const DBT *range_right = range.get_right_key(); const DBT *range_right = range.get_right_key();
if (cmp->compare(range_left, get_left_key()) < 0) { if (cmp(range_left, get_left_key()) < 0) {
replace_left_key(range_left); replace_left_key(range_left);
} }
if (cmp->compare(range_right, get_right_key()) > 0) { if (cmp(range_right, get_right_key()) > 0) {
replace_right_key(range_right); replace_right_key(range_right);
} }
} }
...@@ -152,20 +152,20 @@ uint64_t keyrange::get_memory_size(void) const { ...@@ -152,20 +152,20 @@ uint64_t keyrange::get_memory_size(void) const {
} }
// compare ranges. // compare ranges.
keyrange::comparison keyrange::compare(comparator *cmp, const keyrange &range) const { keyrange::comparison keyrange::compare(const comparator &cmp, const keyrange &range) const {
if (cmp->compare(get_right_key(), range.get_left_key()) < 0) { if (cmp(get_right_key(), range.get_left_key()) < 0) {
return comparison::LESS_THAN; return comparison::LESS_THAN;
} else if (cmp->compare(get_left_key(), range.get_right_key()) > 0) { } else if (cmp(get_left_key(), range.get_right_key()) > 0) {
return comparison::GREATER_THAN; return comparison::GREATER_THAN;
} else if (cmp->compare(get_left_key(), range.get_left_key()) == 0 && } else if (cmp(get_left_key(), range.get_left_key()) == 0 &&
cmp->compare(get_right_key(), range.get_right_key()) == 0) { cmp(get_right_key(), range.get_right_key()) == 0) {
return comparison::EQUALS; return comparison::EQUALS;
} else { } else {
return comparison::OVERLAPS; return comparison::OVERLAPS;
} }
} }
bool keyrange::overlaps(comparator *cmp, const keyrange &range) const { bool keyrange::overlaps(const comparator &cmp, const keyrange &range) const {
// equality is a stronger form of overlapping. // equality is a stronger form of overlapping.
// so two ranges "overlap" if they're either equal or just overlapping. // so two ranges "overlap" if they're either equal or just overlapping.
comparison c = compare(cmp, range); comparison c = compare(cmp, range);
......
...@@ -117,7 +117,7 @@ class keyrange { ...@@ -117,7 +117,7 @@ class keyrange {
// effect: extends the keyrange by choosing the leftmost and rightmost // effect: extends the keyrange by choosing the leftmost and rightmost
// endpoints from this range and the given range. // endpoints from this range and the given range.
// replaced keys in this range are freed, new keys are copied. // replaced keys in this range are freed, new keys are copied.
void extend(comparator *cmp, const keyrange &range); void extend(const comparator &cmp, const keyrange &range);
// returns: the amount of memory this keyrange takes. does not account // returns: the amount of memory this keyrange takes. does not account
// for point optimizations or malloc overhead. // for point optimizations or malloc overhead.
...@@ -143,10 +143,10 @@ class keyrange { ...@@ -143,10 +143,10 @@ class keyrange {
// EQUALS if given range has the same left and right endpoints // EQUALS if given range has the same left and right endpoints
// OVERLAPS if at least one of the given range's endpoints falls // OVERLAPS if at least one of the given range's endpoints falls
// between this range's endpoints // between this range's endpoints
comparison compare(comparator *cmp, const keyrange &range) const; comparison compare(const comparator &cmp, const keyrange &range) const;
// returns: true if the range and the given range are equal or overlapping // returns: true if the range and the given range are equal or overlapping
bool overlaps(comparator *cmp, const keyrange &range) const; bool overlaps(const comparator &cmp, const keyrange &range) const;
// returns: a keyrange representing -inf, +inf // returns: a keyrange representing -inf, +inf
static keyrange get_infinite_range(void); static keyrange get_infinite_range(void);
......
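keyrange::extend(), compare() and overlaps() now take the comparator by const reference and invoke it directly through operator(). A short sketch under the new signatures, with a and b standing in for already-created keyranges and cmp for a live toku::comparator:

    a.extend(cmp, b);                            // was: a.extend(&cmp, b)
    keyrange::comparison c = a.compare(cmp, b);  // LESS_THAN, GREATER_THAN, EQUALS or OVERLAPS
    bool touching = a.overlaps(cmp, b);          // equality counts as overlapping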
...@@ -121,14 +121,12 @@ void locktree::create(locktree_manager *mgr, DICTIONARY_ID dict_id, ...@@ -121,14 +121,12 @@ void locktree::create(locktree_manager *mgr, DICTIONARY_ID dict_id,
m_mgr = mgr; m_mgr = mgr;
m_dict_id = dict_id; m_dict_id = dict_id;
// the only reason m_cmp is malloc'd here is to prevent gdb from printing m_cmp.create(cmp, desc);
// out an entire DB struct every time you inspect a locktree.
XCALLOC(m_cmp);
m_cmp->create(cmp, desc);
m_reference_count = 1; m_reference_count = 1;
m_userdata = nullptr; m_userdata = nullptr;
XCALLOC(m_rangetree); XCALLOC(m_rangetree);
m_rangetree->create(m_cmp); m_rangetree->create(&m_cmp);
m_sto_txnid = TXNID_NONE; m_sto_txnid = TXNID_NONE;
m_sto_buffer.create(); m_sto_buffer.create();
...@@ -155,11 +153,10 @@ void locktree::create(locktree_manager *mgr, DICTIONARY_ID dict_id, ...@@ -155,11 +153,10 @@ void locktree::create(locktree_manager *mgr, DICTIONARY_ID dict_id,
void locktree::destroy(void) { void locktree::destroy(void) {
invariant(m_reference_count == 0); invariant(m_reference_count == 0);
m_cmp.destroy();
m_rangetree->destroy(); m_rangetree->destroy();
toku_free(m_cmp);
toku_free(m_rangetree); toku_free(m_rangetree);
m_sto_buffer.destroy(); m_sto_buffer.destroy();
m_lock_request_info.pending_lock_requests.destroy(); m_lock_request_info.pending_lock_requests.destroy();
} }
...@@ -299,7 +296,7 @@ void locktree::sto_migrate_buffer_ranges_to_tree(void *prepared_lkr) { ...@@ -299,7 +296,7 @@ void locktree::sto_migrate_buffer_ranges_to_tree(void *prepared_lkr) {
concurrent_tree sto_rangetree; concurrent_tree sto_rangetree;
concurrent_tree::locked_keyrange sto_lkr; concurrent_tree::locked_keyrange sto_lkr;
sto_rangetree.create(m_cmp); sto_rangetree.create(&m_cmp);
// insert all of the ranges from the single txnid buffer into a new rangetree // insert all of the ranges from the single txnid buffer into a new rangetree
range_buffer::iterator iter(&m_sto_buffer); range_buffer::iterator iter(&m_sto_buffer);
...@@ -438,7 +435,7 @@ int locktree::try_acquire_lock(bool is_write_request, ...@@ -438,7 +435,7 @@ int locktree::try_acquire_lock(bool is_write_request,
txnid_set *conflicts, bool big_txn) { txnid_set *conflicts, bool big_txn) {
// All ranges in the locktree must have left endpoints <= right endpoints. // All ranges in the locktree must have left endpoints <= right endpoints.
// Range comparisons rely on this fact, so we make a paranoid invariant here. // Range comparisons rely on this fact, so we make a paranoid invariant here.
paranoid_invariant(m_cmp->compare(left_key, right_key) <= 0); paranoid_invariant(m_cmp(left_key, right_key) <= 0);
int r = m_mgr == nullptr ? 0 : int r = m_mgr == nullptr ? 0 :
m_mgr->check_current_lock_constraints(big_txn); m_mgr->check_current_lock_constraints(big_txn);
if (r == 0) { if (r == 0) {
...@@ -581,7 +578,7 @@ void locktree::release_locks(TXNID txnid, const range_buffer *ranges) { ...@@ -581,7 +578,7 @@ void locktree::release_locks(TXNID txnid, const range_buffer *ranges) {
const DBT *right_key = rec.get_right_key(); const DBT *right_key = rec.get_right_key();
// All ranges in the locktree must have left endpoints <= right endpoints. // All ranges in the locktree must have left endpoints <= right endpoints.
// Range comparisons rely on this fact, so we make a paranoid invariant here. // Range comparisons rely on this fact, so we make a paranoid invariant here.
paranoid_invariant(m_cmp->compare(left_key, right_key) <= 0); paranoid_invariant(m_cmp(left_key, right_key) <= 0);
remove_overlapping_locks_for_txnid(txnid, left_key, right_key); remove_overlapping_locks_for_txnid(txnid, left_key, right_key);
iter.next(); iter.next();
} }
...@@ -795,7 +792,7 @@ struct lt_lock_request_info *locktree::get_lock_request_info(void) { ...@@ -795,7 +792,7 @@ struct lt_lock_request_info *locktree::get_lock_request_info(void) {
} }
void locktree::set_descriptor(DESCRIPTOR desc) { void locktree::set_descriptor(DESCRIPTOR desc) {
m_cmp->set_descriptor(desc); m_cmp.set_descriptor(desc);
} }
locktree_manager *locktree::get_manager(void) const { locktree_manager *locktree::get_manager(void) const {
......
...@@ -323,8 +323,7 @@ namespace toku { ...@@ -323,8 +323,7 @@ namespace toku {
// - Destroy the manager. // - Destroy the manager.
class locktree { class locktree {
public: public:
// effect: Creates a locktree that uses the given memory tracker // effect: Creates a locktree
// to report memory usage and honor memory constraints.
void create(locktree_manager *mgr, DICTIONARY_ID dict_id, void create(locktree_manager *mgr, DICTIONARY_ID dict_id,
DESCRIPTOR desc, ft_compare_func cmp); DESCRIPTOR desc, ft_compare_func cmp);
...@@ -399,7 +398,7 @@ namespace toku { ...@@ -399,7 +398,7 @@ namespace toku {
// is valid for as long as the locktree. this is currently // is valid for as long as the locktree. this is currently
// implemented by opening an ft_handle for this locktree and // implemented by opening an ft_handle for this locktree and
// storing it as userdata below. // storing it as userdata below.
comparator *m_cmp; comparator m_cmp;
concurrent_tree *m_rangetree; concurrent_tree *m_rangetree;
......
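The locktree now embeds the comparator as a plain member instead of heap-allocating it, so creation, key comparisons, descriptor swaps and teardown all go through m_cmp directly. A condensed sketch of the member's lifecycle, mirroring the hunks above:

    m_cmp.create(cmp, desc);              // in locktree::create(); no XCALLOC/toku_free pair
    m_rangetree->create(&m_cmp);          // the concurrent_tree borrows a const pointer
    paranoid_invariant(m_cmp(left_key, right_key) <= 0);   // direct operator() comparison
    m_cmp.set_descriptor(desc);           // in locktree::set_descriptor()
    m_cmp.destroy();                      // in locktree::destroy()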
...@@ -126,19 +126,19 @@ void concurrent_tree_unit_test::test_lkr_acquire_release(void) { ...@@ -126,19 +126,19 @@ void concurrent_tree_unit_test::test_lkr_acquire_release(void) {
// if the subtree root does not overlap then one of its children // if the subtree root does not overlap then one of its children
// must exist and have an overlapping range. // must exist and have an overlapping range.
if (!lkr.m_subtree->m_range.overlaps(&cmp, range)) { if (!lkr.m_subtree->m_range.overlaps(cmp, range)) {
treenode *left = lkr.m_subtree->m_left_child.ptr; treenode *left = lkr.m_subtree->m_left_child.ptr;
treenode *right = lkr.m_subtree->m_right_child.ptr; treenode *right = lkr.m_subtree->m_right_child.ptr;
if (left != nullptr) { if (left != nullptr) {
// left exists, so if it does not overlap then the right must // left exists, so if it does not overlap then the right must
if (!left->m_range.overlaps(&cmp, range)) { if (!left->m_range.overlaps(cmp, range)) {
invariant_notnull(right); invariant_notnull(right);
invariant(right->m_range.overlaps(&cmp, range)); invariant(right->m_range.overlaps(cmp, range));
} }
} else { } else {
// no left child, so the right must exist and be overlapping // no left child, so the right must exist and be overlapping
invariant_notnull(right); invariant_notnull(right);
invariant(right->m_range.overlaps(&cmp, range)); invariant(right->m_range.overlaps(cmp, range));
} }
} }
...@@ -160,6 +160,8 @@ void concurrent_tree_unit_test::test_lkr_acquire_release(void) { ...@@ -160,6 +160,8 @@ void concurrent_tree_unit_test::test_lkr_acquire_release(void) {
lkr.release(); lkr.release();
tree.destroy(); tree.destroy();
} }
cmp.destroy();
} }
} /* namespace toku */ } /* namespace toku */
......
...@@ -117,17 +117,17 @@ static void verify_unique_keys(void) { ...@@ -117,17 +117,17 @@ static void verify_unique_keys(void) {
} }
static uint64_t check_for_range_and_count(concurrent_tree::locked_keyrange *lkr, static uint64_t check_for_range_and_count(concurrent_tree::locked_keyrange *lkr,
comparator *cmp, const keyrange &range, bool range_should_exist) { const comparator &cmp, const keyrange &range, bool range_should_exist) {
struct check_fn_obj { struct check_fn_obj {
comparator *cmp; const comparator *cmp;
uint64_t count; uint64_t count;
keyrange target_range; keyrange target_range;
bool target_range_found; bool target_range_found;
bool fn(const keyrange &query_range, TXNID txnid) { bool fn(const keyrange &query_range, TXNID txnid) {
(void) txnid; (void) txnid;
if (query_range.compare(cmp, target_range) == keyrange::comparison::EQUALS) { if (query_range.compare(*cmp, target_range) == keyrange::comparison::EQUALS) {
invariant(!target_range_found); invariant(!target_range_found);
target_range_found = true; target_range_found = true;
} }
...@@ -135,7 +135,7 @@ static uint64_t check_for_range_and_count(concurrent_tree::locked_keyrange *lkr, ...@@ -135,7 +135,7 @@ static uint64_t check_for_range_and_count(concurrent_tree::locked_keyrange *lkr,
return true; return true;
} }
} check_fn; } check_fn;
check_fn.cmp = cmp; check_fn.cmp = &cmp;
check_fn.count = 0; check_fn.count = 0;
check_fn.target_range = range; check_fn.target_range = range;
check_fn.target_range_found = false; check_fn.target_range_found = false;
...@@ -174,14 +174,14 @@ void concurrent_tree_unit_test::test_lkr_insert_remove(void) { ...@@ -174,14 +174,14 @@ void concurrent_tree_unit_test::test_lkr_insert_remove(void) {
// insert an element. it should exist and the // insert an element. it should exist and the
// count should be correct. // count should be correct.
lkr.insert(range, i); lkr.insert(range, i);
n = check_for_range_and_count(&lkr, &cmp, range, true); n = check_for_range_and_count(&lkr, cmp, range, true);
if (i >= cap) { if (i >= cap) {
invariant(n == cap + 1); invariant(n == cap + 1);
// remove an element previously inserted. it should // remove an element previously inserted. it should
// no longer exist and the count should be correct. // no longer exist and the count should be correct.
range.create(get_ith_key_from_set(i - cap), get_ith_key_from_set(i - cap)); range.create(get_ith_key_from_set(i - cap), get_ith_key_from_set(i - cap));
lkr.remove(range); lkr.remove(range);
n = check_for_range_and_count(&lkr, &cmp, range, false); n = check_for_range_and_count(&lkr, cmp, range, false);
invariant(n == cap); invariant(n == cap);
} else { } else {
invariant(n == i + 1); invariant(n == i + 1);
...@@ -193,12 +193,13 @@ void concurrent_tree_unit_test::test_lkr_insert_remove(void) { ...@@ -193,12 +193,13 @@ void concurrent_tree_unit_test::test_lkr_insert_remove(void) {
keyrange range; keyrange range;
range.create(get_ith_key_from_set(num_keys - i - 1), get_ith_key_from_set(num_keys - i - 1)); range.create(get_ith_key_from_set(num_keys - i - 1), get_ith_key_from_set(num_keys - i - 1));
lkr.remove(range); lkr.remove(range);
n = check_for_range_and_count(&lkr, &cmp, range, false); n = check_for_range_and_count(&lkr, cmp, range, false);
invariant(n == (cap - i - 1)); invariant(n == (cap - i - 1));
} }
lkr.release(); lkr.release();
tree.destroy(); tree.destroy();
cmp.destroy();
} }
} /* namespace toku */ } /* namespace toku */
......
...@@ -136,6 +136,7 @@ void concurrent_tree_unit_test::test_lkr_insert_serial_large(void) { ...@@ -136,6 +136,7 @@ void concurrent_tree_unit_test::test_lkr_insert_serial_large(void) {
lkr.release(); lkr.release();
tree.destroy(); tree.destroy();
cmp.destroy();
} }
} /* namespace toku */ } /* namespace toku */
......
...@@ -132,6 +132,8 @@ void concurrent_tree_unit_test::test_lkr_remove_all(void) { ...@@ -132,6 +132,8 @@ void concurrent_tree_unit_test::test_lkr_remove_all(void) {
lkr.release(); lkr.release();
tree.destroy(); tree.destroy();
} }
cmp.destroy();
} }
} /* namespace toku */ } /* namespace toku */
......
...@@ -128,11 +128,11 @@ void locktree_unit_test::test_misc(void) { ...@@ -128,11 +128,11 @@ void locktree_unit_test::test_misc(void) {
// descriptor when we set the locktree's descriptor // descriptor when we set the locktree's descriptor
lt.set_descriptor(&d1); lt.set_descriptor(&d1);
expected_descriptor = &d1; expected_descriptor = &d1;
r = lt.m_cmp->compare(&dbt_a, &dbt_b); r = lt.m_cmp(&dbt_a, &dbt_b);
invariant(r == expected_comparison_magic); invariant(r == expected_comparison_magic);
lt.set_descriptor(&d2); lt.set_descriptor(&d2);
expected_descriptor = &d2; expected_descriptor = &d2;
r = lt.m_cmp->compare(&dbt_a, &dbt_b); r = lt.m_cmp(&dbt_a, &dbt_b);
invariant(r == expected_comparison_magic); invariant(r == expected_comparison_magic);
lt.release_reference(); lt.release_reference();
......
...@@ -143,7 +143,7 @@ void locktree_unit_test::test_overlapping_relock(void) { ...@@ -143,7 +143,7 @@ void locktree_unit_test::test_overlapping_relock(void) {
bool saw_the_other; bool saw_the_other;
TXNID expected_txnid; TXNID expected_txnid;
keyrange *expected_range; keyrange *expected_range;
comparator *cmp; const comparator *cmp;
bool fn(const keyrange &range, TXNID txnid) { bool fn(const keyrange &range, TXNID txnid) {
if (txnid == the_other_txnid) { if (txnid == the_other_txnid) {
invariant(!saw_the_other); invariant(!saw_the_other);
...@@ -151,12 +151,12 @@ void locktree_unit_test::test_overlapping_relock(void) { ...@@ -151,12 +151,12 @@ void locktree_unit_test::test_overlapping_relock(void) {
return true; return true;
} }
invariant(txnid == expected_txnid); invariant(txnid == expected_txnid);
keyrange::comparison c = range.compare(cmp, *expected_range); keyrange::comparison c = range.compare(*cmp, *expected_range);
invariant(c == keyrange::comparison::EQUALS); invariant(c == keyrange::comparison::EQUALS);
return true; return true;
} }
} verify_fn; } verify_fn;
verify_fn.cmp = lt.m_cmp; verify_fn.cmp = &lt.m_cmp;
#define do_verify() \ #define do_verify() \
do { verify_fn.saw_the_other = false; locktree_iterate<verify_fn_obj>(&lt, &verify_fn); } while (0) do { verify_fn.saw_the_other = false; locktree_iterate<verify_fn_obj>(&lt, &verify_fn); } while (0)
......
...@@ -149,15 +149,15 @@ void locktree_unit_test::test_single_txnid_optimization(void) { ...@@ -149,15 +149,15 @@ void locktree_unit_test::test_single_txnid_optimization(void) {
struct verify_fn_obj { struct verify_fn_obj {
TXNID expected_txnid; TXNID expected_txnid;
keyrange *expected_range; keyrange *expected_range;
comparator *cmp; const comparator *cmp;
bool fn(const keyrange &range, TXNID txnid) { bool fn(const keyrange &range, TXNID txnid) {
invariant(txnid == expected_txnid); invariant(txnid == expected_txnid);
keyrange::comparison c = range.compare(cmp, *expected_range); keyrange::comparison c = range.compare(*cmp, *expected_range);
invariant(c == keyrange::comparison::EQUALS); invariant(c == keyrange::comparison::EQUALS);
return true; return true;
} }
} verify_fn; } verify_fn;
verify_fn.cmp = lt.m_cmp; verify_fn.cmp = &lt.m_cmp;
keyrange range; keyrange range;
range.create(one, one); range.create(one, one);
......
...@@ -99,7 +99,7 @@ void treenode::mutex_unlock(void) { ...@@ -99,7 +99,7 @@ void treenode::mutex_unlock(void) {
toku_mutex_unlock(&m_mutex); toku_mutex_unlock(&m_mutex);
} }
void treenode::init(comparator *cmp) { void treenode::init(const comparator *cmp) {
m_txnid = TXNID_NONE; m_txnid = TXNID_NONE;
m_is_root = false; m_is_root = false;
m_is_empty = true; m_is_empty = true;
...@@ -117,7 +117,7 @@ void treenode::init(comparator *cmp) { ...@@ -117,7 +117,7 @@ void treenode::init(comparator *cmp) {
m_right_child.set(nullptr); m_right_child.set(nullptr);
} }
void treenode::create_root(comparator *cmp) { void treenode::create_root(const comparator *cmp) {
init(cmp); init(cmp);
m_is_root = true; m_is_root = true;
} }
...@@ -145,10 +145,10 @@ bool treenode::is_empty(void) { ...@@ -145,10 +145,10 @@ bool treenode::is_empty(void) {
} }
bool treenode::range_overlaps(const keyrange &range) { bool treenode::range_overlaps(const keyrange &range) {
return m_range.overlaps(m_cmp, range); return m_range.overlaps(*m_cmp, range);
} }
treenode *treenode::alloc(comparator *cmp, const keyrange &range, TXNID txnid) { treenode *treenode::alloc(const comparator *cmp, const keyrange &range, TXNID txnid) {
treenode *XCALLOC(node); treenode *XCALLOC(node);
node->init(cmp); node->init(cmp);
node->set_range_and_txnid(range, txnid); node->set_range_and_txnid(range, txnid);
...@@ -190,7 +190,7 @@ treenode *treenode::find_node_with_overlapping_child(const keyrange &range, ...@@ -190,7 +190,7 @@ treenode *treenode::find_node_with_overlapping_child(const keyrange &range,
// determine which child to look at based on a comparison. if we were // determine which child to look at based on a comparison. if we were
// given a comparison hint, use that. otherwise, compare them now. // given a comparison hint, use that. otherwise, compare them now.
keyrange::comparison c = cmp_hint ? *cmp_hint : range.compare(m_cmp, m_range); keyrange::comparison c = cmp_hint ? *cmp_hint : range.compare(*m_cmp, m_range);
treenode *child; treenode *child;
if (c == keyrange::comparison::LESS_THAN) { if (c == keyrange::comparison::LESS_THAN) {
...@@ -209,7 +209,7 @@ treenode *treenode::find_node_with_overlapping_child(const keyrange &range, ...@@ -209,7 +209,7 @@ treenode *treenode::find_node_with_overlapping_child(const keyrange &range,
if (child == nullptr) { if (child == nullptr) {
return this; return this;
} else { } else {
c = range.compare(m_cmp, child->m_range); c = range.compare(*m_cmp, child->m_range);
if (c == keyrange::comparison::EQUALS || c == keyrange::comparison::OVERLAPS) { if (c == keyrange::comparison::EQUALS || c == keyrange::comparison::OVERLAPS) {
child->mutex_unlock(); child->mutex_unlock();
return this; return this;
...@@ -225,7 +225,7 @@ treenode *treenode::find_node_with_overlapping_child(const keyrange &range, ...@@ -225,7 +225,7 @@ treenode *treenode::find_node_with_overlapping_child(const keyrange &range,
template <class F> template <class F>
void treenode::traverse_overlaps(const keyrange &range, F *function) { void treenode::traverse_overlaps(const keyrange &range, F *function) {
keyrange::comparison c = range.compare(m_cmp, m_range); keyrange::comparison c = range.compare(*m_cmp, m_range);
if (c == keyrange::comparison::EQUALS) { if (c == keyrange::comparison::EQUALS) {
// Doesn't matter if fn wants to keep going, there // Doesn't matter if fn wants to keep going, there
// is nothing left, so return. // is nothing left, so return.
...@@ -264,7 +264,7 @@ void treenode::traverse_overlaps(const keyrange &range, F *function) { ...@@ -264,7 +264,7 @@ void treenode::traverse_overlaps(const keyrange &range, F *function) {
void treenode::insert(const keyrange &range, TXNID txnid) { void treenode::insert(const keyrange &range, TXNID txnid) {
// choose a child to check. if that child is null, then insert the new node there. // choose a child to check. if that child is null, then insert the new node there.
// otherwise recur down that child's subtree // otherwise recur down that child's subtree
keyrange::comparison c = range.compare(m_cmp, m_range); keyrange::comparison c = range.compare(*m_cmp, m_range);
if (c == keyrange::comparison::LESS_THAN) { if (c == keyrange::comparison::LESS_THAN) {
treenode *left_child = lock_and_rebalance_left(); treenode *left_child = lock_and_rebalance_left();
if (left_child == nullptr) { if (left_child == nullptr) {
...@@ -382,7 +382,7 @@ treenode *treenode::remove(const keyrange &range) { ...@@ -382,7 +382,7 @@ treenode *treenode::remove(const keyrange &range) {
// if the range is equal to this node's range, then just remove // if the range is equal to this node's range, then just remove
// the root of this subtree. otherwise search down the tree // the root of this subtree. otherwise search down the tree
// in either the left or right children. // in either the left or right children.
keyrange::comparison c = range.compare(m_cmp, m_range); keyrange::comparison c = range.compare(*m_cmp, m_range);
switch (c) { switch (c) {
case keyrange::comparison::EQUALS: case keyrange::comparison::EQUALS:
return remove_root_of_subtree(); return remove_root_of_subtree();
......
...@@ -123,7 +123,7 @@ class treenode { ...@@ -123,7 +123,7 @@ class treenode {
// - node may be unlocked if no other thread has visibility // - node may be unlocked if no other thread has visibility
// effect: create the root node // effect: create the root node
void create_root(comparator *cmp); void create_root(const comparator *cmp);
// effect: destroys the root node // effect: destroys the root node
void destroy_root(void); void destroy_root(void);
...@@ -210,7 +210,7 @@ class treenode { ...@@ -210,7 +210,7 @@ class treenode {
child_ptr m_right_child; child_ptr m_right_child;
// comparator for ranges // comparator for ranges
comparator *m_cmp; const comparator *m_cmp;
// marked for the root node. the root node is never free()'d // marked for the root node. the root node is never free()'d
// when removed, but instead marked as empty. // when removed, but instead marked as empty.
...@@ -220,7 +220,7 @@ class treenode { ...@@ -220,7 +220,7 @@ class treenode {
bool m_is_empty; bool m_is_empty;
// effect: initializes an empty node with the given comparator // effect: initializes an empty node with the given comparator
void init(comparator *cmp); void init(const comparator *cmp);
// requires: *parent is initialized to something meaningful. // requires: *parent is initialized to something meaningful.
// requires: subtree is non-empty // requires: subtree is non-empty
...@@ -267,7 +267,7 @@ class treenode { ...@@ -267,7 +267,7 @@ class treenode {
treenode *maybe_rebalance(void); treenode *maybe_rebalance(void);
// returns: allocated treenode populated with a copy of the range and txnid // returns: allocated treenode populated with a copy of the range and txnid
static treenode *alloc(comparator *cmp, const keyrange &range, TXNID txnid); static treenode *alloc(const comparator *cmp, const keyrange &range, TXNID txnid);
// requires: node is a locked root node, or an unlocked non-root node // requires: node is a locked root node, or an unlocked non-root node
static void free(treenode *node); static void free(treenode *node);
......
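treenode stores a const comparator pointer (m_cmp) that every node allocated through treenode::alloc() shares, and comparisons dereference it before applying operator(). A short sketch of the pattern inside treenode methods, with range and txnid standing in for caller-supplied values:

    // m_cmp is a const toku::comparator *, set once by init()/create_root().
    keyrange::comparison c = range.compare(*m_cmp, m_range);   // dereference, then operator()
    bool hit = m_range.overlaps(*m_cmp, range);
    treenode *child = treenode::alloc(m_cmp, range, txnid);    // children reuse the same pointer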