Commit eb6bfea8 authored by Zardosht Kasheff, committed by Yoni Fogel

[t:4062], fix checkin

git-svn-id: file:///svn/toku/tokudb@36695 c7de825b-a66e-492c-adef-691d508d4ae1
parent b770c48f
@@ -12,6 +12,8 @@ enum brtnode_verify_type {
read_none
};
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
static int
string_key_cmp(DB *UU(e), const DBT *a, const DBT *b)
{
@@ -19,39 +21,108 @@ string_key_cmp(DB *UU(e), const DBT *a, const DBT *b)
return strcmp(s, t);
}
static int omt_cmp(OMTVALUE p, void *q)
{
LEAFENTRY a = p, b = q;
void *ak, *bk;
u_int32_t al, bl;
ak = le_key_and_len(a, &al);
bk = le_key_and_len(b, &bl);
int l = MIN(al, bl);
int c = memcmp(ak, bk, l);
if (c < 0) { return -1; }
if (c > 0) { return +1; }
int d = al - bl;
if (d < 0) { return -1; }
if (d > 0) { return +1; }
else { return 0; }
}
static LEAFENTRY
le_fastmalloc(char *key, int keylen, char *val, int vallen)
{
LEAFENTRY r = toku_malloc(sizeof(r->type) + sizeof(r->keylen) + sizeof(r->u.clean.vallen) +
keylen + vallen);
resource_assert(r);
r->type = LE_CLEAN;
r->keylen = keylen;
r->u.clean.vallen = vallen;
memcpy(&r->u.clean.key_val[0], key, keylen);
memcpy(&r->u.clean.key_val[keylen], val, vallen);
return r;
}
static LEAFENTRY
le_malloc(char *key, char *val)
{
int keylen = strlen(key) + 1;
int vallen = strlen(val) + 1;
return le_fastmalloc(key, keylen, val, vallen);
}
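As a quick orientation for the helpers above, a usage sketch (hypothetical, not part of this checkin) showing how a test entry pairs with le_key_and_len(), the same accessor omt_cmp() uses:
// Hypothetical usage sketch: build an entry, then read the key back the
// same way omt_cmp() does.
LEAFENTRY e = le_malloc("hello", "world");
u_int32_t keylen;
void *key = le_key_and_len(e, &keylen);
assert(keylen == strlen("hello") + 1);       // le_malloc() stores the trailing NUL
assert(memcmp(key, "hello", keylen) == 0);
toku_free(e);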
static void
test1(int fd, struct brt_header *brt_h, BRTNODE *dn) {
int r;
struct brtnode_fetch_extra bfe_all;
- fill_bfe_for_full_read(&bfe_all, brt_h, NULL, string_key_cmp);
+ brt_h->compare_fun = string_key_cmp;
+ fill_bfe_for_full_read(&bfe_all, brt_h);
r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &bfe_all);
BOOL is_leaf = ((*dn)->height == 0);
assert(r==0);
for (int i = 0; i < (*dn)->n_children; i++) {
assert(BP_STATE(*dn,i) == PT_AVAIL);
}
// should sweep and NOT get rid of anything
- long bytes_freed;
- toku_brtnode_pe_callback(*dn, 0xffffffff, &bytes_freed, NULL);
+ PAIR_ATTR attr;
+ toku_brtnode_pe_callback(*dn, attr, &attr, NULL);
for (int i = 0; i < (*dn)->n_children; i++) {
assert(BP_STATE(*dn,i) == PT_AVAIL);
}
// should sweep and get compress all
- toku_brtnode_pe_callback(*dn, 0xffffffff, &bytes_freed, NULL);
+ toku_brtnode_pe_callback(*dn, attr, &attr, NULL);
for (int i = 0; i < (*dn)->n_children; i++) {
if (!is_leaf) {
assert(BP_STATE(*dn,i) == PT_COMPRESSED);
}
- long size;
else {
assert(BP_STATE(*dn,i) == PT_ON_DISK);
}
}
+ PAIR_ATTR size;
BOOL req = toku_brtnode_pf_req_callback(*dn, &bfe_all);
assert(req);
toku_brtnode_pf_callback(*dn, &bfe_all, fd, &size);
- toku_brtnode_pe_callback(*dn, 0xffffffff, &bytes_freed, NULL);
+ toku_brtnode_pe_callback(*dn, attr, &attr, NULL);
for (int i = 0; i < (*dn)->n_children; i++) {
assert(BP_STATE(*dn,i) == PT_AVAIL);
}
// should sweep and get compress all
- toku_brtnode_pe_callback(*dn, 0xffffffff, &bytes_freed, NULL);
+ toku_brtnode_pe_callback(*dn, attr, &attr, NULL);
for (int i = 0; i < (*dn)->n_children; i++) {
if (!is_leaf) {
assert(BP_STATE(*dn,i) == PT_COMPRESSED);
}
else {
assert(BP_STATE(*dn,i) == PT_ON_DISK);
}
}
req = toku_brtnode_pf_req_callback(*dn, &bfe_all);
assert(req);
toku_brtnode_pf_callback(*dn, &bfe_all, fd, &size);
toku_brtnode_pe_callback(*dn, attr, &attr, NULL);
for (int i = 0; i < (*dn)->n_children; i++) {
assert(BP_STATE(*dn,i) == PT_AVAIL);
}
(*dn)->dirty = 1;
toku_brtnode_pe_callback(*dn, attr, &attr, NULL);
toku_brtnode_pe_callback(*dn, attr, &attr, NULL);
toku_brtnode_pe_callback(*dn, attr, &attr, NULL);
toku_brtnode_pe_callback(*dn, attr, &attr, NULL);
for (int i = 0; i < (*dn)->n_children; i++) {
assert(BP_STATE(*dn,i) == PT_AVAIL);
}
toku_brtnode_free(dn);
}
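The per-partition assert loops recur throughout test1 (and again below); a small helper along these lines (hypothetical, not part of this checkin) would express the same check:
// Hypothetical helper: assert that every partition of a node is in the
// expected paging state (PT_AVAIL, PT_COMPRESSED, or PT_ON_DISK).
static void
assert_all_partitions(BRTNODE node, int expected_state) {
    for (int i = 0; i < node->n_children; i++) {
        assert(BP_STATE(node, i) == expected_state);
    }
}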
@@ -72,11 +143,10 @@ test2(int fd, struct brt_header *brt_h, BRTNODE *dn) {
memset(&right, 0, sizeof(right));
brt_search_t search_t;
+ brt_h->compare_fun = string_key_cmp;
fill_bfe_for_subset_read(
&bfe_subset,
brt_h,
- &dummy_db,
- string_key_cmp,
brt_search_init(
&search_t,
search_cmp,
@@ -90,15 +160,61 @@ test2(int fd, struct brt_header *brt_h, BRTNODE *dn) {
TRUE
);
r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &bfe_subset);
printf("states %d %d %d %d\n", BP_STATE(*dn, 0), BP_SHOULD_EVICT(*dn, 0), BP_SHOULD_EVICT(*dn, 1), BP_STATE(*dn, 1));
BOOL is_leaf = ((*dn)->height == 0);
// at this point, although both partitions are available, only the
// second basement node should have had its clock
// touched
assert(BP_STATE(*dn, 0) == PT_AVAIL);
assert(BP_STATE(*dn, 1) == PT_AVAIL);
assert(BP_SHOULD_EVICT(*dn, 0));
assert(!BP_SHOULD_EVICT(*dn, 1));
PAIR_ATTR attr;
toku_brtnode_pe_callback(*dn, attr, &attr, NULL);
assert(BP_STATE(*dn, 0) == ((is_leaf) ? PT_ON_DISK : PT_COMPRESSED));
assert(BP_STATE(*dn, 1) == PT_AVAIL);
assert(BP_SHOULD_EVICT(*dn, 1));
toku_brtnode_pe_callback(*dn, attr, &attr, NULL);
assert(BP_STATE(*dn, 1) == ((is_leaf) ? PT_ON_DISK : PT_COMPRESSED));
BOOL req = toku_brtnode_pf_req_callback(*dn, &bfe_subset);
assert(req);
toku_brtnode_pf_callback(*dn, &bfe_subset, fd, &attr);
assert(BP_STATE(*dn, 0) == PT_AVAIL);
assert(BP_STATE(*dn, 1) == PT_AVAIL);
assert(BP_SHOULD_EVICT(*dn, 0));
assert(!BP_SHOULD_EVICT(*dn, 1));
toku_brtnode_free(dn);
}
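test2 drives the same evict-then-refetch cycle as test1, only through a subset read, so just the searched partition keeps its clock bit set. A compact sketch of that round trip (hypothetical, assuming the callback signatures used above):
// Hypothetical round-trip sketch (not part of this checkin): run one
// partial-eviction pass, then fetch back whatever the bfe still needs.
// attr's contents are ignored here, mirroring the calls in the tests above.
static void
evict_and_refetch(BRTNODE node, struct brtnode_fetch_extra *bfe, int fd) {
    PAIR_ATTR attr;
    toku_brtnode_pe_callback(node, attr, &attr, NULL);   // partial eviction pass
    if (toku_brtnode_pf_req_callback(node, bfe)) {       // anything still missing?
        toku_brtnode_pf_callback(node, bfe, fd, &attr);  // partial fetch it back
    }
}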
static void
test3_leaf(int fd, struct brt_header *brt_h, BRTNODE *dn) {
int r;
struct brtnode_fetch_extra bfe_min;
DBT left, right;
DB dummy_db;
memset(&dummy_db, 0, sizeof(dummy_db));
memset(&left, 0, sizeof(left));
memset(&right, 0, sizeof(right));
brt_h->compare_fun = string_key_cmp;
fill_bfe_for_min_read(
&bfe_min,
brt_h
);
r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &bfe_min);
//
// make sure we have a leaf
//
assert((*dn)->height == 0);
for (int i = 0; i < (*dn)->n_children; i++) {
assert(BP_STATE(*dn, i) == PT_ON_DISK);
}
toku_brtnode_free(dn);
}
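With the hypothetical assert_all_partitions() helper sketched after test1, the post-deserialization checks in test3_leaf reduce to:
// Equivalent check using the hypothetical helper sketched earlier.
assert((*dn)->height == 0);              // must be a leaf
assert_all_partitions(*dn, PT_ON_DISK);  // a min read materializes nothing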
static void
test_serialize_nonleaf(void) {
@@ -191,7 +307,6 @@ test_serialize_nonleaf(void) {
test1(fd, brt_h, &dn);
test2(fd, brt_h, &dn);
kv_pair_free(sn.childkeys[0]);
toku_free(hello_string);
destroy_nonleaf_childinfo(BNC(&sn, 0));
@@ -207,9 +322,107 @@ test_serialize_nonleaf(void) {
r = close(fd); assert(r != -1);
}
static void
test_serialize_leaf(void) {
// struct brt source_brt;
const int nodesize = 1024;
struct brtnode sn, *dn;
int fd = open(__FILE__ ".brt", O_RDWR|O_CREAT|O_BINARY, S_IRWXU|S_IRWXG|S_IRWXO); assert(fd >= 0);
int r;
sn.max_msn_applied_to_node_on_disk.msn = 0;
sn.nodesize = nodesize;
sn.flags = 0x11223344;
sn.thisnodename.b = 20;
sn.layout_version = BRT_LAYOUT_VERSION;
sn.layout_version_original = BRT_LAYOUT_VERSION;
sn.height = 0;
sn.optimized_for_upgrade = 1234;
sn.n_children = 2;
sn.dirty = 1;
LEAFENTRY elts[3];
elts[0] = le_malloc("a", "aval");
elts[1] = le_malloc("b", "bval");
elts[2] = le_malloc("x", "xval");
MALLOC_N(sn.n_children, sn.bp);
MALLOC_N(1, sn.childkeys);
sn.childkeys[0] = kv_pair_malloc("b", 2, 0, 0);
sn.totalchildkeylens = 2;
BP_SUBTREE_EST(&sn,0).ndata = random() + (((long long)random())<<32);
BP_SUBTREE_EST(&sn,1).ndata = random() + (((long long)random())<<32);
BP_SUBTREE_EST(&sn,0).nkeys = random() + (((long long)random())<<32);
BP_SUBTREE_EST(&sn,1).nkeys = random() + (((long long)random())<<32);
BP_SUBTREE_EST(&sn,0).dsize = random() + (((long long)random())<<32);
BP_SUBTREE_EST(&sn,1).dsize = random() + (((long long)random())<<32);
BP_SUBTREE_EST(&sn,0).exact = (BOOL)(random()%2 != 0);
BP_SUBTREE_EST(&sn,1).exact = (BOOL)(random()%2 != 0);
BP_STATE(&sn,0) = PT_AVAIL;
BP_STATE(&sn,1) = PT_AVAIL;
set_BLB(&sn, 0, toku_create_empty_bn());
set_BLB(&sn, 1, toku_create_empty_bn());
r = toku_omt_insert(BLB_BUFFER(&sn, 0), elts[0], omt_cmp, elts[0], NULL); assert(r==0);
r = toku_omt_insert(BLB_BUFFER(&sn, 0), elts[1], omt_cmp, elts[1], NULL); assert(r==0);
r = toku_omt_insert(BLB_BUFFER(&sn, 1), elts[2], omt_cmp, elts[2], NULL); assert(r==0);
BLB_NBYTESINBUF(&sn, 0) = 2*(KEY_VALUE_OVERHEAD+2+5) + toku_omt_size(BLB_BUFFER(&sn, 0));
BLB_NBYTESINBUF(&sn, 1) = 1*(KEY_VALUE_OVERHEAD+2+5) + toku_omt_size(BLB_BUFFER(&sn, 1));
struct brt *XMALLOC(brt);
struct brt_header *XCALLOC(brt_h);
brt->h = brt_h;
brt_h->type = BRTHEADER_CURRENT;
brt_h->panic = 0; brt_h->panic_string = 0;
brt_h->basementnodesize = 128*1024;
toku_blocktable_create_new(&brt_h->blocktable);
//Want to use block #20
BLOCKNUM b = make_blocknum(0);
while (b.b < 20) {
toku_allocate_blocknum(brt_h->blocktable, &b, brt_h);
}
assert(b.b == 20);
{
DISKOFF offset;
DISKOFF size;
toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, FALSE);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100);
}
r = toku_serialize_brtnode_to(fd, make_blocknum(20), &sn, brt->h, 1, 1, FALSE);
assert(r==0);
test1(fd, brt_h, &dn);
test3_leaf(fd, brt_h,&dn);
for (int i = 0; i < sn.n_children-1; ++i) {
kv_pair_free(sn.childkeys[i]);
}
for (int i = 0; i < 3; ++i) {
toku_free(elts[i]);
}
for (int i = 0; i < sn.n_children; i++) {
destroy_basement_node(BLB(&sn, i));
}
toku_free(sn.bp);
toku_free(sn.childkeys);
toku_block_free(brt_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&brt_h->blocktable);
toku_free(brt_h);
toku_free(brt);
r = close(fd); assert(r != -1);
}
int
test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
test_serialize_nonleaf();
test_serialize_leaf();
return 0;
}