Commit 3c25dc75 authored by Barry Perlman, committed by Yoni Fogel

[t:4050] #4050 Merge tokudb.4050 to merge, done with command svn merge -r36213:HEAD tokudb.4050 tokudb

git-svn-id: file:///svn/toku/tokudb@36808 c7de825b-a66e-492c-adef-691d508d4ae1
parent f977b6e7
@@ -66,6 +66,7 @@ BRT_SOURCES = \
     log_print \
     logcursor \
     memarena \
+    mempool \
     minicron \
     omt \
     pqueue \
...
@@ -26,6 +26,7 @@
 #include "leafentry.h"
 #include "block_table.h"
 #include "c_dialects.h"
+#include "mempool.h"
 // Uncomment the following to use quicklz
@@ -132,10 +133,12 @@ int toku_bnc_flush_to_child(
 // data of an available partition of a leaf brtnode
 struct brtnode_leaf_basement_node {
-    OMT buffer;
-    unsigned int n_bytes_in_buffer; /* How many bytes to represent the OMT (including the per-key overheads, but not including the overheads for the node. */
-    unsigned int seqinsert;         /* number of sequential inserts to this leaf */
-    MSN max_msn_applied;            // max message sequence number applied
+    OMT buffer;                     // pointers to individual leaf entries
+    struct mempool buffer_mempool;  // storage for all leaf entries
+    unsigned int n_bytes_in_buffer; // How many bytes to represent the OMT (including the per-key overheads, ...
+                                    // ... but not including the overheads for the node.
+    unsigned int seqinsert;         // number of sequential inserts to this leaf
+    MSN max_msn_applied;            // max message sequence number applied
     bool stale_ancestor_messages_applied;
 };
@@ -302,6 +305,7 @@ static inline void set_BSB(BRTNODE node, int i, SUB_BLOCK sb) {
 #define BLB_MAX_MSN_APPLIED(node,i) (BLB(node,i)->max_msn_applied)
 #define BLB_MAX_DSN_APPLIED(node,i) (BLB(node,i)->max_dsn_applied)
 #define BLB_BUFFER(node,i) (BLB(node,i)->buffer)
+#define BLB_BUFFER_MEMPOOL(node,i) (BLB(node,i)->buffer_mempool)
 #define BLB_NBYTESINBUF(node,i) (BLB(node,i)->n_bytes_in_buffer)
 #define BLB_SEQINSERT(node,i) (BLB(node,i)->seqinsert)
@@ -683,6 +687,12 @@ int toku_cmd_leafval_heaviside (OMTVALUE leafentry, void *extra)
 int toku_brt_root_put_cmd(BRT brt, BRT_MSG_S * cmd)
     __attribute__((__warn_unused_result__));
+void *mempool_malloc_from_omt(OMT omt, struct mempool *mp, size_t size, void **maybe_free);
+// Effect: Allocate a new object of size SIZE in MP.  If MP runs out of space, allocate a new mempool space, and copy all the items
+//  from the OMT (which items refer to items in the old mempool) into the new mempool.
+//  If MAYBE_FREE is NULL then free the old mempool's space.
+//  Otherwise, store the old mempool's space in maybe_free.
 int toku_verify_brtnode (BRT brt, MSN rootmsn, MSN parentmsn,
                          BLOCKNUM blocknum, int height, struct kv_pair *lesser_pivot, struct kv_pair *greatereq_pivot,
                          int (*progress_callback)(void *extra, float progress), void *extra,
...
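A side note on the Effect comment for mempool_malloc_from_omt above: it describes a grow-and-copy allocator. The sketch below is only a hedged illustration of that behavior; it uses a plain pointer array and a toy struct pool in place of the real OMT and struct mempool, and every name in it (pool_malloc_from_items, ITEM_SIZE, struct pool) is invented for illustration and is not part of the tokudb sources.

#include <assert.h>
#include <stdlib.h>
#include <string.h>

// Illustrative stand-ins for the real OMT/mempool types (not tokudb code).
enum { ITEM_SIZE = 32 };                      // pretend every leaf entry is 32 bytes
struct pool { char *base; size_t size; size_t free_offset; };

// Allocate size bytes from p.  If p is full, build a larger pool, copy every
// existing item into it (updating the caller-visible pointers), and either free
// the old buffer or hand it back through *maybe_free -- the behavior the Effect
// comment above describes.
void *pool_malloc_from_items(void **items, size_t nitems,
                             struct pool *p, size_t size, void **maybe_free) {
    if (p->free_offset + size > p->size) {
        size_t need = p->free_offset + size;
        size_t newsize = need + need / 4;                 // leave 1/4 headroom
        char *newbase = malloc(newsize);
        assert(newbase);
        size_t off = 0;
        for (size_t i = 0; i < nitems; i++) {             // copy items, fix pointers
            memcpy(newbase + off, items[i], ITEM_SIZE);
            items[i] = newbase + off;
            off += ITEM_SIZE;
        }
        if (maybe_free) *maybe_free = p->base;            // caller frees it later
        else            free(p->base);
        p->base = newbase;
        p->size = newsize;
        p->free_offset = off;
    }
    void *rval = p->base + p->free_offset;
    p->free_offset += size;
    return rval;
}

The real mempool_malloc_from_omt walks the OMT to find and rewrite each leafentry pointer; the fixed-size copy loop above merely stands in for that walk.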
This diff is collapsed.
@@ -129,7 +129,7 @@ int toku_testsetup_insert_to_leaf (BRT brt, BLOCKNUM blocknum, char *key, int ke
     toku_verify_or_set_counts(node);
     assert(node->height==0);
-    size_t lesize, disksize;
+    size_t newlesize;
     LEAFENTRY leafentry;
     OMTVALUE storeddatav;
     u_int32_t idx;
@@ -139,8 +139,17 @@
                              .u.id={toku_fill_dbt(&keydbt, key, keylen),
                                     toku_fill_dbt(&valdbt, val, vallen)}};
     //Generate a leafentry (committed insert key,val)
+    uint childnum = toku_brtnode_which_child(node,
+                                             &keydbt,
+                                             &brt->h->descriptor, brt->compare_fun);
+    BASEMENTNODE bn = BLB(node, childnum);
+    void * maybe_free = 0;
     r = apply_msg_to_leafentry(&cmd, NULL, //No old leafentry
-                               &lesize, &disksize, &leafentry,
+                               &newlesize, &leafentry,
+                               bn->buffer, &bn->buffer_mempool, &maybe_free,
                                NULL, NULL);
     assert(r==0);
@@ -163,7 +172,7 @@
     // hack to get tests passing. These tests should not be directly inserting into buffers
     BLB(node, 0)->max_msn_applied = msn;
-    BLB_NBYTESINBUF(node, 0) += disksize;
+    BLB_NBYTESINBUF(node, 0) += newlesize;
     node->dirty=1;
...
This diff is collapsed.
@@ -168,7 +168,6 @@ leafentry_disksize_13(LEAFENTRY_13 le);
 int
 toku_le_upgrade_13_14(LEAFENTRY_13 old_leafentry, // NULL if there was no stored data.
                       size_t *new_leafentry_memorysize,
-                      size_t *new_leafentry_disksize,
                       LEAFENTRY *new_leafentry_p);
...
/* -*- mode: C; c-basic-offset: 4 -*- */
#ident "$Id: mempool.c 19902 2010-05-06 20:41:32Z bkuszmaul $"
#ident "Copyright (c) 2007-2010 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include "includes.h"
/* Contract:
* Caller allocates mempool struct as convenient for caller, but memory used for data storage
* must be dynamically allocated via toku_malloc().
* Caller dynamically allocates memory for mempool and initializes mempool by calling toku_mempool_init().
* Once a buffer is assigned to a mempool (via toku_mempool_init()), the mempool owns it and
* is responsible for destroying it when the mempool is destroyed.
* Caller destroys mempool by calling toku_mempool_destroy().
*
* Note, toku_mempool_init() does not allocate the memory because sometimes the caller will already have
* the memory allocated and will assign the pre-allocated memory to the mempool.
*/
/* This is a constructor to be used when the memory for the mempool struct has been
 * allocated by the caller, but no memory has yet been allocated for the data.
*/
void toku_mempool_zero(struct mempool *mp) {
// printf("mempool_zero %p\n", mp);
memset(mp, 0, sizeof(*mp));
}
/* Copy constructor. Any time a new mempool is needed, allocate 1/4 more space
* than is currently needed.
*/
void toku_mempool_copy_construct(struct mempool *mp, const void * const data_source, const size_t data_size) {
// printf("mempool_copy %p %p %lu\n", mp, data_source, data_size);
if (data_size) {
invariant(data_source);
toku_mempool_construct(mp, data_size);
memcpy(mp->base, data_source, data_size);
mp->free_offset = data_size; // address of first available memory for new data
}
else {
toku_mempool_zero(mp);
// fprintf(stderr, "Empty mempool created (copy constructor)\n");
}
}
// TODO 4050 this is dirty, try to replace all uses of this
void toku_mempool_init(struct mempool *mp, void *base, size_t size) {
// printf("mempool_init %p %p %lu\n", mp, base, size);
invariant(base != 0);
invariant(size < (1U<<31)); // used to be assert(size >= 0), but changed to size_t so now let's make sure it's not more than 2GB...
mp->base = base;
mp->size = size;
mp->free_offset = 0; // address of first available memory
mp->frag_size = 0; // byte count of wasted space (formerly used, no longer used or available)
}
/* allocate memory and construct mempool
*/
void toku_mempool_construct(struct mempool *mp, size_t data_size) {
if (data_size) {
size_t mpsize = data_size + (data_size/4); // allow 1/4 room for expansion (would be wasted if read-only)
mp->base = toku_xmalloc(mpsize); // allocate buffer for mempool
mp->size = mpsize;
mp->free_offset = 0; // address of first available memory for new data
mp->frag_size = 0; // all allocated space is now in use
}
else {
toku_mempool_zero(mp);
// fprintf(stderr, "Empty mempool created (base constructor)\n");
}
}
void toku_mempool_destroy(struct mempool *mp) {
// printf("mempool_destroy %p %p %lu %lu\n", mp, mp->base, mp->size, mp->frag_size);
if (mp->base)
toku_free(mp->base);
toku_mempool_zero(mp);
}
void *toku_mempool_get_base(struct mempool *mp) {
return mp->base;
}
size_t toku_mempool_get_size(struct mempool *mp) {
return mp->size;
}
size_t toku_mempool_get_frag_size(struct mempool *mp) {
return mp->frag_size;
}
size_t toku_mempool_get_used_space(struct mempool *mp) {
return mp->free_offset - mp->frag_size;
}
size_t toku_mempool_get_free_space(struct mempool *mp) {
return mp->size - mp->free_offset;
}
void *toku_mempool_malloc(struct mempool *mp, size_t size, int alignment) {
invariant(size < (1U<<31));
invariant(mp->size < (1U<<31));
invariant(mp->free_offset < (1U<<31));
assert(mp->free_offset <= mp->size);
void *vp;
size_t offset = (mp->free_offset + (alignment-1)) & ~(alignment-1);
//printf("mempool_malloc size=%ld base=%p free_offset=%ld mp->size=%ld offset=%ld\n", size, mp->base, mp->free_offset, mp->size, offset);
if (offset + size > mp->size) {
vp = 0;
} else {
vp = (char *)mp->base + offset;
mp->free_offset = offset + size;
}
assert(mp->free_offset <= mp->size);
assert(((long)vp & (alignment-1)) == 0);
assert(vp == 0 || toku_mempool_inrange(mp, vp, size));
//printf("mempool returning %p\n", vp);
return vp;
}
// if vp is null then we are freeing something, but not specifying what. The data won't be freed until compression is done.
void toku_mempool_mfree(struct mempool *mp, void *vp, size_t size) {
if (vp) assert(toku_mempool_inrange(mp, vp, size));
mp->frag_size += size;
assert(mp->frag_size <= mp->size);
}
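A quick aside on the align-up expression used by toku_mempool_malloc above: (free_offset + (alignment-1)) & ~(alignment-1) rounds an offset up to the next multiple of a power-of-two alignment. The following standalone check is illustrative only and not part of the tokudb sources.

#include <assert.h>
#include <stddef.h>

// Round offset up to the next multiple of align (align must be a power of two),
// the same mask trick toku_mempool_malloc uses to place each allocation.
static size_t align_up(size_t offset, size_t align) {
    return (offset + (align - 1)) & ~(align - 1);
}

int main(void) {
    assert(align_up(13, 8) == 16);   // 13 rounds up to the next multiple of 8
    assert(align_up(16, 8) == 16);   // already-aligned offsets are unchanged
    assert(align_up(0, 4)  == 0);
    return 0;
}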
#ifndef _TOKU_MEMPOOL_H
#define _TOKU_MEMPOOL_H
#ident "$Id: mempool.h 19902 2010-05-06 20:41:32Z bkuszmaul $"
#ident "Copyright (c) 2007-2010 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
/* a memory pool is a contiguous region of memory that supports single
allocations from the pool. these allocated regions are never recycled.
when the memory pool no longer has free space, the allocated chunks
must be relocated by the application to a new memory pool. */
#include <sys/types.h>
#if defined(__cplusplus) || defined(__cilkplusplus)
extern "C" {
#endif
struct mempool;
// TODO 4050 Hide mempool struct internals from callers
struct mempool {
void *base; /* the base address of the memory */
size_t free_offset; /* the offset of the memory pool free space */
size_t size; /* the size of the memory */
size_t frag_size; /* the size of the fragmented memory */
};
/* This is a constructor to be used when the memory for the mempool struct has been
 * allocated by the caller, but no memory has yet been allocated for the data.
*/
void toku_mempool_zero(struct mempool *mp);
/* Copy constructor. Fill in empty mempool struct with new values, allocating
 * a new buffer and filling the buffer with data from data_source.
* Any time a new mempool is needed, allocate 1/4 more space
* than is currently needed.
*/
void toku_mempool_copy_construct(struct mempool *mp, const void * const data_source, const size_t data_size);
/* initialize the memory pool with the base address and size of a
contiguous chunk of memory */
void toku_mempool_init(struct mempool *mp, void *base, size_t size);
/* allocate memory and construct mempool
*/
void toku_mempool_construct(struct mempool *mp, size_t data_size);
/* destroy the memory pool */
void toku_mempool_destroy(struct mempool *mp);
/* get the base address of the memory pool */
void *toku_mempool_get_base(struct mempool *mp);
/* get the size of the memory pool */
size_t toku_mempool_get_size(struct mempool *mp);
/* get the amount of fragmented (wasted) space in the memory pool */
size_t toku_mempool_get_frag_size(struct mempool *mp);
/* get the amount of space that is holding useful data */
size_t toku_mempool_get_used_space(struct mempool *mp);
/* get the amount of space that is available for new data */
size_t toku_mempool_get_free_space(struct mempool *mp);
/* allocate a chunk of memory from the memory pool suitably aligned */
void *toku_mempool_malloc(struct mempool *mp, size_t size, int alignment);
/* free a previously allocated chunk of memory. the free only updates
a count of the amount of free space in the memory pool. the memory
pool does not keep track of the locations of the free chunks */
void toku_mempool_mfree(struct mempool *mp, void *vp, size_t size);
/* verify that a memory range is contained within a mempool */
static inline int toku_mempool_inrange(struct mempool *mp, void *vp, size_t size) {
return (mp->base <= vp) && ((char *)vp + size <= (char *)mp->base + mp->size);
}
#if defined(__cplusplus) || defined(__cilkplusplus)
};
#endif
#endif
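For orientation, here is a hedged usage sketch of the API declared in this header. It assumes the snippet is compiled inside the newbrt tree against the mempool.c added in this commit; the function mempool_lifecycle_sketch is illustrative and not part of the sources.

#include <assert.h>
#include <string.h>
#include "mempool.h"

// Rough lifecycle: construct a pool sized for the initial data, carve chunks
// out of it, mark one chunk as dead with mfree, then destroy the pool.
void mempool_lifecycle_sketch(void) {
    struct mempool mp;
    toku_mempool_construct(&mp, 1024);            // allocates 1024 plus 1/4 headroom

    void *a = toku_mempool_malloc(&mp, 100, 4);   // 4-byte-aligned chunk
    void *b = toku_mempool_malloc(&mp, 200, 4);
    assert(a && b && toku_mempool_inrange(&mp, a, 100));

    memset(a, 0, 100);
    toku_mempool_mfree(&mp, a, 100);              // space becomes fragmentation
    assert(toku_mempool_get_frag_size(&mp) == 100);
    assert(toku_mempool_get_used_space(&mp) >= 200);

    toku_mempool_destroy(&mp);                    // frees the backing buffer
}

Note that freed space is only counted as fragmentation; reclaiming it requires copying the live data into a new pool (toku_mempool_copy_construct), as the header comment above explains.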
This diff is collapsed.
@@ -106,17 +106,16 @@ insert_random_message_to_leaf(BRT t, BASEMENTNODE blb, LEAFENTRY *save, XIDS xid
     toku_fill_dbt(keydbt, key, keylen + (sizeof pfx));
     toku_fill_dbt(valdbt, val, vallen);
     BRT_MSG_S msg;
-    BRT_MSG_S *result = &msg;
-    result->type = BRT_INSERT;
-    result->msn = msn;
-    result->xids = xids;
-    result->u.id.key = keydbt;
-    result->u.id.val = valdbt;
-    size_t memsize, disksize;
-    int r = apply_msg_to_leafentry(result, NULL, &memsize, &disksize, save, NULL, NULL);
+    msg.type = BRT_INSERT;
+    msg.msn = msn;
+    msg.xids = xids;
+    msg.u.id.key = keydbt;
+    msg.u.id.val = valdbt;
+    size_t memsize;
+    int r = apply_msg_to_leafentry(&msg, NULL, &memsize, save, NULL, NULL, NULL, NULL, NULL);
     assert_zero(r);
     bool made_change;
-    brt_leaf_put_cmd(t->compare_fun, t->update_fun, NULL, blb, result, &made_change, NULL, NULL, NULL);
+    brt_leaf_put_cmd(t->compare_fun, t->update_fun, NULL, blb, &msg, &made_change, NULL, NULL, NULL);
     if (msn.msn > blb->max_msn_applied.msn) {
         blb->max_msn_applied = msn;
     }
@@ -140,21 +139,20 @@ insert_same_message_to_leaves(BRT t, BASEMENTNODE blb1, BASEMENTNODE blb2, LEAFE
     toku_fill_dbt(keydbt, key, keylen + (sizeof pfx));
     toku_fill_dbt(valdbt, val, vallen);
     BRT_MSG_S msg;
-    BRT_MSG_S *result = &msg;
-    result->type = BRT_INSERT;
-    result->msn = msn;
-    result->xids = xids;
-    result->u.id.key = keydbt;
-    result->u.id.val = valdbt;
-    size_t memsize, disksize;
-    int r = apply_msg_to_leafentry(result, NULL, &memsize, &disksize, save, NULL, NULL);
+    msg.type = BRT_INSERT;
+    msg.msn = msn;
+    msg.xids = xids;
+    msg.u.id.key = keydbt;
+    msg.u.id.val = valdbt;
+    size_t memsize;
+    int r = apply_msg_to_leafentry(&msg, NULL, &memsize, save, NULL, NULL, NULL, NULL, NULL);
     assert_zero(r);
     bool made_change;
-    brt_leaf_put_cmd(t->compare_fun, t->update_fun, NULL, blb1, result, &made_change, NULL, NULL, NULL);
+    brt_leaf_put_cmd(t->compare_fun, t->update_fun, NULL, blb1, &msg, &made_change, NULL, NULL, NULL);
     if (msn.msn > blb1->max_msn_applied.msn) {
         blb1->max_msn_applied = msn;
     }
-    brt_leaf_put_cmd(t->compare_fun, t->update_fun, NULL, blb2, result, &made_change, NULL, NULL, NULL);
+    brt_leaf_put_cmd(t->compare_fun, t->update_fun, NULL, blb2, &msg, &made_change, NULL, NULL, NULL);
     if (msn.msn > blb2->max_msn_applied.msn) {
         blb2->max_msn_applied = msn;
     }
...
@@ -130,11 +130,8 @@ test_le_offsets (void) {
 static void
 test_ule_packs_to_nothing (ULE ule) {
     size_t memsize;
-    size_t disksize;
     LEAFENTRY le;
-    int r = le_pack(ule,
-                    &memsize, &disksize,
-                    &le);
+    int r = le_pack(ule, &memsize, &le, NULL, NULL, NULL);
     assert(r==0);
     assert(le==NULL);
 }
@@ -177,16 +174,13 @@ test_le_empty_packs_to_nothing (void) {
 }
 static void
-le_verify_accessors(LEAFENTRY le, ULE ule,
-                    size_t pre_calculated_memsize,
-                    size_t pre_calculated_disksize) {
+le_verify_accessors(LEAFENTRY le, ULE ule, size_t pre_calculated_memsize) {
     assert(le);
     assert(ule->num_cuxrs > 0);
     assert(ule->num_puxrs <= MAX_TRANSACTION_RECORDS);
     assert(ule->uxrs[ule->num_cuxrs + ule->num_puxrs-1].type != XR_PLACEHOLDER);
     //Extract expected values from ULE
     size_t memsize = le_memsize_from_ule(ule);
-    size_t disksize = le_memsize_from_ule(ule);
     size_t num_uxrs = ule->num_cuxrs + ule->num_puxrs;
     void *key = ule->keyp;
@@ -209,10 +203,7 @@ found_insert:;
     assert(le!=NULL);
     //Verify all accessors
     assert(memsize == pre_calculated_memsize);
-    assert(disksize == pre_calculated_disksize);
-    assert(memsize == disksize);
     assert(memsize == leafentry_memsize(le));
-    assert(disksize == leafentry_disksize(le));
     {
         u_int32_t test_keylen;
         void* test_keyp = le_key_and_len(le, &test_keylen);
@@ -265,26 +256,19 @@ test_le_pack_committed (void) {
     ule.uxrs[0].vallen = valsize;
     size_t memsize;
-    size_t disksize;
     LEAFENTRY le;
-    int r = le_pack(&ule,
-                    &memsize, &disksize,
-                    &le);
+    int r = le_pack(&ule, &memsize, &le, NULL, NULL, NULL);
     assert(r==0);
     assert(le!=NULL);
-    le_verify_accessors(le, &ule, memsize, disksize);
+    le_verify_accessors(le, &ule, memsize);
     ULE_S tmp_ule;
     le_unpack(&tmp_ule, le);
     verify_ule_equal(&ule, &tmp_ule);
     LEAFENTRY tmp_le;
     size_t tmp_memsize;
-    size_t tmp_disksize;
-    r = le_pack(&tmp_ule,
-                &tmp_memsize, &tmp_disksize,
-                &tmp_le);
+    r = le_pack(&tmp_ule, &tmp_memsize, &tmp_le, NULL, NULL, NULL);
     assert(r==0);
     assert(tmp_memsize == memsize);
-    assert(tmp_disksize == disksize);
     assert(memcmp(le, tmp_le, memsize) == 0);
     toku_free(tmp_le);
@@ -334,26 +318,19 @@ test_le_pack_uncommitted (u_int8_t committed_type, u_int8_t prov_type, int num_p
     ule.uxrs[idx].valp = pval;
     size_t memsize;
-    size_t disksize;
     LEAFENTRY le;
-    int r = le_pack(&ule,
-                    &memsize, &disksize,
-                    &le);
+    int r = le_pack(&ule, &memsize, &le, NULL, NULL, NULL);
     assert(r==0);
     assert(le!=NULL);
-    le_verify_accessors(le, &ule, memsize, disksize);
+    le_verify_accessors(le, &ule, memsize);
     ULE_S tmp_ule;
     le_unpack(&tmp_ule, le);
     verify_ule_equal(&ule, &tmp_ule);
     LEAFENTRY tmp_le;
     size_t tmp_memsize;
-    size_t tmp_disksize;
-    r = le_pack(&tmp_ule,
-                &tmp_memsize, &tmp_disksize,
-                &tmp_le);
+    r = le_pack(&tmp_ule, &tmp_memsize, &tmp_le, NULL, NULL, NULL);
     assert(r==0);
     assert(tmp_memsize == memsize);
-    assert(tmp_disksize == disksize);
     assert(memcmp(le, tmp_le, memsize) == 0);
     toku_free(tmp_le);
@@ -412,34 +389,29 @@ test_le_apply(ULE ule_initial, BRT_MSG msg, ULE ule_expected) {
     LEAFENTRY le_result;
     size_t initial_memsize;
-    size_t initial_disksize;
-    r = le_pack(ule_initial, &initial_memsize, &initial_disksize,
-                &le_initial);
+    r = le_pack(ule_initial, &initial_memsize, &le_initial, NULL, NULL, NULL);
     CKERR(r);
     size_t result_memsize;
-    size_t result_disksize;
     r = apply_msg_to_leafentry(msg,
                                le_initial,
-                               &result_memsize, &result_disksize,
+                               &result_memsize,
                                &le_result,
+                               NULL, NULL, NULL,
                                NULL, NULL);
     CKERR(r);
     if (le_result)
-        le_verify_accessors(le_result, ule_expected, result_memsize, result_disksize);
+        le_verify_accessors(le_result, ule_expected, result_memsize);
     size_t expected_memsize;
-    size_t expected_disksize;
-    r = le_pack(ule_expected, &expected_memsize, &expected_disksize,
-                &le_expected);
+    r = le_pack(ule_expected, &expected_memsize, &le_expected, NULL, NULL, NULL);
     CKERR(r);
     verify_le_equal(le_result, le_expected);
     if (le_result && le_expected) {
         assert(result_memsize == expected_memsize);
-        assert(result_disksize == expected_disksize);
     }
     if (le_initial) toku_free(le_initial);
     if (le_result) toku_free(le_result);
...
This diff is collapsed.
@@ -62,10 +62,11 @@ void test_msg_modify_ule(ULE ule, BRT_MSG msg);
 //Functions exported for test purposes only (used internally for non-test purposes).
 void le_unpack(ULE ule, LEAFENTRY le);
 int le_pack(ULE ule,                     // data to be packed into new leafentry
             size_t *new_leafentry_memorysize,
-            size_t *new_leafentry_disksize,
-            LEAFENTRY * const new_leafentry_p  // this is what this function creates
-            );
+            LEAFENTRY * const new_leafentry_p, // this is what this function creates
+            OMT omt,
+            struct mempool *mp,
+            void **maybe_free);
 size_t le_memsize_from_ule (ULE ule);
...
@@ -120,12 +120,19 @@ static inline size_t uxr_unpack_length_and_bit(UXR uxr, uint8_t *p);
 static inline size_t uxr_unpack_data(UXR uxr, uint8_t *p);
 static void *
-le_malloc(size_t size)
+le_malloc(OMT omt, struct mempool *mp, size_t size, void **maybe_free)
 {
-    return toku_xmalloc(size);
+    void * rval;
+    if (omt)
+        rval = mempool_malloc_from_omt(omt, mp, size, maybe_free);
+    else
+        rval = toku_xmalloc(size);
+    resource_assert(rval);
+    return rval;
 }
 /////////////////////////////////////////////////////////////////////
 // Garbage collection related functions
 //
@@ -288,13 +295,15 @@ done:;
 // Return 0 on success.
 // If the leafentry is destroyed it sets *new_leafentry_p to NULL.
 // Otherwise the new_leafentry_p points at the new leaf entry.
-// As of September 2010, the only possible error returned is ENOMEM.
+// As of October 2011, this function always returns 0.
 int
 apply_msg_to_leafentry(BRT_MSG msg,            // message to apply to leafentry
                        LEAFENTRY old_leafentry, // NULL if there was no stored data.
                        size_t *new_leafentry_memorysize,
-                       size_t *new_leafentry_disksize,
                        LEAFENTRY *new_leafentry_p,
+                       OMT omt,
+                       struct mempool *mp,
+                       void **maybe_free,
                        OMT snapshot_xids,
                        OMT live_list_reverse) {
     ULE_S ule;
@@ -309,9 +318,11 @@ apply_msg_to_leafentry(BRT_MSG msg, // message to apply to leafentry
         garbage_collection(&ule, snapshot_xids, live_list_reverse);
     }
     rval = le_pack(&ule,                // create packed leafentry
                    new_leafentry_memorysize,
-                   new_leafentry_disksize,
-                   new_leafentry_p
+                   new_leafentry_p,
+                   omt,
+                   mp,
+                   maybe_free
                    );
     ule_cleanup(&ule);
     return rval;
@@ -625,9 +636,10 @@ update_le_status(ULE ule, size_t memsize, LE_STATUS s) {
 int
 le_pack(ULE ule,                            // data to be packed into new leafentry
         size_t *new_leafentry_memorysize,
-        size_t *new_leafentry_disksize,
-        LEAFENTRY * const new_leafentry_p   // this is what this function creates
-        )
+        LEAFENTRY * const new_leafentry_p,  // this is what this function creates
+        OMT omt,
+        struct mempool *mp,
+        void **maybe_free)
 {
     invariant(ule->num_cuxrs > 0);
     invariant(ule->uxrs[0].xid == TXNID_NONE);
@@ -651,11 +663,8 @@ le_pack(ULE ule, // data to be packed into new leafen
     }
 found_insert:;
     memsize = le_memsize_from_ule(ule);
-    LEAFENTRY new_leafentry = le_malloc(memsize);
-    if (new_leafentry==NULL) {
-        rval = ENOMEM;
-        goto cleanup;
-    }
+    LEAFENTRY new_leafentry = le_malloc(omt, mp, memsize, maybe_free);
     //Universal data
     new_leafentry->keylen = toku_htod32(ule->keylen);
@@ -781,7 +790,6 @@ found_insert:;
     *new_leafentry_p = (LEAFENTRY)new_leafentry;
     *new_leafentry_memorysize = memsize;
-    *new_leafentry_disksize = memsize;
     rval = 0;
 cleanup:
     update_le_status(ule, memsize, &status);
@@ -2210,7 +2218,6 @@ leafentry_disksize_13(LEAFENTRY_13 le) {
 int
 toku_le_upgrade_13_14(LEAFENTRY_13 old_leafentry,
                       size_t *new_leafentry_memorysize,
-                      size_t *new_leafentry_disksize,
                       LEAFENTRY *new_leafentry_p) {
     ULE_S ule;
     int rval;
@@ -2218,8 +2225,8 @@ toku_le_upgrade_13_14(LEAFENTRY_13 old_leafentry,
     le_unpack_13(&ule, old_leafentry);
     rval = le_pack(&ule,                // create packed leafentry
                    new_leafentry_memorysize,
-                   new_leafentry_disksize,
-                   new_leafentry_p);
+                   new_leafentry_p,
+                   NULL, NULL, NULL);  // NULL for omt means that we use malloc instead of mempool
     ule_cleanup(&ule);
     return rval;
 }
...
@@ -16,6 +16,8 @@
 extern "C" {
 #endif
+#include "mempool.h"
 // opaque handles used by outside world (i.e. indexer)
 typedef struct ule *ULEHANDLE;
 typedef struct uxr *UXRHANDLE;
@@ -53,8 +55,10 @@ void fast_msg_to_leafentry(
 int apply_msg_to_leafentry(BRT_MSG msg,
                            LEAFENTRY old_leafentry, // NULL if there was no stored data.
                            size_t *new_leafentry_memorysize,
-                           size_t *new_leafentry_disksize,
                            LEAFENTRY *new_leafentry_p,
+                           OMT omt,
+                           struct mempool *mp,
+                           void **maybe_free,
                            OMT snapshot_xids,
                            OMT live_list_reverse);
...
/* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2009 Tokutek Inc. All rights reserved."
#ident "$Id: env_startup.c 20778 2010-05-28 20:38:42Z yfogel $"
/* Purpose of this test is to verify that a failed assert will
* cause a panic, which should be visible via engine status.
* This is a manual test, should not be checked in to repository.
* The panic must be manually induced in the debugger.
*/
#include "test.h"
#include <db.h>
static DB_ENV *env;
#define FLAGS_NOLOG DB_INIT_LOCK|DB_INIT_MPOOL|DB_CREATE|DB_PRIVATE
#define FLAGS_LOG FLAGS_NOLOG|DB_INIT_TXN|DB_INIT_LOG
static int mode = S_IRWXU+S_IRWXG+S_IRWXO;
static void test_shutdown(void);
static void
test_shutdown(void) {
int r;
r=env->close(env, 0); CKERR(r);
env = NULL;
}
static void
setup (u_int32_t flags) {
int r;
if (env)
test_shutdown();
r = system("rm -rf " ENVDIR);
CKERR(r);
r=toku_os_mkdir(ENVDIR, S_IRWXU+S_IRWXG+S_IRWXO);
CKERR(r);
r=db_env_create(&env, 0);
CKERR(r);
env->set_errfile(env, stderr);
r=env->open(env, ENVDIR, flags, mode);
CKERR(r);
}
int
test_main (int argc, char * const argv[]) {
parse_args(argc, argv);
setup(FLAGS_LOG);
env->txn_checkpoint(env, 0, 0, 0);
print_engine_status(env);
test_shutdown();
return 0;
}