Commit 75052cdc authored by John Esmet's avatar John Esmet

fixes #127 Add a class for scoped mallocs, which are satisfied from a
thread-local buffer when possible.
parent 8527aa5a
......@@ -89,6 +89,8 @@ PATENT RIGHTS GRANT:
#ident "$Id$"
#include <toku_portability.h>
#include <util/scoped_malloc.h>
#include <zlib.h>
#include <lzma.h>
......@@ -241,10 +243,10 @@ void toku_decompress (Bytef *dest, uLongf destLen,
}
case TOKU_QUICKLZ_METHOD:
if (sourceLen>1) {
qlz_state_decompress *XCALLOC(qsd);
toku::scoped_calloc state_buf(sizeof(qlz_state_decompress));
qlz_state_decompress *qsd = reinterpret_cast<qlz_state_decompress *>(state_buf.get());
uLongf actual_destlen = qlz_decompress((char*)source+1, dest, qsd);
assert(actual_destlen == destLen);
toku_free(qsd);
} else {
// length 1 means there is no data, so do nothing.
assert(destLen==0);
......
......@@ -223,6 +223,7 @@ basement nodes, bulk fetch, and partial fetch:
#include <util/status.h>
#include <util/rwlock.h>
#include <util/sort.h>
#include <util/scoped_malloc.h>
#include <stdint.h>
......@@ -4478,7 +4479,8 @@ bnc_apply_messages_to_basement_node(
// the relevant messages' offsets and sort them by MSN, then apply
// them in MSN order.
const int buffer_size = ((stale_ube - stale_lbi) + (fresh_ube - fresh_lbi) + bnc->broadcast_list.size());
int32_t *XMALLOC_N(buffer_size, offsets);
toku::scoped_malloc offsets_buf(buffer_size * sizeof(int32_t));
int32_t *offsets = reinterpret_cast<int32_t *>(offsets_buf.get());
struct store_fifo_offset_extra sfo_extra = { .offsets = offsets, .i = 0 };
// Populate offsets array with offsets to stale messages
......@@ -4504,8 +4506,6 @@ bnc_apply_messages_to_basement_node(
struct fifo_entry *entry = toku_fifo_get_entry(bnc->buffer, offsets[i]);
do_bn_apply_cmd(t, bn, entry, oldest_referenced_xid, &workdone_this_ancestor, &stats_delta);
}
toku_free(offsets);
} else if (stale_lbi == stale_ube) {
// No stale messages to apply, we just apply fresh messages, and mark them to be moved to stale later.
struct iterate_do_bn_apply_cmd_extra iter_extra = { .t = t, .bn = bn, .bnc = bnc, .oldest_referenced_xid = oldest_referenced_xid, .workdone = &workdone_this_ancestor, .stats_to_update = &stats_delta };
......@@ -6368,12 +6368,11 @@ int toku_ft_layer_init(void) {
toku_checkpoint_init();
toku_ft_serialize_layer_init();
toku_mutex_init(&ft_open_close_lock, NULL);
toku_scoped_malloc_init();
exit:
return r;
}
void toku_ft_layer_destroy(void) {
toku_mutex_destroy(&ft_open_close_lock);
toku_ft_serialize_layer_destroy();
......@@ -6382,6 +6381,7 @@ void toku_ft_layer_destroy(void) {
txn_status_destroy();
toku_context_status_destroy();
partitioned_counters_destroy();
toku_scoped_malloc_destroy();
//Portability must be cleaned up last
toku_portability_destroy();
}
......
......@@ -97,6 +97,7 @@ PATENT RIGHTS GRANT:
#include <util/threadpool.h>
#include "ft.h"
#include <util/status.h>
#include <util/scoped_malloc.h>
static FT_UPGRADE_STATUS_S ft_upgrade_status;
......@@ -217,12 +218,12 @@ toku_maybe_preallocate_in_file (int fd, int64_t size, int64_t expected_size, int
}
if (to_write > 0) {
assert(to_write%512==0);
char *XMALLOC_N_ALIGNED(512, to_write, wbuf);
toku::scoped_malloc_aligned wbuf_aligned(to_write, 512);
char *wbuf = reinterpret_cast<char *>(wbuf_aligned.get());
memset(wbuf, 0, to_write);
toku_off_t start_write = alignup64(file_size, stripe_width);
invariant(start_write >= file_size);
toku_os_full_pwrite(fd, wbuf, to_write, start_write);
toku_free(wbuf);
*new_size = start_write + to_write;
}
else {
......@@ -356,10 +357,13 @@ serialize_nonleaf_childinfo(NONLEAF_CHILDINFO bnc, struct wbuf *wb)
//
static void
serialize_ftnode_partition(FTNODE node, int i, struct sub_block *sb) {
assert(sb->uncompressed_size == 0);
assert(sb->uncompressed_ptr == NULL);
sb->uncompressed_size = serialize_ftnode_partition_size(node,i);
sb->uncompressed_ptr = toku_xmalloc(sb->uncompressed_size);
if (sb->uncompressed_ptr == NULL) {
assert(sb->uncompressed_size == 0);
sb->uncompressed_size = serialize_ftnode_partition_size(node,i);
sb->uncompressed_ptr = toku_xmalloc(sb->uncompressed_size);
} else {
assert(sb->uncompressed_size > 0);
}
//
// Now put the data into sb->uncompressed_ptr
//
......@@ -549,13 +553,21 @@ rebalance_ftnode_leaf(FTNODE node, unsigned int basementnodesize)
// Create an array of OMTVALUE's that store all the pointers to all the data.
// Each element in leafpointers is a pointer to a leaf.
LEAFENTRY *XMALLOC_N(num_alloc, leafpointers);
toku::scoped_malloc leafpointers_buf(sizeof(LEAFENTRY) * num_alloc);
LEAFENTRY *leafpointers = reinterpret_cast<LEAFENTRY *>(leafpointers_buf.get());
leafpointers[0] = NULL;
const void **XMALLOC_N(num_alloc, key_pointers);
uint32_t *XMALLOC_N(num_alloc, key_sizes);
toku::scoped_malloc key_pointers_buf(sizeof(void *) * num_alloc);
const void **key_pointers = reinterpret_cast<const void **>(key_pointers_buf.get());
key_pointers[0] = NULL;
toku::scoped_malloc key_sizes_buf(sizeof(uint32_t) * num_alloc);
uint32_t *key_sizes = reinterpret_cast<uint32_t *>(key_sizes_buf.get());
// Capture pointers to old mempools' buffers (so they can be destroyed)
BASEMENTNODE *XMALLOC_N(num_orig_basements, old_bns);
toku::scoped_malloc old_bns_buf(sizeof(BASEMENTNODE) * num_orig_basements);
BASEMENTNODE *old_bns = reinterpret_cast<BASEMENTNODE *>(old_bns_buf.get());
old_bns[0] = NULL;
uint32_t curr_le = 0;
for (uint32_t i = 0; i < num_orig_basements; i++) {
......@@ -568,22 +580,26 @@ rebalance_ftnode_leaf(FTNODE node, unsigned int basementnodesize)
// Create an array that will store indexes of new pivots.
// Each element in new_pivots is the index of a pivot key.
// (Allocating num_le of them is overkill, but num_le is an upper bound.)
uint32_t *XMALLOC_N(num_alloc, new_pivots);
toku::scoped_malloc new_pivots_buf(sizeof(uint32_t) * num_alloc);
uint32_t *new_pivots = reinterpret_cast<uint32_t *>(new_pivots_buf.get());
new_pivots[0] = 0;
// Each element in le_sizes is the size of the leafentry pointed to by leafpointers.
size_t *XMALLOC_N(num_alloc, le_sizes);
toku::scoped_malloc le_sizes_buf(sizeof(size_t) * num_alloc);
size_t *le_sizes = reinterpret_cast<size_t *>(le_sizes_buf.get());
le_sizes[0] = 0;
// Create an array that will store the size of each basement.
// This is the sum of the leaf sizes of all the leaves in that basement.
// We don't know how many basements there will be, so we use num_le as the upper bound.
size_t *XMALLOC_N(num_alloc, bn_sizes);
toku::scoped_malloc bn_sizes_buf(sizeof(size_t) * num_alloc);
size_t *bn_sizes = reinterpret_cast<size_t *>(bn_sizes_buf.get());
bn_sizes[0] = 0;
// TODO 4050: All these arrays should be combined into a single array of some bn_info struct (pivot, msize, num_les).
// Each entry is the number of leafentries in this basement. (Again, num_le is overkill upper baound.)
uint32_t *XMALLOC_N(num_alloc, num_les_this_bn);
toku::scoped_malloc num_les_this_bn_buf(sizeof(uint32_t) * num_alloc);
uint32_t *num_les_this_bn = reinterpret_cast<uint32_t *>(num_les_this_bn_buf.get());
num_les_this_bn[0] = 0;
// Figure out the new pivots.
......@@ -696,14 +712,6 @@ rebalance_ftnode_leaf(FTNODE node, unsigned int basementnodesize)
for (uint32_t i = 0; i < num_orig_basements; i++) {
destroy_basement_node(old_bns[i]);
}
toku_free(key_pointers);
toku_free(key_sizes);
toku_free(leafpointers);
toku_free(old_bns);
toku_free(new_pivots);
toku_free(le_sizes);
toku_free(bn_sizes);
toku_free(num_les_this_bn);
} // end of rebalance_ftnode_leaf()
struct serialize_times {
......@@ -737,32 +745,30 @@ toku_create_compressed_partition_from_available(
SUB_BLOCK sb
)
{
struct serialize_times st;
memset(&st, 0, sizeof(st));
tokutime_t t0 = toku_time_now();
serialize_and_compress_partition(node, childnum, compression_method, sb, &st);
toku_ft_status_update_serialize_times(node, st.serialize_time, st.compress_time);
// serialize
sb->uncompressed_size = serialize_ftnode_partition_size(node, childnum);
toku::scoped_malloc uncompressed_buf(sb->uncompressed_size);
sb->uncompressed_ptr = uncompressed_buf.get();
serialize_ftnode_partition(node, childnum, sb);
//
// now we have an sb that would be ready for being written out,
// but we are not writing it out, we are storing it in cache for a potentially
// long time, so we need to do some cleanup
//
// The buffer created above contains metadata in the first 8 bytes, and is overallocated
// It allocates a bound on the compressed length (evaluated before compression) as opposed
// to just the amount of the actual compressed data. So, we create a new buffer and copy
// just the compressed data.
//
uint32_t compressed_size = toku_dtoh32(*(uint32_t *)sb->compressed_ptr);
void* compressed_data = toku_xmalloc(compressed_size);
memcpy(compressed_data, (char *)sb->compressed_ptr + 8, compressed_size);
toku_free(sb->compressed_ptr);
sb->compressed_ptr = compressed_data;
sb->compressed_size = compressed_size;
if (sb->uncompressed_ptr) {
toku_free(sb->uncompressed_ptr);
sb->uncompressed_ptr = NULL;
}
tokutime_t t1 = toku_time_now();
// compress. no need to pad with extra bytes for sizes/xsum - we're not storing them
set_compressed_size_bound(sb, compression_method);
sb->compressed_ptr = toku_xmalloc(sb->compressed_size_bound);
sb->compressed_size = compress_nocrc_sub_block(
sb,
sb->compressed_ptr,
sb->compressed_size_bound,
compression_method
);
sb->uncompressed_ptr = NULL;
tokutime_t t2 = toku_time_now();
toku_ft_status_update_serialize_times(node, t1 - t0, t2 - t1);
}
static void
......@@ -882,7 +888,8 @@ int toku_serialize_ftnode_to_memory(FTNODE node,
// Each partition represents a compressed sub block
// For internal nodes, a sub block is a message buffer
// For leaf nodes, a sub block is a basement node
struct sub_block *XMALLOC_N(npartitions, sb);
toku::scoped_malloc sb_buf(sizeof(struct sub_block) * npartitions);
struct sub_block *sb = reinterpret_cast<struct sub_block *>(sb_buf.get());
XREALLOC_N(npartitions, *ndd);
struct sub_block sb_node_info;
for (int i = 0; i < npartitions; i++) {
......@@ -983,7 +990,6 @@ int toku_serialize_ftnode_to_memory(FTNODE node,
assert(0 == (*n_bytes_to_write)%512);
assert(0 == ((unsigned long long)(*bytes_to_write))%512);
toku_free(sb);
return 0;
}
......@@ -1546,7 +1552,6 @@ deserialize_ftnode_partition(
rb.ndone += data_size;
}
assert(rb.ndone == rb.size);
toku_free(sb->uncompressed_ptr);
exit:
return r;
}
......@@ -1564,6 +1569,8 @@ decompress_and_deserialize_worker(struct rbuf curr_rbuf, struct sub_block curr_s
r = deserialize_ftnode_partition(&curr_sb, node, child, desc, cmp);
}
*decompress_time = t1 - t0;
toku_free(curr_sb.uncompressed_ptr);
return r;
}
......@@ -2452,7 +2459,8 @@ toku_deserialize_bp_from_disk(FTNODE node, FTNODE_DISK_DATA ndd, int childnum, i
uint32_t pad_at_beginning = (node_offset+curr_offset)%512;
uint32_t padded_size = roundup_to_multiple(512, pad_at_beginning + curr_size);
uint8_t *XMALLOC_N_ALIGNED(512, padded_size, raw_block);
toku::scoped_malloc_aligned raw_block_buf(padded_size, 512);
uint8_t *raw_block = reinterpret_cast<uint8_t *>(raw_block_buf.get());
rbuf_init(&rb, pad_at_beginning+raw_block, curr_size);
tokutime_t t0 = toku_time_now();
......@@ -2466,17 +2474,25 @@ toku_deserialize_bp_from_disk(FTNODE node, FTNODE_DISK_DATA ndd, int childnum, i
tokutime_t t1 = toku_time_now();
// decompress
// read sub block
struct sub_block curr_sb;
sub_block_init(&curr_sb);
r = read_and_decompress_sub_block(&rb, &curr_sb);
r = read_compressed_sub_block(&rb, &curr_sb);
if (r != 0) {
return r;
}
invariant(curr_sb.compressed_ptr != NULL);
// decompress
toku::scoped_malloc uncompressed_buf(curr_sb.uncompressed_size);
curr_sb.uncompressed_ptr = uncompressed_buf.get();
toku_decompress((Bytef *) curr_sb.uncompressed_ptr, curr_sb.uncompressed_size,
(Bytef *) curr_sb.compressed_ptr, curr_sb.compressed_size);
// deserialize
tokutime_t t2 = toku_time_now();
if (r == 0) {
// at this point, sb->uncompressed_ptr stores the serialized node partition
r = deserialize_ftnode_partition(&curr_sb, node, childnum, &bfe->h->cmp_descriptor, bfe->h->compare_fun);
}
r = deserialize_ftnode_partition(&curr_sb, node, childnum, &bfe->h->cmp_descriptor, bfe->h->compare_fun);
tokutime_t t3 = toku_time_now();
......@@ -2491,7 +2507,6 @@ toku_deserialize_bp_from_disk(FTNODE node, FTNODE_DISK_DATA ndd, int childnum, i
bfe->bytes_read = rlen;
bfe->io_time = io_time;
toku_free(raw_block);
return r;
}
......@@ -2502,8 +2517,9 @@ toku_deserialize_bp_from_compressed(FTNODE node, int childnum, struct ftnode_fet
assert(BP_STATE(node, childnum) == PT_COMPRESSED);
SUB_BLOCK curr_sb = BSB(node, childnum);
toku::scoped_malloc uncompressed_buf(curr_sb->uncompressed_size);
assert(curr_sb->uncompressed_ptr == NULL);
curr_sb->uncompressed_ptr = toku_xmalloc(curr_sb->uncompressed_size);
curr_sb->uncompressed_ptr = uncompressed_buf.get();
setup_available_ftnode_partition(node, childnum);
BP_STATE(node,childnum) = PT_AVAIL;
......
......@@ -115,6 +115,7 @@ PATENT RIGHTS GRANT:
#include "txn_manager.h"
#include "ule-internal.h"
#include <util/status.h>
#include <util/scoped_malloc.h>
#define ULE_DEBUG 0
......@@ -469,16 +470,10 @@ toku_le_apply_msg(FT_MSG msg,
uint64_t oldmemsize = 0;
uint32_t keylen = ft_msg_get_keylen(msg);
LEAFENTRY copied_old_le = NULL;
bool old_le_malloced = false;
size_t old_le_size = old_leafentry ? leafentry_memsize(old_leafentry) : 0;
toku::scoped_malloc copied_old_le_buf(old_le_size);
if (old_leafentry) {
size_t old_le_size = leafentry_memsize(old_leafentry);
if (old_le_size > 100*1024) { // completely arbitrary limit
CAST_FROM_VOIDP(copied_old_le, toku_malloc(old_le_size));
old_le_malloced = true;
}
else {
CAST_FROM_VOIDP(copied_old_le, alloca(old_le_size));
}
CAST_FROM_VOIDP(copied_old_le, copied_old_le_buf.get());
memcpy(copied_old_le, old_leafentry, old_le_size);
}
......@@ -506,9 +501,6 @@ toku_le_apply_msg(FT_MSG msg,
}
*numbytes_delta_p = newnumbytes - oldnumbytes;
ule_cleanup(&ule);
if (old_le_malloced) {
toku_free(copied_old_le);
}
}
bool toku_le_worth_running_garbage_collection(LEAFENTRY le, TXNID oldest_referenced_xid_known) {
......@@ -564,16 +556,10 @@ toku_le_garbage_collect(LEAFENTRY old_leaf_entry,
int64_t oldnumbytes = 0;
int64_t newnumbytes = 0;
LEAFENTRY copied_old_le = NULL;
bool old_le_malloced = false;
size_t old_le_size = old_leaf_entry ? leafentry_memsize(old_leaf_entry) : 0;
toku::scoped_malloc copied_old_le_buf(old_le_size);
if (old_leaf_entry) {
size_t old_le_size = leafentry_memsize(old_leaf_entry);
if (old_le_size > 100*1024) { // completely arbitrary limit
CAST_FROM_VOIDP(copied_old_le, toku_malloc(old_le_size));
old_le_malloced = true;
}
else {
CAST_FROM_VOIDP(copied_old_le, alloca(old_le_size));
}
CAST_FROM_VOIDP(copied_old_le, copied_old_le_buf.get());
memcpy(copied_old_le, old_leaf_entry, old_le_size);
}
......@@ -607,9 +593,6 @@ toku_le_garbage_collect(LEAFENTRY old_leaf_entry,
}
*numbytes_delta_p = newnumbytes - oldnumbytes;
ule_cleanup(&ule);
if (old_le_malloced) {
toku_free(copied_old_le);
}
}
/////////////////////////////////////////////////////////////////////////////////
......
......@@ -4,6 +4,7 @@ set(util_srcs
mempool
partitioned_counter
threadpool
scoped_malloc
)
add_library(util SHARED ${util_srcs})
......
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
/*
COPYING CONDITIONS NOTICE:
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation, and provided that the
following conditions are met:
* Redistributions of source code must retain this COPYING
CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the
DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the
PATENT MARKING NOTICE (below), and the PATENT RIGHTS
GRANT (below).
* Redistributions in binary form must reproduce this COPYING
CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the
DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the
PATENT MARKING NOTICE (below), and the PATENT RIGHTS
GRANT (below) in the documentation and/or other materials
provided with the distribution.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
COPYRIGHT NOTICE:
TokuDB, Tokutek Fractal Tree Indexing Library.
Copyright (C) 2007-2013 Tokutek, Inc.
DISCLAIMER:
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
UNIVERSITY PATENT NOTICE:
The technology is licensed by the Massachusetts Institute of
Technology, Rutgers State University of New Jersey, and the Research
Foundation of State University of New York at Stony Brook under
United States of America Serial No. 11/760379 and to the patents
and/or patent applications resulting from it.
PATENT MARKING NOTICE:
This software is covered by US Patent No. 8,185,551.
This software is covered by US Patent No. 8,489,638.
PATENT RIGHTS GRANT:
"THIS IMPLEMENTATION" means the copyrightable works distributed by
Tokutek as part of the Fractal Tree project.
"PATENT CLAIMS" means the claims of patents that are owned or
licensable by Tokutek, both currently or in the future; and that in
the absence of this license would be infringed by THIS
IMPLEMENTATION or by using or running THIS IMPLEMENTATION.
"PATENT CHALLENGE" shall mean a challenge to the validity,
patentability, enforceability and/or non-infringement of any of the
PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS.
Tokutek hereby grants to you, for the term and geographical scope of
the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to
make, have made, use, offer to sell, sell, import, transfer, and
otherwise run, modify, and propagate the contents of THIS
IMPLEMENTATION, where such license applies only to the PATENT
CLAIMS. This grant does not include claims that would be infringed
only as a consequence of further modifications of THIS
IMPLEMENTATION. If you or your agent or licensee institute or order
or agree to the institution of patent litigation against any entity
(including a cross-claim or counterclaim in a lawsuit) alleging that
THIS IMPLEMENTATION constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any rights
granted to you under this License shall terminate as of the date
such litigation is filed. If you or your agent or exclusive
licensee institute or order or agree to the institution of a PATENT
CHALLENGE, then Tokutek may terminate any rights granted to you
under this License.
*/
#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include <pthread.h>
#include <toku_include/memory.h>
#include <util/scoped_malloc.h>
namespace toku {
// see pthread_key handling at the bottom
//
// when we use gcc 4.8, we can use the 'thread_local' keyword and proper
// c++ constructors/destructors instead of this pthread wizardy.
static pthread_key_t tl_stack_destroy_pthread_key;
class tl_stack {
    // Each thread-local arena is 1MB.
    static const size_t STACK_SIZE = 1 * 1024 * 1024;

public:
    // Lazily allocate the backing buffer and register this stack with the
    // process-wide pthread key so tl_stack_destroy runs when the thread exits.
    void init() {
        m_mem = reinterpret_cast<char *>(toku_xmalloc(STACK_SIZE));
        m_used = 0;
        int r = pthread_setspecific(tl_stack_destroy_pthread_key, this);
        invariant_zero(r);
    }

    // Free the backing buffer. Safe to call more than once.
    void destroy() {
        if (m_mem == NULL) {
            return;
        }
        toku_free(m_mem);
        m_mem = NULL;
    }

    // Bump-allocate 'size' bytes and return a pointer to the first byte.
    // Callers are expected to check get_free_space() before calling.
    void *alloc(const size_t size) {
        if (m_mem == NULL) {
            init();
        }
        invariant(m_used + size <= STACK_SIZE);
        void *p = m_mem + m_used;
        m_used += size;
        return p;
    }

    // Give back the most recent 'size' bytes (allocations are LIFO).
    void dealloc(const size_t size) {
        invariant(m_used >= size);
        m_used -= size;
    }

    // Number of bytes still available in this arena.
    size_t get_free_space() const {
        invariant(m_used <= STACK_SIZE);
        return STACK_SIZE - m_used;
    }

private:
    // Bytes handed out so far (offset of the free region).
    size_t m_used;
    // Lazily-allocated backing buffer, NULL until first alloc().
    char *m_mem;
};
// Each thread has its own local stack.
static __thread tl_stack local_stack;
// Memory comes from the thread-local stack when it has room, otherwise from
// the heap via toku_xmalloc (see malloc(3)).
// Member initializers run in declaration order (m_size, m_local, m_buf), so
// m_local is decided before m_buf is initialized from it -- do not reorder
// the member declarations in the header.
scoped_malloc::scoped_malloc(const size_t size) :
m_size(size),
m_local(local_stack.get_free_space() >= m_size),
m_buf(m_local ? local_stack.alloc(m_size) : toku_xmalloc(m_size)) {
}
scoped_malloc::~scoped_malloc() {
    // Heap allocations are freed outright; thread-local allocations are
    // popped off the per-thread stack (scoped_mallocs destruct in LIFO
    // order, matching the stack discipline).
    if (!m_local) {
        toku_free(m_buf);
    } else {
        local_stack.dealloc(m_size);
    }
}
} // namespace toku
// pthread key handling:
// - there is a process-wide pthread key that is associated with the destructor for a tl_stack
// - on process construction, we initialize the key; on destruction, we clean it up.
// - when a thread first uses its tl_stack, it calls pthread_setspecific(&destroy_key, "some key"),
// associating the destroy key with the tl_stack_destroy destructor
// - when a thread terminates, it calls the associated destructor; tl_stack_destroy.
// pthread destructor callback for tl_stack_destroy_pthread_key: frees the
// terminating thread's tl_stack buffer. 'key' is the value registered via
// pthread_setspecific, i.e. the thread's tl_stack itself.
static void tl_stack_destroy(void *key) {
    invariant_notnull(key);
    // static_cast is the correct named cast for void* -> T*; reinterpret_cast
    // is reserved for unrelated-type punning.
    toku::tl_stack *st = static_cast<toku::tl_stack *>(key);
    st->destroy();
}
void toku_scoped_malloc_init(void) {
    // Create the process-wide key whose destructor tears down each
    // thread's tl_stack as that thread exits.
    const int r = pthread_key_create(&toku::tl_stack_destroy_pthread_key,
                                     tl_stack_destroy);
    invariant_zero(r);
}
void toku_scoped_malloc_destroy(void) {
    // pthread_key_delete() does not invoke the key's destructor for any
    // thread, so explicitly tear down the calling thread's tl_stack first.
    // NOTE(review): other still-live threads' stacks are not freed here;
    // they are only reclaimed when those threads exit -- presumably this is
    // called at process shutdown after worker threads are joined. Confirm
    // against toku_ft_layer_destroy's ordering.
    tl_stack_destroy(&toku::local_stack);
    int r = pthread_key_delete(toku::tl_stack_destroy_pthread_key);
    invariant_zero(r);
}
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
/*
COPYING CONDITIONS NOTICE:
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation, and provided that the
following conditions are met:
* Redistributions of source code must retain this COPYING
CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the
DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the
PATENT MARKING NOTICE (below), and the PATENT RIGHTS
GRANT (below).
* Redistributions in binary form must reproduce this COPYING
CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the
DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the
PATENT MARKING NOTICE (below), and the PATENT RIGHTS
GRANT (below) in the documentation and/or other materials
provided with the distribution.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
COPYRIGHT NOTICE:
TokuDB, Tokutek Fractal Tree Indexing Library.
Copyright (C) 2007-2013 Tokutek, Inc.
DISCLAIMER:
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
UNIVERSITY PATENT NOTICE:
The technology is licensed by the Massachusetts Institute of
Technology, Rutgers State University of New Jersey, and the Research
Foundation of State University of New York at Stony Brook under
United States of America Serial No. 11/760379 and to the patents
and/or patent applications resulting from it.
PATENT MARKING NOTICE:
This software is covered by US Patent No. 8,185,551.
This software is covered by US Patent No. 8,489,638.
PATENT RIGHTS GRANT:
"THIS IMPLEMENTATION" means the copyrightable works distributed by
Tokutek as part of the Fractal Tree project.
"PATENT CLAIMS" means the claims of patents that are owned or
licensable by Tokutek, both currently or in the future; and that in
the absence of this license would be infringed by THIS
IMPLEMENTATION or by using or running THIS IMPLEMENTATION.
"PATENT CHALLENGE" shall mean a challenge to the validity,
patentability, enforceability and/or non-infringement of any of the
PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS.
Tokutek hereby grants to you, for the term and geographical scope of
the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to
make, have made, use, offer to sell, sell, import, transfer, and
otherwise run, modify, and propagate the contents of THIS
IMPLEMENTATION, where such license applies only to the PATENT
CLAIMS. This grant does not include claims that would be infringed
only as a consequence of further modifications of THIS
IMPLEMENTATION. If you or your agent or licensee institute or order
or agree to the institution of patent litigation against any entity
(including a cross-claim or counterclaim in a lawsuit) alleging that
THIS IMPLEMENTATION constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any rights
granted to you under this License shall terminate as of the date
such litigation is filed. If you or your agent or exclusive
licensee institute or order or agree to the institution of a PATENT
CHALLENGE, then Tokutek may terminate any rights granted to you
under this License.
*/
#pragma once
#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include <string.h>
namespace toku {
// RAII buffer: allocated from a thread-local stack when it fits, otherwise
// from the heap; released by the destructor. Destroy in LIFO order relative
// to other scoped_mallocs on the same thread.
class scoped_malloc {
public:
    // Memory is allocated from thread-local storage if available, otherwise from malloc(3).
    scoped_malloc(const size_t size);

    // Frees the heap buffer, or pops the allocation off the thread-local stack.
    ~scoped_malloc();

    // Pointer to the allocated buffer of 'size' bytes. Valid until destruction.
    void *get() const {
        return m_buf;
    }

private:
    // Non-copyable and non-default-constructible. The copy constructor and
    // copy assignment must be suppressed too (declared, never defined):
    // a compiler-generated copy would make the destructor free / pop the
    // same buffer twice.
    scoped_malloc();
    scoped_malloc(const scoped_malloc &);
    scoped_malloc &operator=(const scoped_malloc &);

    const size_t m_size;
    const bool m_local;
    void *const m_buf;
};
class scoped_calloc : public scoped_malloc {
public:
// A scoped malloc whose bytes are initialized to zero, as in calloc(3)
scoped_calloc(const size_t size) :
scoped_malloc(size) {
memset(scoped_malloc::get(), 0, size);
}
};
// A scoped allocation whose returned pointer is aligned to 'alignment' bytes.
// Over-allocates by 'alignment' bytes, then bumps the base pointer up to an
// aligned address within the oversized buffer.
class scoped_malloc_aligned : public scoped_malloc {
public:
    scoped_malloc_aligned(const size_t size, const size_t alignment) :
        scoped_malloc(size + alignment) {
        // NOTE(review): requires size >= alignment -- presumably because all
        // current callers allocate at least one aligned unit (e.g. 512-byte
        // I/O blocks); confirm before relaxing.
        invariant(size >= alignment);
        invariant(alignment > 0);
        const uintptr_t addr = reinterpret_cast<uintptr_t>(scoped_malloc::get());
        // Bump up to an alignment boundary. When addr is already aligned this
        // advances a full 'alignment' bytes; that is harmless because the
        // buffer was over-allocated by exactly 'alignment' bytes, so
        // aligned_addr + size never exceeds the end of the buffer.
        const uintptr_t aligned_addr = (addr + alignment) - (addr % alignment);
        invariant(aligned_addr < addr + size + alignment);
        m_aligned_buf = reinterpret_cast<char *>(aligned_addr);
    }

    // Returns the aligned pointer. Hides (non-virtually) scoped_malloc::get(),
    // which would return the unaligned base.
    void *get() const {
        return m_aligned_buf;
    }

private:
    void *m_aligned_buf;
};
} // namespace toku
void toku_scoped_malloc_init(void);
void toku_scoped_malloc_destroy(void);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment