Commit 0cb23fd9 authored by John Esmet

FT-271 Move block allocator code into a class.

parent 6fd626d6
@@ -89,109 +89,69 @@ PATENT RIGHTS GRANT:
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#ident "$Id$"
#include "block_allocator.h"
#include <memory.h>
#include <toku_assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <cstring>
#include "portability/memory.h"
#include "portability/toku_assert.h"
#include "portability/toku_stdint.h"
#include "portability/toku_stdlib.h"
#include "ft/block_allocator.h"
// Here's a very simple implementation.
// It's not very fast at allocating or freeing.
// The previous implementation used next_fit, but we now use first_fit since we are moving blocks around to reduce file size.
struct block_allocator {
uint64_t reserve_at_beginning; // How much to reserve at the beginning
uint64_t alignment; // Block alignment
uint64_t n_blocks; // How many blocks
uint64_t blocks_array_size; // How big is the blocks_array. Must be >= n_blocks.
struct block_allocator_blockpair *blocks_array; // These blocks are sorted by address.
uint64_t n_bytes_in_use; // including the reserve_at_beginning
};
void
block_allocator_validate (BLOCK_ALLOCATOR ba) {
uint64_t i;
uint64_t n_bytes_in_use = ba->reserve_at_beginning;
for (i=0; i<ba->n_blocks; i++) {
n_bytes_in_use += ba->blocks_array[i].size;
if (i>0) {
assert(ba->blocks_array[i].offset > ba->blocks_array[i-1].offset);
assert(ba->blocks_array[i].offset >= ba->blocks_array[i-1].offset + ba->blocks_array[i-1].size );
}
}
assert(n_bytes_in_use == ba->n_bytes_in_use);
}
#if 0
#define VALIDATE(b) block_allocator_validate(b)
#define VALIDATE() validate()
#else
#define VALIDATE(b) ((void)0)
#define VALIDATE()
#endif
#if 0
void
block_allocator_print (BLOCK_ALLOCATOR ba) {
uint64_t i;
for (i=0; i<ba->n_blocks; i++) {
printf("%" PRId64 ":%" PRId64 " ", ba->blocks_array[i].offset, ba->blocks_array[i].size);
}
printf("\n");
VALIDATE(ba);
}
#endif
void block_allocator::create(uint64_t reserve_at_beginning, uint64_t alignment) {
// the alignment must be at least 512 and aligned with 512 to work with direct I/O
assert(alignment >= 512 && (alignment % 512) == 0);
_reserve_at_beginning = reserve_at_beginning;
_alignment = alignment;
_n_blocks = 0;
_blocks_array_size = 1;
XMALLOC_N(_blocks_array_size, _blocks_array);
_n_bytes_in_use = reserve_at_beginning;
void
create_block_allocator (BLOCK_ALLOCATOR *ba, uint64_t reserve_at_beginning, uint64_t alignment) {
assert(alignment>=512 && 0==(alignment%512)); // the alignment must be at least 512 and aligned with 512 to make DIRECT_IO happy.
BLOCK_ALLOCATOR XMALLOC(result);
result->reserve_at_beginning = reserve_at_beginning;
result->alignment = alignment;
result->n_blocks = 0;
result->blocks_array_size = 1;
XMALLOC_N(result->blocks_array_size, result->blocks_array);
result->n_bytes_in_use = reserve_at_beginning;
*ba = result;
VALIDATE(result);
VALIDATE();
}
void
destroy_block_allocator (BLOCK_ALLOCATOR *bap) {
BLOCK_ALLOCATOR ba = *bap;
*bap = 0;
toku_free(ba->blocks_array);
toku_free(ba);
void block_allocator::destroy() {
toku_free(_blocks_array);
}
static void
grow_blocks_array_by (BLOCK_ALLOCATOR ba, uint64_t n_to_add) {
if (ba->n_blocks + n_to_add > ba->blocks_array_size) {
uint64_t new_size = ba->n_blocks + n_to_add;
uint64_t at_least = ba->blocks_array_size * 2;
void block_allocator::grow_blocks_array_by(uint64_t n_to_add) {
if (_n_blocks + n_to_add > _blocks_array_size) {
uint64_t new_size = _n_blocks + n_to_add;
uint64_t at_least = _blocks_array_size * 2;
if (at_least > new_size) {
new_size = at_least;
}
ba->blocks_array_size = new_size;
XREALLOC_N(ba->blocks_array_size, ba->blocks_array);
_blocks_array_size = new_size;
XREALLOC_N(_blocks_array_size, _blocks_array);
}
}
static void
grow_blocks_array (BLOCK_ALLOCATOR ba) {
grow_blocks_array_by(ba, 1);
void block_allocator::grow_blocks_array() {
grow_blocks_array_by(1);
}
void
block_allocator_merge_blockpairs_into (uint64_t d, struct block_allocator_blockpair dst[/*d*/],
uint64_t s, const struct block_allocator_blockpair src[/*s*/])
void block_allocator::merge_blockpairs_into(uint64_t d, struct blockpair dst[],
uint64_t s, const struct blockpair src[])
{
uint64_t tail = d+s;
while (d>0 && s>0) {
struct block_allocator_blockpair *dp = &dst[d-1];
struct block_allocator_blockpair const *sp = &src[s-1];
struct block_allocator_blockpair *tp = &dst[tail-1];
assert(tail>0);
while (d > 0 && s > 0) {
struct blockpair *dp = &dst[d - 1];
struct blockpair const *sp = &src[s - 1];
struct blockpair *tp = &dst[tail - 1];
assert(tail > 0);
if (dp->offset > sp->offset) {
*tp = *dp;
d--;
@@ -202,139 +162,143 @@ block_allocator_merge_blockpairs_into (uint64_t d, struct block_allocator_
tail--;
}
}
while (d>0) {
struct block_allocator_blockpair *dp = &dst[d-1];
struct block_allocator_blockpair *tp = &dst[tail-1];
while (d > 0) {
struct blockpair *dp = &dst[d - 1];
struct blockpair *tp = &dst[tail - 1];
*tp = *dp;
d--;
tail--;
}
while (s>0) {
struct block_allocator_blockpair const *sp = &src[s-1];
struct block_allocator_blockpair *tp = &dst[tail-1];
while (s > 0) {
struct blockpair const *sp = &src[s - 1];
struct blockpair *tp = &dst[tail - 1];
*tp = *sp;
s--;
tail--;
}
}
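A quick illustration of the merge semantics (an editor's sketch, not part of the commit; it assumes the class API declared later in this commit's block_allocator.h):

#include "ft/block_allocator.h"

// dst holds d sorted pairs and has room for d + s; src holds s sorted
// pairs. merge_blockpairs_into() fills dst back-to-front, so no scratch
// space is needed.
static void example_merge(void) {
    block_allocator::blockpair dst[3] = { {0, 10}, {100, 10}, {0, 0} };
    const block_allocator::blockpair src[1] = { {50, 10} };
    block_allocator::merge_blockpairs_into(2, dst, 1, src);
    // dst is now {0,10}, {50,10}, {100,10}, sorted by offset.
}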
static int
compare_blockpairs (const void *av, const void *bv) {
const struct block_allocator_blockpair *a = (const struct block_allocator_blockpair *) av;
const struct block_allocator_blockpair *b = (const struct block_allocator_blockpair *) bv;
if (a->offset < b->offset) return -1;
if (a->offset > b->offset) return +1;
return 0;
int block_allocator::compare_blockpairs(const void *av, const void *bv) {
const struct blockpair *a = (const struct blockpair *) av;
const struct blockpair *b = (const struct blockpair *) bv;
if (a->offset < b->offset) {
return -1;
} else if (a->offset > b->offset) {
return 1;
} else {
return 0;
}
}
void
block_allocator_alloc_blocks_at (BLOCK_ALLOCATOR ba, uint64_t n_blocks, struct block_allocator_blockpair pairs[/*n_blocks*/])
// See the documentation in block_allocator.h
{
VALIDATE(ba);
void block_allocator::alloc_blocks_at(uint64_t n_blocks, struct blockpair pairs[]) {
VALIDATE();
qsort(pairs, n_blocks, sizeof(*pairs), compare_blockpairs);
for (uint64_t i=0; i<n_blocks; i++) {
assert(pairs[i].offset >= ba->reserve_at_beginning);
assert(pairs[i].offset%ba->alignment == 0);
ba->n_bytes_in_use += pairs[i].size;
invariant(pairs[i].size > 0); //Allocator does not support size 0 blocks. See block_allocator_free_block.
for (uint64_t i = 0; i < n_blocks; i++) {
assert(pairs[i].offset >= _reserve_at_beginning);
assert(pairs[i].offset % _alignment == 0);
_n_bytes_in_use += pairs[i].size;
// Allocator does not support size 0 blocks. See block_allocator_free_block.
invariant(pairs[i].size > 0);
}
grow_blocks_array_by(ba, n_blocks);
block_allocator_merge_blockpairs_into(ba->n_blocks, ba->blocks_array,
n_blocks, pairs);
ba->n_blocks += n_blocks;
VALIDATE(ba);
grow_blocks_array_by(n_blocks);
merge_blockpairs_into(_n_blocks, _blocks_array, n_blocks, pairs);
_n_blocks += n_blocks;
VALIDATE();
}
void
block_allocator_alloc_block_at (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t offset) {
struct block_allocator_blockpair p = {.offset = offset, .size=size};
void block_allocator::alloc_block_at(uint64_t size, uint64_t offset) {
struct blockpair p(offset, size);
// Just do a linear search for the block.
// This data structure is a sorted array with no gaps, so the search doesn't make this any slower than the insertion itself.
// To speed up the insertion when opening a file, we provide the block_allocator_alloc_blocks_at function.
block_allocator_alloc_blocks_at(ba, 1, &p);
alloc_blocks_at(1, &p);
}
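When opening a file, the caller registers all pre-existing blocks in one call instead of one linear insertion each; a hypothetical sketch (offsets and sizes invented for illustration):

// Offsets must be multiples of the alignment and lie at or past the
// reserved region; alloc_blocks_at() sorts the pairs, then merges them
// into the allocator's sorted array in one O(N + M log M) pass.
static void example_open(void) {
    block_allocator ba;
    ba.create(4096, 512);  // 4096-byte reserve, 512-byte alignment
    block_allocator::blockpair pairs[2] = { {8192, 1000}, {4096, 512} };
    ba.alloc_blocks_at(2, pairs);
    ba.destroy();
}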
static inline uint64_t
align (uint64_t value, BLOCK_ALLOCATOR ba)
// Effect: align a value by rounding up.
{
return ((value+ba->alignment-1)/ba->alignment)*ba->alignment;
static inline uint64_t align(uint64_t value, uint64_t ba_alignment) {
return ((value + ba_alignment - 1) / ba_alignment) * ba_alignment;
}
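The round-up uses the usual integer identity; a couple of worked values (editor's note):

// (value + alignment - 1) / alignment * alignment rounds value up to the
// next multiple of alignment:
static_assert((1000 + 512 - 1) / 512 * 512 == 1024, "1000 rounds up to 1024");
static_assert((1024 + 512 - 1) / 512 * 512 == 1024, "1024 is already aligned");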
void block_allocator_alloc_block(BLOCK_ALLOCATOR ba, uint64_t size, uint64_t *offset)
// Effect: Allocate a block. The resulting block must be aligned on the ba->alignment (which to make direct_io happy must be a positive multiple of 512).
{
invariant(size > 0); //Allocator does not support size 0 blocks. See block_allocator_free_block.
grow_blocks_array(ba);
ba->n_bytes_in_use += size;
if (ba->n_blocks==0) {
assert(ba->n_bytes_in_use == ba->reserve_at_beginning + size); // we know exactly how many are in use
ba->blocks_array[0].offset = align(ba->reserve_at_beginning, ba);
ba->blocks_array[0].size = size;
*offset = ba->blocks_array[0].offset;
ba->n_blocks++;
void block_allocator::alloc_block(uint64_t size, uint64_t *offset) {
// Allocator does not support size 0 blocks. See block_allocator_free_block.
invariant(size > 0);
grow_blocks_array();
_n_bytes_in_use += size;
if (_n_blocks == 0) {
assert(_n_bytes_in_use == _reserve_at_beginning + size); // we know exactly how many are in use
_blocks_array[0].offset = align(_reserve_at_beginning, _alignment);
_blocks_array[0].size = size;
*offset = _blocks_array[0].offset;
_n_blocks++;
return;
}
// Implement first fit.
{
uint64_t end_of_reserve = align(ba->reserve_at_beginning, ba);
if (end_of_reserve + size <= ba->blocks_array[0].offset ) {
uint64_t end_of_reserve = align(_reserve_at_beginning, _alignment);
if (end_of_reserve + size <= _blocks_array[0].offset) {
// Check to see if the space immediately after the reserve is big enough to hold the new block.
struct block_allocator_blockpair *bp = &ba->blocks_array[0];
memmove(bp+1, bp, (ba->n_blocks)*sizeof(*bp));
struct blockpair *bp = &_blocks_array[0];
memmove(bp + 1, bp, _n_blocks * sizeof(*bp));
bp[0].offset = end_of_reserve;
bp[0].size = size;
ba->n_blocks++;
bp[0].size = size;
_n_blocks++;
*offset = end_of_reserve;
VALIDATE(ba);
VALIDATE();
return;
}
}
for (uint64_t blocknum = 0; blocknum +1 < ba->n_blocks; blocknum ++) {
for (uint64_t blocknum = 0; blocknum + 1 < _n_blocks; blocknum++) {
// Consider the space after blocknum
struct block_allocator_blockpair *bp = &ba->blocks_array[blocknum];
struct blockpair *bp = &_blocks_array[blocknum];
uint64_t this_offset = bp[0].offset;
uint64_t this_size = bp[0].size;
uint64_t answer_offset = align(this_offset + this_size, ba);
if (answer_offset + size > bp[1].offset) continue; // The block we want doesn't fit after this block.
uint64_t answer_offset = align(this_offset + this_size, _alignment);
if (answer_offset + size > bp[1].offset) {
continue; // The block we want doesn't fit after this block.
}
// It fits, so allocate it here.
memmove(bp+2, bp+1, (ba->n_blocks - blocknum -1)*sizeof(*bp));
memmove(bp + 2, bp + 1, (_n_blocks - blocknum - 1) * sizeof(*bp));
bp[1].offset = answer_offset;
bp[1].size = size;
ba->n_blocks++;
bp[1].size = size;
_n_blocks++;
*offset = answer_offset;
VALIDATE(ba);
VALIDATE();
return;
}
// It didn't fit anywhere, so fit it on the end.
assert(ba->n_blocks < ba->blocks_array_size);
struct block_allocator_blockpair *bp = &ba->blocks_array[ba->n_blocks];
uint64_t answer_offset = align(bp[-1].offset+bp[-1].size, ba);
assert(_n_blocks < _blocks_array_size);
struct blockpair *bp = &_blocks_array[_n_blocks];
uint64_t answer_offset = align(bp[-1].offset + bp[-1].size, _alignment);
bp->offset = answer_offset;
bp->size = size;
ba->n_blocks++;
bp->size = size;
_n_blocks++;
*offset = answer_offset;
VALIDATE(ba);
VALIDATE();
}
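To make the first-fit policy concrete (an editor's illustration with invented numbers, not code from the commit):

// With alignment 512, the reserve ending at 512, and blocks occupying
// [512, 1024) and [2048, 2560), alloc_block(512) scans the gaps in
// address order and returns offset 1024 -- the first gap that fits --
// even when a later gap would be a tighter (best-fit) match.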
static int64_t
find_block (BLOCK_ALLOCATOR ba, uint64_t offset)
// Find the index in the blocks array that has a particular offset. Requires that the block exist.
// Use binary search so it runs fast.
{
VALIDATE(ba);
if (ba->n_blocks==1) {
assert(ba->blocks_array[0].offset == offset);
int64_t block_allocator::find_block(uint64_t offset) {
VALIDATE();
if (_n_blocks == 1) {
assert(_blocks_array[0].offset == offset);
return 0;
}
uint64_t lo = 0;
uint64_t hi = ba->n_blocks;
uint64_t hi = _n_blocks;
while (1) {
assert(lo<hi); // otherwise no such block exists.
uint64_t mid = (lo+hi)/2;
uint64_t thisoff = ba->blocks_array[mid].offset;
//printf("lo=%" PRId64 " hi=%" PRId64 " mid=%" PRId64 " thisoff=%" PRId64 " offset=%" PRId64 "\n", lo, hi, mid, thisoff, offset);
uint64_t thisoff = _blocks_array[mid].offset;
if (thisoff < offset) {
lo = mid+1;
} else if (thisoff > offset) {
@@ -350,69 +314,64 @@ find_block (BLOCK_ALLOCATOR ba, uint64_t offset)
// a 0-sized block can share offset with a non-zero sized block.
// The non-zero sized block is not exchangeable with a zero sized block (or vice versa),
// so inserting 0-sized blocks can cause corruption here.
void
block_allocator_free_block (BLOCK_ALLOCATOR ba, uint64_t offset) {
VALIDATE(ba);
int64_t bn = find_block(ba, offset);
assert(bn>=0); // we require that there is a block with that offset. Might as well abort if no such block exists.
ba->n_bytes_in_use -= ba->blocks_array[bn].size;
memmove(&ba->blocks_array[bn], &ba->blocks_array[bn+1], (ba->n_blocks-bn-1) * sizeof(struct block_allocator_blockpair));
ba->n_blocks--;
VALIDATE(ba);
void block_allocator::free_block(uint64_t offset) {
VALIDATE();
int64_t bn = find_block(offset);
assert(bn >= 0); // we require that there is a block with that offset.
_n_bytes_in_use -= _blocks_array[bn].size;
memmove(&_blocks_array[bn], &_blocks_array[bn + 1],
(_n_blocks - bn - 1) * sizeof(struct blockpair));
_n_blocks--;
VALIDATE();
}
uint64_t
block_allocator_block_size (BLOCK_ALLOCATOR ba, uint64_t offset) {
int64_t bn = find_block(ba, offset);
assert(bn>=0); // we require that there is a block with that offset. Might as well abort if no such block exists.
return ba->blocks_array[bn].size;
uint64_t block_allocator::block_size(uint64_t offset) {
int64_t bn = find_block(offset);
assert(bn >= 0); // we require that there is a block with that offset.
return _blocks_array[bn].size;
}
uint64_t
block_allocator_allocated_limit (BLOCK_ALLOCATOR ba) {
if (ba->n_blocks==0) return ba->reserve_at_beginning;
else {
struct block_allocator_blockpair *last = &ba->blocks_array[ba->n_blocks-1];
uint64_t block_allocator::allocated_limit() const {
if (_n_blocks == 0) {
return _reserve_at_beginning;
} else {
struct blockpair *last = &_blocks_array[_n_blocks - 1];
return last->offset + last->size;
}
}
int
block_allocator_get_nth_block_in_layout_order (BLOCK_ALLOCATOR ba, uint64_t b, uint64_t *offset, uint64_t *size)
// Effect: Consider the blocks in sorted order. The reserved block at the beginning is number 0. The next one is number 1 and so forth.
// Return the offset and size of the block with that number.
// Return 0 if there is such a block; return nonzero if b is too big.
{
if (b==0) {
*offset=0;
*size =ba->reserve_at_beginning;
int block_allocator::get_nth_block_in_layout_order(uint64_t b, uint64_t *offset, uint64_t *size) {
if (b == 0) {
*offset = 0;
*size = _reserve_at_beginning;
return 0;
} else if (b > ba->n_blocks) {
} else if (b > _n_blocks) {
return -1;
} else {
*offset=ba->blocks_array[b-1].offset;
*size =ba->blocks_array[b-1].size;
*offset = _blocks_array[b - 1].offset;
*size = _blocks_array[b - 1].size;
return 0;
}
}
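Callers (in practice only tests) can walk the whole layout with it; an editor's sketch, assuming an initialized block_allocator ba and <inttypes.h> for PRIu64:

// Entry 0 is the reserved region at the start of the file; entries
// 1 through _n_blocks are the allocated blocks in address order.
uint64_t offset, size;
for (uint64_t b = 0; ba.get_nth_block_in_layout_order(b, &offset, &size) == 0; b++) {
    printf("%" PRIu64 ": offset=%" PRIu64 " size=%" PRIu64 "\n", b, offset, size);
}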
void
block_allocator_get_unused_statistics(BLOCK_ALLOCATOR ba, TOKU_DB_FRAGMENTATION report) {
//Requires: report->file_size_bytes is filled in
//Requires: report->data_bytes is filled in
//Requires: report->checkpoint_bytes_additional is filled in
assert(ba->n_bytes_in_use == report->data_bytes + report->checkpoint_bytes_additional);
// Requires: report->file_size_bytes is filled in
// Requires: report->data_bytes is filled in
// Requires: report->checkpoint_bytes_additional is filled in
void block_allocator::get_unused_statistics(TOKU_DB_FRAGMENTATION report) {
assert(_n_bytes_in_use == report->data_bytes + report->checkpoint_bytes_additional);
report->unused_bytes = 0;
report->unused_blocks = 0;
report->unused_bytes = 0;
report->unused_blocks = 0;
report->largest_unused_block = 0;
if (ba->n_blocks > 0) {
if (_n_blocks > 0) {
//Deal with space before block 0 and after reserve:
{
struct block_allocator_blockpair *bp = &ba->blocks_array[0];
assert(bp->offset >= align(ba->reserve_at_beginning, ba));
uint64_t free_space = bp->offset - align(ba->reserve_at_beginning, ba);
struct blockpair *bp = &_blocks_array[0];
assert(bp->offset >= align(_reserve_at_beginning, _alignment));
uint64_t free_space = bp->offset - align(_reserve_at_beginning, _alignment);
if (free_space > 0) {
report->unused_bytes += free_space;
report->unused_blocks++;
@@ -423,12 +382,12 @@ block_allocator_get_unused_statistics(BLOCK_ALLOCATOR ba, TOKU_DB_FRAGMENTATION
}
//Deal with space between blocks:
for (uint64_t blocknum = 0; blocknum +1 < ba->n_blocks; blocknum ++) {
for (uint64_t blocknum = 0; blocknum + 1 < _n_blocks; blocknum++) {
// Consider the space after blocknum
struct block_allocator_blockpair *bp = &ba->blocks_array[blocknum];
struct blockpair *bp = &_blocks_array[blocknum];
uint64_t this_offset = bp[0].offset;
uint64_t this_size = bp[0].size;
uint64_t end_of_this_block = align(this_offset+this_size, ba);
uint64_t end_of_this_block = align(this_offset+this_size, _alignment);
uint64_t next_offset = bp[1].offset;
uint64_t free_space = next_offset - end_of_this_block;
if (free_space > 0) {
@@ -442,10 +401,10 @@ block_allocator_get_unused_statistics(BLOCK_ALLOCATOR ba, TOKU_DB_FRAGMENTATION
//Deal with space after last block
{
struct block_allocator_blockpair *bp = &ba->blocks_array[ba->n_blocks-1];
struct blockpair *bp = &_blocks_array[_n_blocks-1];
uint64_t this_offset = bp[0].offset;
uint64_t this_size = bp[0].size;
uint64_t end_of_this_block = align(this_offset+this_size, ba);
uint64_t end_of_this_block = align(this_offset+this_size, _alignment);
if (end_of_this_block < report->file_size_bytes) {
uint64_t free_space = report->file_size_bytes - end_of_this_block;
assert(free_space > 0);
@@ -456,10 +415,9 @@ block_allocator_get_unused_statistics(BLOCK_ALLOCATOR ba, TOKU_DB_FRAGMENTATION
}
}
}
}
else {
//No blocks. Just the reserve.
uint64_t end_of_this_block = align(ba->reserve_at_beginning, ba);
} else {
// No blocks. Just the reserve.
uint64_t end_of_this_block = align(_reserve_at_beginning, _alignment);
if (end_of_this_block < report->file_size_bytes) {
uint64_t free_space = report->file_size_bytes - end_of_this_block;
assert(free_space > 0);
@@ -471,3 +429,15 @@ block_allocator_get_unused_statistics(BLOCK_ALLOCATOR ba, TOKU_DB_FRAGMENTATION
}
}
}
void block_allocator::validate() const {
uint64_t n_bytes_in_use = _reserve_at_beginning;
for (uint64_t i = 0; i < _n_blocks; i++) {
n_bytes_in_use += _blocks_array[i].size;
if (i > 0) {
assert(_blocks_array[i].offset > _blocks_array[i - 1].offset);
assert(_blocks_array[i].offset >= _blocks_array[i - 1].offset + _blocks_array[i - 1].size);
}
}
assert(n_bytes_in_use == _n_bytes_in_use);
}
@@ -92,133 +92,146 @@ PATENT RIGHTS GRANT:
#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include "fttypes.h"
#include <db.h>
#define BLOCK_ALLOCATOR_ALIGNMENT 4096
// How much must be reserved at the beginning for the block?
// The actual header is 8+4+4+8+8+4+8 + the length of the db names + 1 pointer for each root.
// So 4096 should be enough.
#define BLOCK_ALLOCATOR_HEADER_RESERVE 4096
#if (BLOCK_ALLOCATOR_HEADER_RESERVE % BLOCK_ALLOCATOR_ALIGNMENT) != 0
#error
#endif
#include "portability/toku_stdint.h"
// Block allocator.
// Overview: A block allocator manages the allocation of variable-sized blocks.
//
// A block allocator manages the allocation of variable-sized blocks.
// The translation of block numbers to addresses is handled elsewhere.
// The allocation of block numbers is handled elsewhere.
// We can create a block allocator.
//
// When creating a block allocator we also specify a certain-sized
// block at the beginning that is preallocated (and cannot be allocated
// or freed)
// block at the beginning that is preallocated (and cannot be allocated or freed)
//
// We can allocate blocks of a particular size at a particular location.
// We can allocate blocks of a particular size at a location chosen by the allocator.
// We can free blocks.
// We can determine the size of a block.
#define BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE (2*BLOCK_ALLOCATOR_HEADER_RESERVE)
typedef struct block_allocator *BLOCK_ALLOCATOR;
void create_block_allocator (BLOCK_ALLOCATOR * ba, uint64_t reserve_at_beginning, uint64_t alignment);
// Effect: Create a block allocator, in which the first RESERVE_AT_BEGINNING bytes are not put into a block.
// All blocks start on a multiple of ALIGNMENT.
// Aborts if we run out of memory.
// Parameters
// ba (OUT): Result stored here.
// reserve_at_beginning (IN) Size of reserved block at beginning. This size does not have to be aligned.
// alignment (IN) Block alignment.
void destroy_block_allocator (BLOCK_ALLOCATOR *ba);
// Effect: Destroy a block allocator at *ba.
// Also, set *ba=NULL.
// Rationale: If there was only one copy of the pointer, this kills that copy too.
// Parameters:
// ba (IN/OUT):
void block_allocator_alloc_block_at (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t offset);
// Effect: Allocate a block of the specified size at a particular offset.
// Aborts if anything goes wrong.
// The performance of this function may be as bad as Theta(N), where N is the number of blocks currently in use.
// Usage note: To allocate several blocks (e.g., when opening a FT), use block_allocator_alloc_blocks_at().
// Requires: The resulting block may not overlap any other allocated block.
// And the offset must be a multiple of the block alignment.
// Parameters:
// ba (IN/OUT): The block allocator. (Modifies ba.)
// size (IN): The size of the block.
// offset (IN): The location of the block.
struct block_allocator_blockpair {
uint64_t offset;
uint64_t size;
class block_allocator {
public:
static const size_t BLOCK_ALLOCATOR_ALIGNMENT = 4096;
// How much must be reserved at the beginning for the block?
// The actual header is 8+4+4+8+8+4+8 + the length of the db names + 1 pointer for each root.
// So 4096 should be enough.
static const size_t BLOCK_ALLOCATOR_HEADER_RESERVE = 4096;
static_assert(BLOCK_ALLOCATOR_HEADER_RESERVE % BLOCK_ALLOCATOR_ALIGNMENT == 0,
"block allocator header must have proper alignment");
static const size_t BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE = BLOCK_ALLOCATOR_HEADER_RESERVE * 2;
// Effect: Create a block allocator, in which the first RESERVE_AT_BEGINNING bytes are not put into a block.
// All blocks start on a multiple of ALIGNMENT.
// Aborts if we run out of memory.
// Parameters
// reserve_at_beginning (IN) Size of reserved block at beginning. This size does not have to be aligned.
// alignment (IN) Block alignment.
void create(uint64_t reserve_at_beginning, uint64_t alignment);
// Effect: Destroy this block allocator
void destroy();
// Effect: Allocate a block of the specified size at a particular offset.
// Aborts if anything goes wrong.
// The performance of this function may be as bad as Theta(N), where N is the number of blocks currently in use.
// Usage note: To allocate several blocks (e.g., when opening a FT), use block_allocator_alloc_blocks_at().
// Requires: The resulting block may not overlap any other allocated block.
// And the offset must be a multiple of the block alignment.
// Parameters:
// size (IN): The size of the block.
// offset (IN): The location of the block.
void alloc_block_at(uint64_t size, uint64_t offset);
struct blockpair {
uint64_t offset;
uint64_t size;
blockpair(uint64_t o, uint64_t s) :
offset(o), size(s) {
}
};
// Effect: Take pairs in any order, and add them all, as if we did block_allocator_alloc_block() on each pair.
// This should run in time O(N + M log M) where N is the number of blocks in ba, and M is the number of new blocks.
// Modifies: pairs (sorts them).
void alloc_blocks_at(uint64_t n_blocks, blockpair *pairs);
// Effect: Allocate a block of the specified size at an address chosen by the allocator.
// Aborts if anything goes wrong.
// The block address will be a multiple of the alignment.
// Parameters:
// ba (IN/OUT): The block allocator. (Modifies ba.)
// size (IN): The size of the block. (The size does not have to be aligned.)
// offset (OUT): The location of the block.
void alloc_block(uint64_t size, uint64_t *offset);
// Effect: Free the block at offset.
// Requires: There must be a block currently allocated at that offset.
// Parameters:
// ba (IN/OUT): The block allocator. (Modifies ba.)
// offset (IN): The offset of the block.
void free_block(uint64_t offset);
// Effect: Return the size of the block that starts at offset.
// Requires: There must be a block currently allocated at that offset.
// Parameters:
// ba (IN/OUT): The block allocator. (Modifies ba.)
// offset (IN): The offset of the block.
uint64_t block_size(uint64_t offset);
// Effect: Check to see if the block allocator is OK. This may take a long time.
// Usage Hints: Probably only use this for unit tests.
// TODO: Private?
void validate() const;
// Effect: Return the unallocated block address of "infinite" size.
// That is, return the smallest address that is above all the allocated blocks.
uint64_t allocated_limit() const;
// Effect: Consider the blocks in sorted order. The reserved block at the beginning is number 0. The next one is number 1 and so forth.
// Return the offset and size of the block with that number.
// Return 0 if there is such a block; return nonzero if b is too big.
// Rationale: This is probably useful only for tests.
int get_nth_block_in_layout_order(uint64_t b, uint64_t *offset, uint64_t *size);
// Effect: Fill in report to indicate how the file is used.
// Requires:
// report->file_size_bytes is filled in
// report->data_bytes is filled in
// report->checkpoint_bytes_additional is filled in
void get_unused_statistics(TOKU_DB_FRAGMENTATION report);
// Effect: Merge dst[d] and src[s] into dst[d+s], merging in place.
// Initially dst and src hold sorted arrays (sorted by increasing offset).
// Finally dst contains all d+s elements sorted in order.
// Requires:
// dst and src are sorted.
// dst must be large enough to hold all d + s entries
// No blocks may overlap.
// Rationale: This is exposed so it can be tested by a glass box tester.
static void merge_blockpairs_into(uint64_t d, struct blockpair dst[],
uint64_t s, const struct blockpair src[]);
private:
void grow_blocks_array_by(uint64_t n_to_add);
void grow_blocks_array();
int64_t find_block(uint64_t offset);
static int compare_blockpairs(const void *av, const void *bv);
// How much to reserve at the beginning
uint64_t _reserve_at_beginning;
// Block alignment
uint64_t _alignment;
// How many blocks
uint64_t _n_blocks;
// How big is the blocks_array. Must be >= n_blocks.
uint64_t _blocks_array_size;
// These blocks are sorted by address.
struct blockpair *_blocks_array;
// Including the reserve_at_beginning
uint64_t _n_bytes_in_use;
};
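Putting the new API together, a minimal end-to-end use looks like this (editor's sketch, using only the members declared above):

#include <assert.h>
#include "ft/block_allocator.h"

int main(void) {
    block_allocator ba;
    ba.create(4096, 512);           // 4096-byte reserve, 512-byte alignment
    uint64_t offset;
    ba.alloc_block(1000, &offset);  // the allocator picks an aligned offset
    assert(offset % 512 == 0);
    assert(ba.block_size(offset) == 1000);
    ba.free_block(offset);
    ba.destroy();
    return 0;
}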
void block_allocator_alloc_blocks_at (BLOCK_ALLOCATOR ba, uint64_t n_blocks, struct block_allocator_blockpair *pairs);
// Effect: Take pairs in any order, and add them all, as if we did block_allocator_alloc_block() on each pair.
// This should run in time O(N + M log M) where N is the number of blocks in ba, and M is the number of new blocks.
// Modifies: pairs (sorts them).
void block_allocator_alloc_block (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t *offset);
// Effect: Allocate a block of the specified size at an address chosen by the allocator.
// Aborts if anything goes wrong.
// The block address will be a multiple of the alignment.
// Parameters:
// ba (IN/OUT): The block allocator. (Modifies ba.)
// size (IN): The size of the block. (The size does not have to be aligned.)
// offset (OUT): The location of the block.
void block_allocator_free_block (BLOCK_ALLOCATOR ba, uint64_t offset);
// Effect: Free the block at offset.
// Requires: There must be a block currently allocated at that offset.
// Parameters:
// ba (IN/OUT): The block allocator. (Modifies ba.)
// offset (IN): The offset of the block.
uint64_t block_allocator_block_size (BLOCK_ALLOCATOR ba, uint64_t offset);
// Effect: Return the size of the block that starts at offset.
// Requires: There must be a block currently allocated at that offset.
// Parameters:
// ba (IN/OUT): The block allocator. (Modifies ba.)
// offset (IN): The offset of the block.
void block_allocator_validate (BLOCK_ALLOCATOR ba);
// Effect: Check to see if the block allocator is OK. This may take a long time.
// Usage Hints: Probably only use this for unit tests.
void block_allocator_print (BLOCK_ALLOCATOR ba);
// Effect: Print information about the block allocator.
// Rationale: This is probably useful only for debugging.
uint64_t block_allocator_allocated_limit (BLOCK_ALLOCATOR ba);
// Effect: Return the unallocated block address of "infinite" size.
// That is, return the smallest address that is above all the allocated blocks.
int block_allocator_get_nth_block_in_layout_order (BLOCK_ALLOCATOR ba, uint64_t b, uint64_t *offset, uint64_t *size);
// Effect: Consider the blocks in sorted order. The reserved block at the beginning is number 0. The next one is number 1 and so forth.
// Return the offset and size of the block with that number.
// Return 0 if there is such a block; return nonzero if b is too big.
// Rationale: This is probably useful only for tests.
void block_allocator_get_unused_statistics(BLOCK_ALLOCATOR ba, TOKU_DB_FRAGMENTATION report);
// Effect: Fill in report to indicate how the file is used.
// Requires:
// report->file_size_bytes is filled in
// report->data_bytes is filled in
// report->checkpoint_bytes_additional is filled in
void block_allocator_merge_blockpairs_into (uint64_t d, struct block_allocator_blockpair dst[/*d*/],
uint64_t s, const struct block_allocator_blockpair src[/*s*/]);
// Effect: Merge dst[d] and src[s] into dst[d+s], merging in place.
// Initially dst and src hold sorted arrays (sorted by increasing offset).
// Finally dst contains all d+s elements sorted in order.
// Requires:
// dst and src are sorted.
// dst must be large enough.
// No blocks may overlap.
// Rationale: This is exposed so it can be tested by a glass box tester. Otherwise it would be a static (file-scope) function inside block_allocator.c
@@ -89,20 +89,21 @@ PATENT RIGHTS GRANT:
#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include <toku_portability.h>
#include "ft-internal.h" // ugly but pragmatic, need access to dirty bits while holding translation lock
#include "fttypes.h"
#include "block_table.h"
#include "memory.h"
#include "toku_assert.h"
#include <toku_pthread.h>
#include "block_allocator.h"
#include "rbuf.h"
#include "wbuf.h"
#include <util/nb_mutex.h>
#include "portability/toku_portability.h"
#include "portability/memory.h"
#include "portability/toku_assert.h"
#include "portability/toku_pthread.h"
#include "ft/block_allocator.h"
#include "ft/block_table.h"
#include "ft/ft-internal.h" // ugly but pragmatic, need access to dirty bits while holding translation lock
#include "ft/fttypes.h"
// TODO: reorganize this dependency
#include "ft/ft-ops.h" // for toku_maybe_truncate_file
#include "ft/rbuf.h"
#include "ft/wbuf.h"
#include "util/nb_mutex.h"
//When the translation (btt) is stored on disk:
// In Header:
@@ -157,8 +158,8 @@ struct block_table {
struct translation checkpointed; // the translation for the data that shall remain inviolate on disk until the next checkpoint finishes, after which any blocks used only in this translation can be freed.
// The in-memory data structure for block allocation. There is no on-disk data structure for block allocation.
// Note: This is *allocation* not *translation*. The block_allocator is unaware of which blocks are used for which translation, but simply allocates and deallocates blocks.
BLOCK_ALLOCATOR block_allocator;
// Note: This is *allocation* not *translation*. The bt_block_allocator is unaware of which blocks are used for which translation, but simply allocates and deallocates blocks.
block_allocator bt_block_allocator;
toku_mutex_t mutex;
struct nb_mutex safe_file_size_lock;
bool checkpoint_skipped;
@@ -189,7 +190,7 @@ ft_set_dirty(FT ft, bool for_checkpoint){
static void
maybe_truncate_file(BLOCK_TABLE bt, int fd, uint64_t size_needed_before) {
toku_mutex_assert_locked(&bt->mutex);
uint64_t new_size_needed = block_allocator_allocated_limit(bt->block_allocator);
uint64_t new_size_needed = bt->bt_block_allocator.allocated_limit();
//Save a call to toku_os_get_file_size (kernel call) if unlikely to be useful.
if (new_size_needed < size_needed_before && new_size_needed < bt->safe_file_size) {
nb_mutex_lock(&bt->safe_file_size_lock, &bt->mutex);
@@ -308,10 +309,6 @@ toku_block_translation_note_start_checkpoint_unlocked (BLOCK_TABLE bt) {
bt->checkpoint_skipped = false;
}
//#define PRNTF(str, b, siz, ad, bt) printf("%s[%d] %s %" PRId64 " %" PRId64 " %" PRId64 "\n", __FUNCTION__, __LINE__, str, b, siz, ad); fflush(stdout); if (bt) block_allocator_validate(((BLOCK_TABLE)(bt))->block_allocator);
//Debugging function
#define PRNTF(str, b, siz, ad, bt)
void toku_block_translation_note_skipped_checkpoint (BLOCK_TABLE bt) {
//Purpose: alert block translation that the checkpoint was skipped, e.g. for a non-dirty header
lock_for_blocktable(bt);
@@ -334,7 +331,7 @@ void
toku_block_translation_note_end_checkpoint (BLOCK_TABLE bt, int fd) {
// Free unused blocks
lock_for_blocktable(bt);
uint64_t allocated_limit_at_start = block_allocator_allocated_limit(bt->block_allocator);
uint64_t allocated_limit_at_start = bt->bt_block_allocator.allocated_limit();
paranoid_invariant_notnull(bt->inprogress.block_translation);
if (bt->checkpoint_skipped) {
toku_free(bt->inprogress.block_translation);
@@ -354,8 +351,7 @@ toku_block_translation_note_end_checkpoint (BLOCK_TABLE bt, int fd) {
struct block_translation_pair *pair = &t->block_translation[i];
if (pair->size > 0 && !translation_prevents_freeing(&bt->inprogress, make_blocknum(i), pair)) {
assert(!translation_prevents_freeing(&bt->current, make_blocknum(i), pair));
PRNTF("free", i, pair->size, pair->u.diskoff, bt);
block_allocator_free_block(bt->block_allocator, pair->u.diskoff);
bt->bt_block_allocator.free_block(pair->u.diskoff);
}
}
toku_free(bt->checkpointed.block_translation);
@@ -434,8 +430,7 @@ toku_ft_unlock (FT ft) {
void
toku_block_free(BLOCK_TABLE bt, uint64_t offset) {
lock_for_blocktable(bt);
PRNTF("freeSOMETHINGunknown", 0L, 0L, offset, bt);
block_allocator_free_block(bt->block_allocator, offset);
bt->bt_block_allocator.free_block(offset);
unlock_for_blocktable(bt);
}
@@ -463,14 +458,12 @@ blocknum_realloc_on_disk_internal (BLOCK_TABLE bt, BLOCKNUM b, DISKOFF size, DIS
struct translation *t = &bt->current;
struct block_translation_pair old_pair = t->block_translation[b.b];
PRNTF("old", b.b, old_pair.size, old_pair.u.diskoff, bt);
//Free the old block if it is not still in use by the checkpoint in progress or the previous checkpoint
bool cannot_free = (bool)
((!for_checkpoint && translation_prevents_freeing(&bt->inprogress, b, &old_pair)) ||
translation_prevents_freeing(&bt->checkpointed, b, &old_pair));
if (!cannot_free && old_pair.u.diskoff!=diskoff_unused) {
PRNTF("Freed", b.b, old_pair.size, old_pair.u.diskoff, bt);
block_allocator_free_block(bt->block_allocator, old_pair.u.diskoff);
bt->bt_block_allocator.free_block(old_pair.u.diskoff);
}
uint64_t allocator_offset = diskoff_unused;
@@ -478,12 +471,11 @@ PRNTF("Freed", b.b, old_pair.size, old_pair.u.diskoff, bt);
if (size > 0) {
// Allocate a new block if the size is greater than 0,
// if the size is just 0, offset will be set to diskoff_unused
block_allocator_alloc_block(bt->block_allocator, size, &allocator_offset);
bt->bt_block_allocator.alloc_block(size, &allocator_offset);
}
t->block_translation[b.b].u.diskoff = allocator_offset;
*offset = allocator_offset;
PRNTF("New", b.b, t->block_translation[b.b].size, t->block_translation[b.b].u.diskoff, bt);
//Update inprogress btt if appropriate (if called because Pending bit is set).
if (for_checkpoint) {
paranoid_invariant(b.b < bt->inprogress.length_of_array);
@@ -544,8 +536,7 @@ static void blocknum_alloc_translation_on_disk_unlocked(BLOCK_TABLE bt)
//Allocate a new block
int64_t size = calculate_size_on_disk(t);
uint64_t offset;
block_allocator_alloc_block(bt->block_allocator, size, &offset);
PRNTF("blokAllokator", 1L, size, offset, bt);
bt->bt_block_allocator.alloc_block(size, &offset);
t->block_translation[b.b].u.diskoff = offset;
t->block_translation[b.b].size = size;
}
@@ -668,7 +659,6 @@ free_blocknum_in_translation(struct translation *t, BLOCKNUM b)
verify_valid_freeable_blocknum(t, b);
paranoid_invariant(t->block_translation[b.b].size != size_is_free);
PRNTF("free_blocknum", b.b, t->block_translation[b.b].size, t->block_translation[b.b].u.diskoff, bt);
t->block_translation[b.b].size = size_is_free;
t->block_translation[b.b].u.next_free_blocknum = t->blocknum_freelist_head;
t->blocknum_freelist_head = b;
@@ -697,8 +687,7 @@ free_blocknum_unlocked(BLOCK_TABLE bt, BLOCKNUM *bp, FT ft, bool for_checkpoint)
(translation_prevents_freeing(&bt->inprogress, b, &old_pair) ||
translation_prevents_freeing(&bt->checkpointed, b, &old_pair));
if (!cannot_free) {
PRNTF("free_blocknum_free", b.b, old_pair.size, old_pair.u.diskoff, bt);
block_allocator_free_block(bt->block_allocator, old_pair.u.diskoff);
bt->bt_block_allocator.free_block(old_pair.u.diskoff);
}
}
else {
@@ -859,7 +848,7 @@ toku_blocktable_destroy(BLOCK_TABLE *btp) {
if (bt->inprogress.block_translation) toku_free(bt->inprogress.block_translation);
if (bt->checkpointed.block_translation) toku_free(bt->checkpointed.block_translation);
destroy_block_allocator(&bt->block_allocator);
bt->bt_block_allocator.destroy();
blocktable_lock_destroy(bt);
nb_mutex_destroy(&bt->safe_file_size_lock);
toku_free(bt);
@@ -874,20 +863,18 @@ blocktable_create_internal (void) {
nb_mutex_init(&bt->safe_file_size_lock);
//There are two headers, so we reserve space for two.
uint64_t reserve_per_header = BLOCK_ALLOCATOR_HEADER_RESERVE;
uint64_t reserve_per_header = block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE;
//Must reserve in multiples of BLOCK_ALLOCATOR_ALIGNMENT
//Round up the per-header usage if necessary.
//We want each header aligned.
uint64_t remainder = BLOCK_ALLOCATOR_HEADER_RESERVE % BLOCK_ALLOCATOR_ALIGNMENT;
if (remainder!=0) {
reserve_per_header += BLOCK_ALLOCATOR_ALIGNMENT;
uint64_t remainder = block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE % block_allocator::BLOCK_ALLOCATOR_ALIGNMENT;
if (remainder != 0) {
reserve_per_header += block_allocator::BLOCK_ALLOCATOR_ALIGNMENT;
reserve_per_header -= remainder;
}
assert(2*reserve_per_header == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
create_block_allocator(&bt->block_allocator,
BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE,
BLOCK_ALLOCATOR_ALIGNMENT);
assert(2 * reserve_per_header == block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
bt->bt_block_allocator.create(block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE, block_allocator::BLOCK_ALLOCATOR_ALIGNMENT);
return bt;
}
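With this commit's constants the rounding is a no-op: HEADER_RESERVE and ALIGNMENT are both 4096, so the remainder is 0 and the assert checks 2 * 4096 == TOTAL_HEADER_RESERVE. The round-up branch only fires for a hypothetical unaligned reserve (editor's note):

// e.g. reserve_per_header = 5000, alignment = 4096:
//   remainder = 5000 % 4096 = 904
//   reserve_per_header += 4096 - 904, giving 8192 -- the next multiple.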
@@ -942,7 +929,6 @@ translation_deserialize_from_buffer(struct translation *t, // destination int
for (i=0; i < t->length_of_array; i++) {
t->block_translation[i].u.diskoff = rbuf_diskoff(&rt);
t->block_translation[i].size = rbuf_diskoff(&rt);
PRNTF("ReadIn", i, t->block_translation[i].size, t->block_translation[i].u.diskoff, NULL);
}
assert(calculate_size_on_disk(t) == (int64_t)size_on_disk);
assert(t->block_translation[RESERVED_BLOCKNUM_TRANSLATION].size == (int64_t)size_on_disk);
@@ -952,24 +938,22 @@ PRNTF("ReadIn", i, t->block_translation[i].size, t->block_translation[i].u.disko
}
// We just initialized a translation, inform block allocator to reserve space for each blocknum in use.
static void
blocktable_note_translation (BLOCK_ALLOCATOR allocator, struct translation *t) {
static void blocktable_note_translation(block_allocator *ba, struct translation *t) {
//This is where the space for them will be reserved (in addition to normal blocks).
//See RESERVED_BLOCKNUMS
// Previously this added blocks one at a time. Now we make an array and pass it in so it can be sorted and merged. See #3218.
struct block_allocator_blockpair *XMALLOC_N(t->smallest_never_used_blocknum.b, pairs);
struct block_allocator::blockpair *XMALLOC_N(t->smallest_never_used_blocknum.b, pairs);
uint64_t n_pairs = 0;
for (int64_t i=0; i<t->smallest_never_used_blocknum.b; i++) {
struct block_translation_pair pair = t->block_translation[i];
if (pair.size > 0) {
paranoid_invariant(pair.u.diskoff != diskoff_unused);
int cur_pair = n_pairs++;
pairs[cur_pair] = (struct block_allocator_blockpair) { .offset = (uint64_t) pair.u.diskoff,
.size = (uint64_t) pair.size };
pairs[cur_pair] = block_allocator::blockpair(pair.u.diskoff, pair.size);
}
}
block_allocator_alloc_blocks_at(allocator, n_pairs, pairs);
ba->alloc_blocks_at(n_pairs, pairs);
toku_free(pairs);
}
@@ -989,7 +973,7 @@ toku_blocktable_create_from_buffer(int fd,
if (r != 0) {
goto exit;
}
blocktable_note_translation(bt->block_allocator, &bt->checkpointed);
blocktable_note_translation(&bt->bt_block_allocator, &bt->checkpointed);
// we just filled in checkpointed, now copy it to current.
copy_translation(&bt->current, &bt->checkpointed, TRANSLATION_CURRENT);
@@ -1009,7 +993,7 @@ void
toku_blocktable_create_new(BLOCK_TABLE *btp) {
BLOCK_TABLE bt = blocktable_create_internal();
translation_default(&bt->checkpointed); // create default btt (empty except for reserved blocknums)
blocktable_note_translation(bt->block_allocator, &bt->checkpointed);
blocktable_note_translation(&bt->bt_block_allocator, &bt->checkpointed);
// we just created a default checkpointed, now copy it to current.
copy_translation(&bt->current, &bt->checkpointed, TRANSLATION_CURRENT);
@@ -1103,9 +1087,9 @@ toku_block_table_get_fragmentation_unlocked(BLOCK_TABLE bt, TOKU_DB_FRAGMENTATIO
//Requires: report->file_size_bytes is already filled in.
//Count the headers.
report->data_bytes = BLOCK_ALLOCATOR_HEADER_RESERVE;
report->data_bytes = block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE;
report->data_blocks = 1;
report->checkpoint_bytes_additional = BLOCK_ALLOCATOR_HEADER_RESERVE;
report->checkpoint_bytes_additional = block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE;
report->checkpoint_blocks_additional = 1;
struct translation *current = &bt->current;
@@ -1145,7 +1129,7 @@ toku_block_table_get_fragmentation_unlocked(BLOCK_TABLE bt, TOKU_DB_FRAGMENTATIO
}
}
block_allocator_get_unused_statistics(bt->block_allocator, report);
bt->bt_block_allocator.get_unused_statistics(report);
}
void
@@ -89,6 +89,7 @@ PATENT RIGHTS GRANT:
#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include "ft/block_table.h"
#include "ft/fttypes.h"
#include "ft/ft-cachetable-wrappers.h"
#include "ft/ft-flusher.h"
@@ -89,6 +89,7 @@ PATENT RIGHTS GRANT:
#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include "ft/block_table.h"
#include "ft/ft.h"
#include "ft/ft-cachetable-wrappers.h"
#include "ft/ft-internal.h"
@@ -1334,11 +1335,7 @@ maybe_merge_pinned_nodes(
}
}
static void merge_remove_key_callback(
BLOCKNUM *bp,
bool for_checkpoint,
void *extra)
{
static void merge_remove_key_callback(BLOCKNUM *bp, bool for_checkpoint, void *extra) {
FT ft = (FT) extra;
toku_free_blocknum(ft->blocktable, bp, ft, for_checkpoint);
}
@@ -109,7 +109,6 @@ PATENT RIGHTS GRANT:
#include "toku_list.h"
#include <util/omt.h>
#include "leafentry.h"
#include "block_table.h"
#include "compress.h"
#include <util/omt.h>
#include "ft/bndata.h"
@@ -117,6 +116,7 @@ PATENT RIGHTS GRANT:
#include "ft/rollback.h"
#include "ft/msg_buffer.h"
struct block_table;
struct ft_search;
enum { KEY_VALUE_OVERHEAD = 8 }; /* Must store the two lengths. */
@@ -229,7 +229,7 @@ struct ft {
// These are not read-only:
// protected by blocktable lock
BLOCK_TABLE blocktable;
struct block_table *blocktable;
// protected by atomic builtins
STAT64INFO_S in_memory_stats;
@@ -385,7 +385,7 @@ unsigned int toku_serialize_ftnode_size(FTNODE node); /* How much space will it
void toku_verify_or_set_counts(FTNODE);
size_t toku_serialize_ft_size (FT_HEADER h);
void toku_serialize_ft_to (int fd, FT_HEADER h, BLOCK_TABLE blocktable, CACHEFILE cf);
void toku_serialize_ft_to (int fd, FT_HEADER h, struct block_table *blocktable, CACHEFILE cf);
void toku_serialize_ft_to_wbuf (
struct wbuf *wbuf,
FT_HEADER h,
@@ -200,21 +200,22 @@ basement nodes, bulk fetch, and partial fetch:
*/
#include "checkpoint.h"
#include "cursor.h"
#include "ft.h"
#include "ft-cachetable-wrappers.h"
#include "ft-flusher.h"
#include "ft-internal.h"
#include "node.h"
#include "ft_layout_version.h"
#include "log-internal.h"
#include "sub_block.h"
#include "txn_manager.h"
#include "leafentry.h"
#include "xids.h"
#include "ft_msg.h"
#include "ule.h"
#include "ft/block_table.h"
#include "ft/checkpoint.h"
#include "ft/cursor.h"
#include "ft/ft.h"
#include "ft/ft-cachetable-wrappers.h"
#include "ft/ft-flusher.h"
#include "ft/ft-internal.h"
#include "ft/ft_layout_version.h"
#include "ft/ft_msg.h"
#include "ft/leafentry.h"
#include "ft/log-internal.h"
#include "ft/node.h"
#include "ft/sub_block.h"
#include "ft/txn_manager.h"
#include "ft/ule.h"
#include "ft/xids.h"
#include <toku_race_tools.h>
@@ -89,9 +89,10 @@ PATENT RIGHTS GRANT:
#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include "compress.h"
#include "ft.h"
#include "ft-internal.h"
#include "ft/block_table.h"
#include "ft/compress.h"
#include "ft/ft.h"
#include "ft/ft-internal.h"
// not version-sensitive because we only serialize a descriptor using the current layout_version
uint32_t
@@ -509,7 +510,7 @@ serialize_ft_min_size (uint32_t version) {
abort();
}
lazy_assert(size <= BLOCK_ALLOCATOR_HEADER_RESERVE);
lazy_assert(size <= block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE);
return size;
}
@@ -586,7 +587,7 @@ int deserialize_ft_from_fd_into_rbuf(int fd,
//If too big, it is corrupt. We would probably notice during checksum
//but may have to do a multi-gigabyte malloc+read to find out.
//If its too small reading rbuf would crash, so verify.
if (size > BLOCK_ALLOCATOR_HEADER_RESERVE || size < min_header_size) {
if (size > block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE || size < min_header_size) {
r = TOKUDB_DICTIONARY_NO_HEADER;
goto exit;
}
@@ -675,7 +676,7 @@ toku_deserialize_ft_from(int fd,
h0_acceptable = true;
}
toku_off_t header_1_off = BLOCK_ALLOCATOR_HEADER_RESERVE;
toku_off_t header_1_off = block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE;
r1 = deserialize_ft_from_fd_into_rbuf(fd, header_1_off, &rb_1, &checkpoint_count_1, &checkpoint_lsn_1, &version_1);
if (r1 == 0 && checkpoint_lsn_1.lsn <= max_acceptable_lsn.lsn) {
h1_acceptable = true;
@@ -754,7 +755,7 @@ toku_deserialize_ft_from(int fd,
size_t toku_serialize_ft_size (FT_HEADER h) {
size_t size = serialize_ft_min_size(h->layout_version);
//There is no dynamic data.
lazy_assert(size <= BLOCK_ALLOCATOR_HEADER_RESERVE);
lazy_assert(size <= block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE);
return size;
}
@@ -816,7 +817,7 @@ void toku_serialize_ft_to (int fd, FT_HEADER h, BLOCK_TABLE blocktable, CACHEFIL
struct wbuf w_main;
size_t size_main = toku_serialize_ft_size(h);
size_t size_main_aligned = roundup_to_multiple(512, size_main);
assert(size_main_aligned<BLOCK_ALLOCATOR_HEADER_RESERVE);
assert(size_main_aligned < block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE);
char *XMALLOC_N_ALIGNED(512, size_main_aligned, mainbuf);
for (size_t i=size_main; i<size_main_aligned; i++) mainbuf[i]=0; // initialize the end of the buffer with zeros
wbuf_init(&w_main, mainbuf, size_main);
@@ -844,7 +845,7 @@ void toku_serialize_ft_to (int fd, FT_HEADER h, BLOCK_TABLE blocktable, CACHEFIL
//Alternate writing header to two locations:
// Beginning (0) or BLOCK_ALLOCATOR_HEADER_RESERVE
toku_off_t main_offset;
main_offset = (h->checkpoint_count & 0x1) ? 0 : BLOCK_ALLOCATOR_HEADER_RESERVE;
main_offset = (h->checkpoint_count & 0x1) ? 0 : block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE;
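// Editor's note: the two header slots alternate by checkpoint parity --
// an odd checkpoint_count writes at offset 0, an even one at offset
// BLOCK_ALLOCATOR_HEADER_RESERVE -- so the previous checkpoint's header
// survives a torn write of the current one.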
toku_os_full_pwrite(fd, w_main.buf, size_main_aligned, main_offset);
toku_free(w_main.buf);
toku_free(w_translation.buf);
@@ -97,10 +97,11 @@ PATENT RIGHTS GRANT:
* For each nonleaf node: All the messages have keys that are between the associated pivot keys ( left_pivot_key < message <= right_pivot_key)
*/
#include "ft-cachetable-wrappers.h"
#include "ft-internal.h"
#include "ft.h"
#include "node.h"
#include "ft/block_table.h"
#include "ft/ft.h"
#include "ft/ft-cachetable-wrappers.h"
#include "ft/ft-internal.h"
#include "ft/node.h"
static int
compare_pairs (FT_HANDLE ft_handle, const DBT *a, const DBT *b) {
@@ -89,6 +89,7 @@ PATENT RIGHTS GRANT:
#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include "ft/block_table.h"
#include "ft/ft.h"
#include "ft/ft-cachetable-wrappers.h"
#include "ft/ft-internal.h"
@@ -107,10 +108,10 @@ toku_reset_root_xid_that_created(FT ft, TXNID new_root_xid_that_created) {
// hold lock around setting and clearing of dirty bit
// (see cooperative use of dirty bit in ft_begin_checkpoint())
toku_ft_lock (ft);
toku_ft_lock(ft);
ft->h->root_xid_that_created = new_root_xid_that_created;
ft->h->dirty = 1;
toku_ft_unlock (ft);
toku_ft_unlock(ft);
}
static void
......
@@ -89,6 +89,7 @@ PATENT RIGHTS GRANT:
#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include "ft/block_table.h"
#include "ft/cachetable.h"
#include "ft/compress.h"
#include "ft/ft.h"
@@ -200,7 +200,7 @@ deserialize_headers(int fd, struct ft **h1p, struct ft **h2p)
}
}
{
toku_off_t header_1_off = BLOCK_ALLOCATOR_HEADER_RESERVE;
toku_off_t header_1_off = block_allocator::BLOCK_ALLOCATOR_HEADER_RESERVE;
r1 = deserialize_ft_from_fd_into_rbuf(
fd,
header_1_off,
@@ -100,18 +100,19 @@ PATENT RIGHTS GRANT:
#include <string.h>
#include <fcntl.h>
#include <util/x1764.h>
#include "loader/loader-internal.h"
#include "ft-internal.h"
#include "sub_block.h"
#include "sub_block_map.h"
#include "loader/pqueue.h"
#include "loader/dbufio.h"
#include "leafentry.h"
#include "log-internal.h"
#include "ft.h"
#include "node.h"
#include "ft/block_table.h"
#include "ft/ft.h"
#include "ft/ft-internal.h"
#include "ft/leafentry.h"
#include "ft/loader/loader-internal.h"
#include "ft/loader/pqueue.h"
#include "ft/loader/dbufio.h"
#include "ft/log-internal.h"
#include "ft/node.h"
#include "ft/sub_block.h"
#include "ft/sub_block_map.h"
#include "util/x1764.h"
static size_t (*os_fwrite_fun)(const void *,size_t,size_t,FILE*)=NULL;
void ft_loader_set_os_fwrite (size_t (*fwrite_fun)(const void*,size_t,size_t,FILE*)) {
@@ -94,12 +94,13 @@ PATENT RIGHTS GRANT:
#include <limits.h>
#include <unistd.h>
#include "ft.h"
#include "log-internal.h"
#include "txn_manager.h"
#include "rollback_log_node_cache.h"
#include "ft/block_table.h"
#include "ft/ft.h"
#include "ft/log-internal.h"
#include "ft/txn_manager.h"
#include "ft/rollback_log_node_cache.h"
#include <util/status.h>
#include "util/status.h"
static const int log_format_version=TOKU_LOG_VERSION;
@@ -89,15 +89,16 @@ PATENT RIGHTS GRANT:
#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include <toku_portability.h>
#include <memory.h>
#include "portability/memory.h"
#include "portability/toku_portability.h"
#include "ft-internal.h"
#include "fttypes.h"
#include "rollback.h"
#include "rollback-ct-callbacks.h"
#include "ft/block_table.h"
#include "ft/ft-internal.h"
#include "ft/fttypes.h"
#include "ft/rollback.h"
#include "ft/rollback-ct-callbacks.h"
#include <util/memarena.h>
#include "util/memarena.h"
// Address used as a sentinel. Otherwise unused.
static struct serialized_rollback_log_node cloned_rollback;
@@ -91,25 +91,21 @@ PATENT RIGHTS GRANT:
#include <toku_stdint.h>
#include "ft.h"
#include "log-internal.h"
#include "rollback-ct-callbacks.h"
#include "ft/block_table.h"
#include "ft/ft.h"
#include "ft/log-internal.h"
#include "ft/rollback-ct-callbacks.h"
static void rollback_unpin_remove_callback(CACHEKEY* cachekey, bool for_checkpoint, void* extra) {
FT CAST_FROM_VOIDP(h, extra);
toku_free_blocknum(
h->blocktable,
cachekey,
h,
for_checkpoint
);
FT CAST_FROM_VOIDP(ft, extra);
toku_free_blocknum(ft->blocktable, cachekey, ft, for_checkpoint);
}
void toku_rollback_log_unpin_and_remove(TOKUTXN txn, ROLLBACK_LOG_NODE log) {
int r;
CACHEFILE cf = txn->logger->rollback_cachefile;
FT CAST_FROM_VOIDP(h, toku_cachefile_get_userdata(cf));
r = toku_cachetable_unpin_and_remove (cf, log->ct_pair, rollback_unpin_remove_callback, h);
FT CAST_FROM_VOIDP(ft, toku_cachefile_get_userdata(cf));
r = toku_cachetable_unpin_and_remove (cf, log->ct_pair, rollback_unpin_remove_callback, ft);
assert(r == 0);
}
@@ -91,42 +91,40 @@ PATENT RIGHTS GRANT:
#include "test.h"
static void ba_alloc_at (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t offset) {
block_allocator_validate(ba);
block_allocator_alloc_block_at(ba, size*512, offset*512);
block_allocator_validate(ba);
static void ba_alloc_at(block_allocator *ba, uint64_t size, uint64_t offset) {
ba->validate();
ba->alloc_block_at(size * 512, offset * 512);
ba->validate();
}
static void ba_alloc (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t *answer) {
block_allocator_validate(ba);
static void ba_alloc(block_allocator *ba, uint64_t size, uint64_t *answer) {
ba->validate();
uint64_t actual_answer;
block_allocator_alloc_block(ba, 512*size, &actual_answer);
block_allocator_validate(ba);
ba->alloc_block(512 * size, &actual_answer);
ba->validate();
assert(actual_answer%512==0);
*answer = actual_answer/512;
}
static void ba_free (BLOCK_ALLOCATOR ba, uint64_t offset) {
block_allocator_validate(ba);
block_allocator_free_block(ba, offset*512);
block_allocator_validate(ba);
static void ba_free(block_allocator *ba, uint64_t offset) {
ba->validate();
ba->free_block(offset * 512);
ba->validate();
}
static void
ba_check_l (BLOCK_ALLOCATOR ba, uint64_t blocknum_in_layout_order, uint64_t expected_offset, uint64_t expected_size)
{
static void ba_check_l(block_allocator *ba, uint64_t blocknum_in_layout_order,
uint64_t expected_offset, uint64_t expected_size) {
uint64_t actual_offset, actual_size;
int r = block_allocator_get_nth_block_in_layout_order(ba, blocknum_in_layout_order, &actual_offset, &actual_size);
int r = ba->get_nth_block_in_layout_order(blocknum_in_layout_order, &actual_offset, &actual_size);
assert(r==0);
assert(expected_offset*512 == actual_offset);
assert(expected_size *512 == actual_size);
}
static void
ba_check_none (BLOCK_ALLOCATOR ba, uint64_t blocknum_in_layout_order)
{
static void ba_check_none(block_allocator *ba, uint64_t blocknum_in_layout_order) {
uint64_t actual_offset, actual_size;
int r = block_allocator_get_nth_block_in_layout_order(ba, blocknum_in_layout_order, &actual_offset, &actual_size);
int r = ba->get_nth_block_in_layout_order(blocknum_in_layout_order, &actual_offset, &actual_size);
assert(r==-1);
}
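Taken together, the rewritten test helpers above imply the shape of the new class API. A sketch of the interface as reconstructed from these call sites (member names come from the diff; parameter names and the exact signatures are assumptions):

    class block_allocator {
    public:
        void create(uint64_t reserve_at_beginning, uint64_t alignment);
        void destroy();
        void alloc_block(uint64_t size, uint64_t *offset);
        void alloc_block_at(uint64_t size, uint64_t offset);
        void free_block(uint64_t offset);
        uint64_t block_size(uint64_t offset);
        uint64_t allocated_limit();
        int get_nth_block_in_layout_order(uint64_t n, uint64_t *offset, uint64_t *size);
        void validate();
        // Also visible later in this commit:
        //   struct blockpair { uint64_t offset; uint64_t size; };
        //   static void merge_blockpairs_into(...);
        //   static const uint64_t BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE;
    };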
......@@ -134,12 +132,13 @@ ba_check_none (BLOCK_ALLOCATOR ba, uint64_t blocknum_in_layout_order)
// Simple block allocator test
static void
test_ba0 (void) {
BLOCK_ALLOCATOR ba;
block_allocator allocator;
block_allocator *ba = &allocator;
uint64_t b0, b1;
create_block_allocator(&ba, 100*512, 1*512);
assert(block_allocator_allocated_limit(ba)==100*512);
ba->create(100*512, 1*512);
assert(ba->allocated_limit()==100*512);
ba_alloc_at(ba, 50, 100);
assert(block_allocator_allocated_limit(ba)==150*512);
assert(ba->allocated_limit()==150*512);
ba_alloc_at(ba, 25, 150);
ba_alloc (ba, 10, &b0);
ba_check_l (ba, 0, 0, 100);
......@@ -154,9 +153,9 @@ test_ba0 (void) {
assert(b0==160);
ba_alloc(ba, 10, &b0);
ba_alloc(ba, 113, &b1);
assert(113*512==block_allocator_block_size(ba, b1 *512));
assert(10 *512==block_allocator_block_size(ba, b0 *512));
assert(50 *512==block_allocator_block_size(ba, 100*512));
assert(113*512==ba->block_size(b1 *512));
assert(10 *512==ba->block_size(b0 *512));
assert(50 *512==ba->block_size(100*512));
uint64_t b2, b3, b4, b5, b6, b7;
ba_alloc(ba, 100, &b2);
......@@ -183,15 +182,15 @@ test_ba0 (void) {
ba_free(ba, b4);
ba_alloc(ba, 100, &b4);
destroy_block_allocator(&ba);
assert(ba==0);
ba->destroy();
}
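Note the ownership change in test_ba0: the old C API heap-allocated the handle and nulled the caller's pointer on destroy (hence the old assert(ba==0)), while the class version lives wherever the caller puts it and destroy() releases only its internal state. A minimal sketch of the new lifecycle, under that assumption:

    block_allocator ba;             // storage owned by the caller (stack here)
    ba.create(100 * 512, 512);      // reserve header space, 512-byte alignment
    uint64_t offset;
    ba.alloc_block(4096, &offset);  // aligned allocation; offset returned out-param
    ba.destroy();                   // frees internal bookkeeping, not 'ba' itself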
// Exercise the allocator manually to get coverage of all the code in the block allocator.
static void
test_ba1 (int n_initial) {
BLOCK_ALLOCATOR ba;
create_block_allocator(&ba, 0*512, 1*512);
block_allocator allocator;
block_allocator *ba = &allocator;
ba->create(0*512, 1*512);
int i;
int n_blocks=0;
uint64_t blocks[1000];
......@@ -213,19 +212,19 @@ test_ba1 (int n_initial) {
}
}
destroy_block_allocator(&ba);
assert(ba==0);
ba->destroy();
}
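test_ba1 drives the allocator through interleaved allocations and frees to reach every code path. The loop body is collapsed in this hunk, so the following is only an assumption about its shape, not the test's actual code:

    block_allocator ba;
    ba.create(0 * 512, 1 * 512);
    uint64_t blocks[1000];
    int n_blocks = 0;
    for (int i = 0; i < 1000; i++) {
        if (n_blocks > 0 && (random() % 2)) {
            // free a randomly chosen live block
            int victim = random() % n_blocks;
            ba.free_block(blocks[victim]);
            blocks[victim] = blocks[--n_blocks];
        } else {
            // allocate a small block and remember its offset
            ba.alloc_block(512 * (1 + random() % 5), &blocks[n_blocks++]);
        }
    }
    ba.destroy();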
// Check to see if it is first fit or best fit.
static void
test_ba2 (void)
{
BLOCK_ALLOCATOR ba;
block_allocator allocator;
block_allocator *ba = &allocator;
uint64_t b[6];
enum { BSIZE = 1024 };
create_block_allocator(&ba, 100*512, BSIZE*512);
assert(block_allocator_allocated_limit(ba)==100*512);
ba->create(100*512, BSIZE*512);
assert(ba->allocated_limit()==100*512);
ba_check_l (ba, 0, 0, 100);
ba_check_none (ba, 1);
......@@ -344,7 +343,7 @@ test_ba2 (void)
ba_alloc(ba, 100, &b11);
assert(b11==5*BSIZE);
destroy_block_allocator(&ba);
ba->destroy();
}
int
......
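The comment before test_ba2 is the point of that test: the allocator is expected to be first-fit, so a freed hole near the front of the file is reused even when a later hole fits more tightly. A worked illustration of the distinction the test's asserted offsets pin down:

    // Two free holes: 100 units at offset 0, 50 units at offset 300.
    // A request for 50 units resolves differently under each policy:
    //   first-fit -> offset 0    (earliest hole that is large enough)
    //   best-fit  -> offset 300  (smallest hole that fits exactly)
    // test_ba2's asserts only hold for the first-fit placement.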
......@@ -435,10 +435,10 @@ test_prefetching(void) {
DISKOFF offset;
DISKOFF size;
toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset==block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset == block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100);
}
FTNODE_DISK_DATA ndd = NULL;
......@@ -450,7 +450,7 @@ test_prefetching(void) {
toku_destroy_ftnode_internals(&sn);
toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_block_free(ft_h->blocktable, block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&ft_h->blocktable);
ft_h->cmp.destroy();
toku_free(ft_h->h);
......
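From here on the commit mechanically rescopes the header-reserve constant: the formerly free-floating BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE becomes a member of block_allocator, so every call site qualifies it with the class name. A sketch of the declaration this implies (the real value lives in block_allocator.h; 8192 below is an assumption for illustration only):

    class block_allocator {
    public:
        // Space reserved at the start of the file for header copies
        // (value assumed here, not taken from the commit).
        static const uint64_t BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE = 8192;
        // ...
    };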
......@@ -371,10 +371,10 @@ test_serialize_nonleaf(void) {
DISKOFF offset;
DISKOFF size;
toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset==block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset == block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100);
}
FTNODE_DISK_DATA ndd = NULL;
......@@ -387,7 +387,7 @@ test_serialize_nonleaf(void) {
toku_destroy_ftnode_internals(&sn);
toku_free(ndd);
toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_block_free(ft_h->blocktable, block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&ft_h->blocktable);
toku_free(ft_h->h);
ft_h->cmp.destroy();
......@@ -451,10 +451,10 @@ test_serialize_leaf(void) {
DISKOFF offset;
DISKOFF size;
toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset==block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset == block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100);
}
FTNODE_DISK_DATA ndd = NULL;
......@@ -466,7 +466,7 @@ test_serialize_leaf(void) {
toku_destroy_ftnode_internals(&sn);
toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_block_free(ft_h->blocktable, block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&ft_h->blocktable);
toku_free(ft_h->h);
toku_free(ft_h);
......
......@@ -211,10 +211,10 @@ test_serialize_leaf(int valsize, int nelts, double entropy, int ser_runs, int de
DISKOFF offset;
DISKOFF size;
toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset==block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset == block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100);
}
......@@ -277,7 +277,7 @@ test_serialize_leaf(int valsize, int nelts, double entropy, int ser_runs, int de
toku_ftnode_free(&sn);
toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_block_free(ft_h->blocktable, block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&ft_h->blocktable);
ft_h->cmp.destroy();
toku_free(ft_h->h);
......@@ -374,10 +374,10 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy, int ser_runs, int
DISKOFF offset;
DISKOFF size;
toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset==block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset == block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100);
}
......@@ -412,7 +412,7 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy, int ser_runs, int
toku_ftnode_free(&dn);
toku_destroy_ftnode_internals(&sn);
toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_block_free(ft_h->blocktable, block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&ft_h->blocktable);
toku_free(ft_h->h);
ft_h->cmp.destroy();
......
......@@ -315,10 +315,10 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, bool do_clone) {
DISKOFF offset;
DISKOFF size;
toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset==block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset == block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100);
}
FTNODE_DISK_DATA src_ndd = NULL;
......@@ -373,7 +373,7 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, bool do_clone) {
toku_ftnode_free(&dn);
toku_destroy_ftnode_internals(&sn);
toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_block_free(ft_h->blocktable, block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&ft_h->blocktable);
toku_free(ft_h->h);
toku_free(ft_h);
......@@ -448,10 +448,10 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, bool do_clone
DISKOFF offset;
DISKOFF size;
toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset==block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset == block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100);
}
FTNODE_DISK_DATA src_ndd = NULL;
......@@ -508,7 +508,7 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, bool do_clone
toku_ftnode_free(&dn);
toku_destroy_ftnode_internals(&sn);
toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_block_free(ft_h->blocktable, block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&ft_h->blocktable);
toku_free(ft_h->h);
toku_free(ft_h);
......@@ -574,10 +574,10 @@ test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, bool do_clone) {
DISKOFF offset;
DISKOFF size;
toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset==block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset == block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100);
}
......@@ -636,7 +636,7 @@ test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, bool do_clone) {
toku_ftnode_free(&dn);
toku_destroy_ftnode_internals(&sn);
toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_block_free(ft_h->blocktable, block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&ft_h->blocktable);
toku_free(ft_h->h);
toku_free(ft_h);
......@@ -709,10 +709,10 @@ test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, bool do_clone)
DISKOFF offset;
DISKOFF size;
toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset==block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset == block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100);
}
......@@ -773,7 +773,7 @@ test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, bool do_clone)
toku_ftnode_free(&dn);
toku_destroy_ftnode_internals(&sn);
toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_block_free(ft_h->blocktable, block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&ft_h->blocktable);
toku_free(ft_h->h);
toku_free(ft_h);
......@@ -845,10 +845,10 @@ test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, bool
DISKOFF offset;
DISKOFF size;
toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset==block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset == block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100);
}
FTNODE_DISK_DATA src_ndd = NULL;
......@@ -901,7 +901,7 @@ test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, bool
toku_ftnode_free(&dn);
toku_destroy_ftnode_internals(&sn);
toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_block_free(ft_h->blocktable, block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&ft_h->blocktable);
toku_free(ft_h->h);
toku_free(ft_h);
......@@ -965,10 +965,10 @@ test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type b
DISKOFF offset;
DISKOFF size;
toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset==block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset == block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100);
}
......@@ -1000,7 +1000,7 @@ test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type b
toku_ftnode_free(&dn);
toku_destroy_ftnode_internals(&sn);
toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_block_free(ft_h->blocktable, block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&ft_h->blocktable);
toku_free(ft_h->h);
toku_free(ft_h);
......@@ -1088,10 +1088,10 @@ test_serialize_nonleaf(enum ftnode_verify_type bft, bool do_clone) {
DISKOFF offset;
DISKOFF size;
toku_blocknum_realloc_on_disk(ft_h->blocktable, b, 100, &offset, ft_h, fd, false);
assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset==block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_translate_blocknum_to_offset_size(ft_h->blocktable, b, &offset, &size);
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(offset == block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100);
}
FTNODE_DISK_DATA src_ndd = NULL;
......@@ -1123,7 +1123,7 @@ test_serialize_nonleaf(enum ftnode_verify_type bft, bool do_clone) {
toku_ftnode_free(&dn);
toku_destroy_ftnode_internals(&sn);
toku_block_free(ft_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_block_free(ft_h->blocktable, block_allocator::BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_blocktable_destroy(&ft_h->blocktable);
ft_h->cmp.destroy();
toku_free(ft_h->h);
......
......@@ -95,7 +95,7 @@ PATENT RIGHTS GRANT:
int verbose = 0;
static void
print_array (uint64_t n, const struct block_allocator_blockpair a[/*n*/]) {
print_array (uint64_t n, const struct block_allocator::blockpair a[/*n*/]) {
printf("{");
for (uint64_t i=0; i<n; i++) printf(" %016lx", (long)a[i].offset);
printf("}\n");
......@@ -103,20 +103,20 @@ print_array (uint64_t n, const struct block_allocator_blockpair a[/*n*/]) {
static int
compare_blockpairs (const void *av, const void *bv) {
const struct block_allocator_blockpair *CAST_FROM_VOIDP(a, av);
const struct block_allocator_blockpair *CAST_FROM_VOIDP(b, bv);
const struct block_allocator::blockpair *CAST_FROM_VOIDP(a, av);
const struct block_allocator::blockpair *CAST_FROM_VOIDP(b, bv);
if (a->offset < b->offset) return -1;
if (a->offset > b->offset) return +1;
return 0;
}
static void
test_merge (uint64_t an, const struct block_allocator_blockpair a[/*an*/],
uint64_t bn, const struct block_allocator_blockpair b[/*bn*/]) {
test_merge (uint64_t an, const struct block_allocator::blockpair a[/*an*/],
uint64_t bn, const struct block_allocator::blockpair b[/*bn*/]) {
if (verbose>1) { printf("a:"); print_array(an, a); }
if (verbose>1) { printf("b:"); print_array(bn, b); }
struct block_allocator_blockpair *MALLOC_N(an+bn, q);
struct block_allocator_blockpair *MALLOC_N(an+bn, m);
struct block_allocator::blockpair *MALLOC_N(an+bn, q);
struct block_allocator::blockpair *MALLOC_N(an+bn, m);
if (q==0 || m==0) {
fprintf(stderr, "malloc failed, continuing\n");
goto malloc_failed;
......@@ -131,7 +131,7 @@ test_merge (uint64_t an, const struct block_allocator_blockpair a[/*an*/],
qsort(q, an+bn, sizeof(*q), compare_blockpairs);
if (verbose>1) { printf("q:"); print_array(an+bn, q); }
if (verbose) printf("merge\n");
block_allocator_merge_blockpairs_into(an, m, bn, b);
block_allocator::merge_blockpairs_into(an, m, bn, b);
if (verbose) printf("compare\n");
if (verbose>1) { printf("m:"); print_array(an+bn, m); }
for (uint64_t i=0; i<an+bn; i++) {
......@@ -163,8 +163,8 @@ compute_b (uint64_t i, int mode) {
static void
test_merge_n_m (uint64_t n, uint64_t m, int mode)
{
struct block_allocator_blockpair *MALLOC_N(n, na);
struct block_allocator_blockpair *MALLOC_N(m, ma);
struct block_allocator::blockpair *MALLOC_N(n, na);
struct block_allocator::blockpair *MALLOC_N(m, ma);
if (na==0 || ma==0) {
fprintf(stderr, "malloc failed, continuing\n");
goto malloc_failed;
......@@ -197,8 +197,8 @@ test_big_merge (void) {
uint64_t an = twoG;
uint64_t bn = 1;
struct block_allocator_blockpair *MALLOC_N(an+bn, a);
struct block_allocator_blockpair *MALLOC_N(bn, b);
struct block_allocator::blockpair *MALLOC_N(an+bn, a);
struct block_allocator::blockpair *MALLOC_N(bn, b);
if (a == nullptr) {
fprintf(stderr, "%s:%u malloc failed, continuing\n", __FUNCTION__, __LINE__);
goto malloc_failed;
......@@ -211,7 +211,7 @@ test_big_merge (void) {
assert(b);
for (uint64_t i=0; i<an; i++) a[i].offset=i+1;
b[0].offset = 0;
block_allocator_merge_blockpairs_into(an, a, bn, b);
block_allocator::merge_blockpairs_into(an, a, bn, b);
for (uint64_t i=0; i<an+bn; i++) assert(a[i].offset == i);
malloc_failed:
toku_free(a);
......
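merge_blockpairs_into is now a static member operating on the nested block_allocator::blockpair type. The big-merge test pins down its contract: dst holds d sorted pairs followed by at least s free slots, src holds s sorted pairs, and the sorted union is left in dst. A sketch of such an in-place merge under those assumptions (not the commit's actual implementation):

    // Walk from the back so no dst element is overwritten before it is read.
    static void merge_sorted_blockpairs(uint64_t d, block_allocator::blockpair dst[],
                                        uint64_t s, const block_allocator::blockpair src[]) {
        uint64_t w = d + s;           // one past the last slot to write
        while (s > 0) {
            if (d > 0 && dst[d - 1].offset > src[s - 1].offset) {
                dst[--w] = dst[--d];  // larger tail element comes from dst
            } else {
                dst[--w] = src[--s];  // larger tail element comes from src
            }
        }
        // dst[0..d) is already sorted and in place.
    }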
......@@ -91,11 +91,6 @@ PATENT RIGHTS GRANT:
// Dump a fractal tree file
#include "cachetable.h"
#include "ft.h"
#include "fttypes.h"
#include "ft-internal.h"
#include "ft/node.h"
#include <ctype.h>
#include <stdint.h>
#include <stdio.h>
......@@ -103,6 +98,13 @@ PATENT RIGHTS GRANT:
#include <inttypes.h>
#include <limits.h>
#include "ft/block_table.h"
#include "ft/cachetable.h"
#include "ft/ft.h"
#include "ft/fttypes.h"
#include "ft/ft-internal.h"
#include "ft/node.h"
static int do_dump_data = 1;
static int do_interactive = 0;
static int do_header = 0;
......