Commit 0acaea98 authored by John Esmet

FT-302 Add block allocation strategy to the block allocator. Default to
the one and only strategy so far - first fit.

parent 46ab9930
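For context, here is a minimal usage sketch of the API this commit introduces (not part of the commit itself; the reserve and alignment values are made up). create() defaults the strategy to first fit, and set_strategy() can override it before allocations begin:

```cpp
block_allocator ba;
ba.create(4096 /* reserve_at_beginning */, 512 /* alignment */);

// Redundant here, since BA_STRATEGY_FIRST_FIT is already the default set by create().
ba.set_strategy(block_allocator::BA_STRATEGY_FIRST_FIT);

uint64_t offset;
ba.alloc_block(1024, &offset);  // offset comes back as a multiple of 512
ba.destroy();
```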
@@ -119,6 +119,7 @@ void block_allocator::create(uint64_t reserve_at_beginning, uint64_t alignment)
     _blocks_array_size = 1;
     XMALLOC_N(_blocks_array_size, _blocks_array);
     _n_bytes_in_use = reserve_at_beginning;
+    _strategy = BA_STRATEGY_FIRST_FIT;
     VALIDATE();
 }
@@ -127,6 +128,10 @@ void block_allocator::destroy() {
     toku_free(_blocks_array);
 }
 
+void block_allocator::set_strategy(enum allocation_strategy strategy) {
+    _strategy = strategy;
+}
+
 void block_allocator::grow_blocks_array_by(uint64_t n_to_add) {
     if (_n_blocks + n_to_add > _blocks_array_size) {
         uint64_t new_size = _n_blocks + n_to_add;
@@ -221,6 +226,34 @@ static inline uint64_t align(uint64_t value, uint64_t ba_alignment) {
     return ((value + ba_alignment - 1) / ba_alignment) * ba_alignment;
 }
 
+static struct block_allocator::blockpair *
+choose_block_first_fit_strategy(struct block_allocator::blockpair *blocks_array,
+                                uint64_t n_blocks, uint64_t size,
+                                uint64_t alignment) {
+    // Implement first fit.
+    for (uint64_t blocknum = 0; blocknum + 1 < n_blocks; blocknum++) {
+        // Consider the space after blocknum
+        struct block_allocator::blockpair *bp = &blocks_array[blocknum];
+        uint64_t possible_offset = align(bp->offset + bp->size, alignment);
+        if (possible_offset + size <= bp[1].offset) {
+            return bp;
+        }
+    }
+    return nullptr;
+}
+
+// TODO: other strategies
+// TODO: Put strategies in their own file, ft/serialize/block_allocator_strategy.{cc,h}?
+
+struct block_allocator::blockpair *block_allocator::choose_block_to_alloc_after(size_t size) {
+    switch (_strategy) {
+    case BA_STRATEGY_FIRST_FIT:
+        return choose_block_first_fit_strategy(_blocks_array, _n_blocks, size, _alignment);
+    default:
+        abort();
+    }
+}
+
 // Effect: Allocate a block. The resulting block must be aligned on the ba->alignment (which to make direct_io happy must be a positive multiple of 512).
 void block_allocator::alloc_block(uint64_t size, uint64_t *offset) {
     // Allocator does not support size 0 blocks. See block_allocator_free_block.
@@ -228,6 +261,8 @@ void block_allocator::alloc_block(uint64_t size, uint64_t *offset) {
     grow_blocks_array();
     _n_bytes_in_use += size;
 
+    // First and only block
     if (_n_blocks == 0) {
         assert(_n_bytes_in_use == _reserve_at_beginning + size); // we know exactly how many are in use
         _blocks_array[0].offset = align(_reserve_at_beginning, _alignment);
@@ -237,11 +272,9 @@ void block_allocator::alloc_block(uint64_t size, uint64_t *offset) {
         return;
     }
 
-    // Implement first fit.
-    {
-        uint64_t end_of_reserve = align(_reserve_at_beginning, _alignment);
-        if (end_of_reserve + size <= _blocks_array[0].offset ) {
-            // Check to see if the space immediately after the reserve is big enough to hold the new block.
-            struct blockpair *bp = &_blocks_array[0];
-            memmove(bp + 1, bp, _n_blocks * sizeof(*bp));
-            bp[0].offset = end_of_reserve;
+    // Check to see if the space immediately after the reserve is big enough to hold the new block.
+    uint64_t end_of_reserve = align(_reserve_at_beginning, _alignment);
+    if (end_of_reserve + size <= _blocks_array[0].offset ) {
+        struct blockpair *bp = &_blocks_array[0];
+        memmove(bp + 1, bp, _n_blocks * sizeof(*bp));
+        bp[0].offset = end_of_reserve;
@@ -251,36 +284,27 @@ void block_allocator::alloc_block(uint64_t size, uint64_t *offset) {
             VALIDATE();
             return;
         }
-    }
 
-    for (uint64_t blocknum = 0; blocknum + 1 < _n_blocks; blocknum++) {
-        // Consider the space after blocknum
-        struct blockpair *bp = &_blocks_array[blocknum];
-        uint64_t this_offset = bp[0].offset;
-        uint64_t this_size = bp[0].size;
-        uint64_t answer_offset = align(this_offset + this_size, _alignment);
-        if (answer_offset + size > bp[1].offset) {
-            continue; // The block we want doesn't fit after this block.
-        }
-        // It fits, so allocate it here.
+    struct blockpair *bp = choose_block_first_fit_strategy(_blocks_array, _n_blocks, size, _alignment);
+    if (bp != nullptr) {
+        // our allocation strategy chose the space after `bp' to fit the new block
+        uint64_t answer_offset = align(bp->offset + bp->size, _alignment);
+        uint64_t blocknum = bp - _blocks_array;
+        assert(&_blocks_array[blocknum] == bp);
         memmove(bp + 2, bp + 1, (_n_blocks - blocknum - 1) * sizeof(*bp));
         bp[1].offset = answer_offset;
         bp[1].size = size;
-        _n_blocks++;
         *offset = answer_offset;
-        VALIDATE();
-        return;
-    }
+    } else {
+        // It didn't fit anywhere, so fit it on the end.
+        assert(_n_blocks < _blocks_array_size);
+        bp = &_blocks_array[_n_blocks];
+        uint64_t answer_offset = align(bp[-1].offset + bp[-1].size, _alignment);
+        bp->offset = answer_offset;
+        bp->size = size;
+        *offset = answer_offset;
+    }
+    _n_blocks++;
     VALIDATE();
 }
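To make the refactor concrete, here is a small standalone sketch (not from the commit; the block layout and sizes are invented) of what choose_block_first_fit_strategy computes: scan the sorted (offset, size) pairs and return the first block whose following gap can hold an aligned allocation of the requested size.

```cpp
#include <cstdint>
#include <cstdio>

struct blockpair {
    uint64_t offset, size;
};

// Round value up to the next multiple of alignment, as align() does in block_allocator.cc.
static uint64_t align_up(uint64_t value, uint64_t alignment) {
    return ((value + alignment - 1) / alignment) * alignment;
}

int main() {
    // Two in-use blocks, [512, 1536) and [4096, 4608), leave a gap of [1536, 4096).
    struct blockpair blocks[] = { {512, 1024}, {4096, 512} };
    uint64_t n_blocks = 2, size = 2048, alignment = 512;

    for (uint64_t i = 0; i + 1 < n_blocks; i++) {
        uint64_t candidate = align_up(blocks[i].offset + blocks[i].size, alignment);
        if (candidate + size <= blocks[i + 1].offset) {
            // First fit: 1536 is already aligned and 1536 + 2048 <= 4096, so it lands here.
            printf("first fit after block %llu at offset %llu\n",
                   (unsigned long long) i, (unsigned long long) candidate);
            return 0;
        }
    }
    printf("no interior gap fits; the caller allocates at the end\n");
    return 0;
}
```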
@@ -124,7 +124,12 @@ class block_allocator {
     static const size_t BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE = BLOCK_ALLOCATOR_HEADER_RESERVE * 2;
 
+    enum allocation_strategy {
+        BA_STRATEGY_FIRST_FIT = 1
+    };
+
     // Effect: Create a block allocator, in which the first RESERVE_AT_BEGINNING bytes are not put into a block.
+    //         The default allocation strategy is first fit (BA_STRATEGY_FIRST_FIT).
     //         All blocks start on a multiple of ALIGNMENT.
     //         Aborts if we run out of memory.
     // Parameters
@@ -135,6 +140,10 @@ class block_allocator {
     // Effect: Destroy this block allocator
     void destroy();
 
+    // Effect: Set the allocation strategy that the allocator should use
+    // Requires: No other threads are operating on this block allocator
+    void set_strategy(enum allocation_strategy strategy);
+
     // Effect: Allocate a block of the specified size at a particular offset.
     //         Aborts if anything goes wrong.
     //         The performance of this function may be as bad as Theta(N), where N is the number of blocks currently in use.
@@ -219,6 +228,7 @@ class block_allocator {
     void grow_blocks_array_by(uint64_t n_to_add);
     void grow_blocks_array();
     int64_t find_block(uint64_t offset);
+    struct blockpair *choose_block_to_alloc_after(size_t size);
 
     static int compare_blockpairs(const void *av, const void *bv);
@@ -234,4 +244,6 @@ class block_allocator {
     struct blockpair *_blocks_array;
     // Including the reserve_at_beginning
     uint64_t _n_bytes_in_use;
+    // The allocation strategy we are using
+    enum allocation_strategy _strategy;
 };
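As the TODO in the .cc file notes, other strategies can slot into the same switch. A hedged sketch of what one might look like, using a hypothetical BA_STRATEGY_BEST_FIT enum value that does not exist in this commit: instead of returning the first gap that fits, keep the candidate whose gap leaves the least space unused.

```cpp
// Hypothetical best-fit strategy (not in this commit). Assumes an added
// BA_STRATEGY_BEST_FIT enum value and reuses the file-local align() helper
// from block_allocator.cc.
static struct block_allocator::blockpair *
choose_block_best_fit_strategy(struct block_allocator::blockpair *blocks_array,
                               uint64_t n_blocks, uint64_t size,
                               uint64_t alignment) {
    struct block_allocator::blockpair *best_bp = nullptr;
    uint64_t best_leftover = UINT64_MAX;
    for (uint64_t blocknum = 0; blocknum + 1 < n_blocks; blocknum++) {
        struct block_allocator::blockpair *bp = &blocks_array[blocknum];
        uint64_t possible_offset = align(bp->offset + bp->size, alignment);
        if (possible_offset + size <= bp[1].offset) {
            // This gap fits; remember it if it wastes less space than the best so far.
            uint64_t leftover = bp[1].offset - (possible_offset + size);
            if (leftover < best_leftover) {
                best_bp = bp;
                best_leftover = leftover;
            }
        }
    }
    return best_bp;
}
```

choose_block_to_alloc_after() would then gain a `case BA_STRATEGY_BEST_FIT:` that calls this function, with the existing `default: abort();` still catching unknown strategy values.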