Commit a13a8e84 authored by John Esmet's avatar John Esmet

FT-93 Align pivotkeys to a 4 byte boundary. Align memory allocation to a 64 byte boundary.
parent 725fc5ec
...@@ -150,11 +150,15 @@ class ftnode_pivot_keys { ...@@ -150,11 +150,15 @@ class ftnode_pivot_keys {
size_t total_size() const; size_t total_size() const;
private: private:
inline size_t _align4(size_t x) const {
return roundup_to_multiple(4, x);
}
// effect: create pivot keys, in fixed key format, by copying the given key array // effect: create pivot keys, in fixed key format, by copying the given key array
void _create_from_fixed_keys(const char *fixedkeys, size_t fixed_keylen, int n); void _create_from_fixed_keys(const char *fixedkeys, size_t fixed_keylen, int n);
char *_fixed_key(int i) const { char *_fixed_key(int i) const {
return &_fixed_keys[i * _fixed_keylen]; return &_fixed_keys[i * _fixed_keylen_aligned];
} }
bool _fixed_format() const { bool _fixed_format() const {
...@@ -187,7 +191,10 @@ class ftnode_pivot_keys { ...@@ -187,7 +191,10 @@ class ftnode_pivot_keys {
// If every key is _fixed_keylen long, then _fixed_key is a // If every key is _fixed_keylen long, then _fixed_key is a
// packed array of keys.. // packed array of keys..
char *_fixed_keys; char *_fixed_keys;
// The actual length of the fixed key
size_t _fixed_keylen; size_t _fixed_keylen;
// The aligned length that we use for fixed key storage
size_t _fixed_keylen_aligned;
// ..otherwise _fixed_keys is null and we store an array of dbts, // ..otherwise _fixed_keys is null and we store an array of dbts,
// each representing a key. this is simpler but less cache-efficient. // each representing a key. this is simpler but less cache-efficient.
......
...@@ -102,6 +102,7 @@ void ftnode_pivot_keys::create_empty() { ...@@ -102,6 +102,7 @@ void ftnode_pivot_keys::create_empty() {
_total_size = 0; _total_size = 0;
_fixed_keys = nullptr; _fixed_keys = nullptr;
_fixed_keylen = 0; _fixed_keylen = 0;
_fixed_keylen_aligned = 0;
_dbt_keys = nullptr; _dbt_keys = nullptr;
} }
...@@ -121,15 +122,16 @@ void ftnode_pivot_keys::create_from_dbts(const DBT *keys, int n) { ...@@ -121,15 +122,16 @@ void ftnode_pivot_keys::create_from_dbts(const DBT *keys, int n) {
if (keys_same_size && _num_pivots > 0) { if (keys_same_size && _num_pivots > 0) {
// if so, store pivots in a tightly packed array of fixed length keys // if so, store pivots in a tightly packed array of fixed length keys
_fixed_keylen = keys[0].size; _fixed_keylen = keys[0].size;
_total_size = _fixed_keylen * _num_pivots; _fixed_keylen_aligned = _align4(_fixed_keylen);
XMALLOC_N(_total_size, _fixed_keys); _total_size = _fixed_keylen_aligned * _num_pivots;
XMALLOC_N_ALIGNED(64, _total_size, _fixed_keys);
for (int i = 0; i < _num_pivots; i++) { for (int i = 0; i < _num_pivots; i++) {
invariant(keys[i].size == _fixed_keylen); invariant(keys[i].size == _fixed_keylen);
memcpy(_fixed_key(i), keys[i].data, _fixed_keylen); memcpy(_fixed_key(i), keys[i].data, _fixed_keylen);
} }
} else { } else {
// otherwise we'll just store the pivots in an array of dbts // otherwise we'll just store the pivots in an array of dbts
XMALLOC_N(_num_pivots, _dbt_keys); XMALLOC_N_ALIGNED(64, _num_pivots, _dbt_keys);
for (int i = 0; i < _num_pivots; i++) { for (int i = 0; i < _num_pivots; i++) {
size_t size = keys[i].size; size_t size = keys[i].size;
toku_memdup_dbt(&_dbt_keys[i], keys[i].data, size); toku_memdup_dbt(&_dbt_keys[i], keys[i].data, size);
...@@ -142,7 +144,8 @@ void ftnode_pivot_keys::_create_from_fixed_keys(const char *fixedkeys, size_t fi ...@@ -142,7 +144,8 @@ void ftnode_pivot_keys::_create_from_fixed_keys(const char *fixedkeys, size_t fi
create_empty(); create_empty();
_num_pivots = n; _num_pivots = n;
_fixed_keylen = fixed_keylen; _fixed_keylen = fixed_keylen;
_total_size = _fixed_keylen * _num_pivots; _fixed_keylen_aligned = _align4(fixed_keylen);
_total_size = _fixed_keylen_aligned * _num_pivots;
XMEMDUP_N(_fixed_keys, fixedkeys, _total_size); XMEMDUP_N(_fixed_keys, fixedkeys, _total_size);
} }
...@@ -168,6 +171,7 @@ void ftnode_pivot_keys::destroy() { ...@@ -168,6 +171,7 @@ void ftnode_pivot_keys::destroy() {
_fixed_keys = nullptr; _fixed_keys = nullptr;
} }
_fixed_keylen = 0; _fixed_keylen = 0;
_fixed_keylen_aligned = 0;
_num_pivots = 0; _num_pivots = 0;
_total_size = 0; _total_size = 0;
} }
...@@ -177,8 +181,9 @@ void ftnode_pivot_keys::_convert_to_fixed_format() { ...@@ -177,8 +181,9 @@ void ftnode_pivot_keys::_convert_to_fixed_format() {
// convert to a tightly packed array of fixed length keys // convert to a tightly packed array of fixed length keys
_fixed_keylen = _dbt_keys[0].size; _fixed_keylen = _dbt_keys[0].size;
_total_size = _fixed_keylen * _num_pivots; _fixed_keylen_aligned = _align4(_fixed_keylen);
XMALLOC_N(_total_size, _fixed_keys); _total_size = _fixed_keylen_aligned * _num_pivots;
XMALLOC_N_ALIGNED(64, _total_size, _fixed_keys);
for (int i = 0; i < _num_pivots; i++) { for (int i = 0; i < _num_pivots; i++) {
invariant(_dbt_keys[i].size == _fixed_keylen); invariant(_dbt_keys[i].size == _fixed_keylen);
memcpy(_fixed_key(i), _dbt_keys[i].data, _fixed_keylen); memcpy(_fixed_key(i), _dbt_keys[i].data, _fixed_keylen);
...@@ -198,7 +203,7 @@ void ftnode_pivot_keys::_convert_to_dbt_format() { ...@@ -198,7 +203,7 @@ void ftnode_pivot_keys::_convert_to_dbt_format() {
invariant(_fixed_format()); invariant(_fixed_format());
// convert to an array of dbts // convert to an array of dbts
XREALLOC_N(_num_pivots, _dbt_keys); REALLOC_N_ALIGNED(64, _num_pivots, _dbt_keys);
for (int i = 0; i < _num_pivots; i++) { for (int i = 0; i < _num_pivots; i++) {
toku_memdup_dbt(&_dbt_keys[i], _fixed_key(i), _fixed_keylen); toku_memdup_dbt(&_dbt_keys[i], _fixed_key(i), _fixed_keylen);
} }
...@@ -218,7 +223,7 @@ void ftnode_pivot_keys::deserialize_from_rbuf(struct rbuf *rb, int n) { ...@@ -218,7 +223,7 @@ void ftnode_pivot_keys::deserialize_from_rbuf(struct rbuf *rb, int n) {
_fixed_keylen = 0; _fixed_keylen = 0;
_dbt_keys = nullptr; _dbt_keys = nullptr;
XMALLOC_N(_num_pivots, _dbt_keys); XMALLOC_N_ALIGNED(64, _num_pivots, _dbt_keys);
bool keys_same_size = true; bool keys_same_size = true;
for (int i = 0; i < _num_pivots; i++) { for (int i = 0; i < _num_pivots; i++) {
bytevec pivotkeyptr; bytevec pivotkeyptr;
...@@ -240,7 +245,7 @@ void ftnode_pivot_keys::deserialize_from_rbuf(struct rbuf *rb, int n) { ...@@ -240,7 +245,7 @@ void ftnode_pivot_keys::deserialize_from_rbuf(struct rbuf *rb, int n) {
DBT ftnode_pivot_keys::get_pivot(int i) const { DBT ftnode_pivot_keys::get_pivot(int i) const {
paranoid_invariant(i < _num_pivots); paranoid_invariant(i < _num_pivots);
if (_fixed_format()) { if (_fixed_format()) {
paranoid_invariant(i * _fixed_keylen < _total_size); paranoid_invariant(i * _fixed_keylen_aligned < _total_size);
DBT dbt; DBT dbt;
toku_fill_dbt(&dbt, _fixed_key(i), _fixed_keylen); toku_fill_dbt(&dbt, _fixed_key(i), _fixed_keylen);
return dbt; return dbt;
...@@ -272,16 +277,18 @@ void ftnode_pivot_keys::_destroy_key_dbt(int i) { ...@@ -272,16 +277,18 @@ void ftnode_pivot_keys::_destroy_key_dbt(int i) {
void ftnode_pivot_keys::_insert_at_dbt(const DBT *key, int i) { void ftnode_pivot_keys::_insert_at_dbt(const DBT *key, int i) {
// make space for a new pivot, slide existing keys to the right // make space for a new pivot, slide existing keys to the right
REALLOC_N(_num_pivots + 1, _dbt_keys); REALLOC_N_ALIGNED(64, _num_pivots + 1, _dbt_keys);
memmove(&_dbt_keys[i + 1], &_dbt_keys[i], (_num_pivots - i) * sizeof(DBT)); memmove(&_dbt_keys[i + 1], &_dbt_keys[i], (_num_pivots - i) * sizeof(DBT));
_add_key_dbt(key, i); _add_key_dbt(key, i);
} }
void ftnode_pivot_keys::_insert_at_fixed(const DBT *key, int i) { void ftnode_pivot_keys::_insert_at_fixed(const DBT *key, int i) {
REALLOC_N((_num_pivots + 1) * _fixed_keylen, _fixed_keys); REALLOC_N_ALIGNED(64, (_num_pivots + 1) * _fixed_keylen_aligned, _fixed_keys);
memmove(_fixed_key(i + 1), _fixed_key(i), (_num_pivots - i) * _fixed_keylen); // TODO: This is not going to be valgrind-safe, because we do not initialize the space
// between _fixed_keylen and _fixed_keylen_aligned (but we probably should)
memmove(_fixed_key(i + 1), _fixed_key(i), (_num_pivots - i) * _fixed_keylen_aligned);
memcpy(_fixed_key(i), key->data, _fixed_keylen); memcpy(_fixed_key(i), key->data, _fixed_keylen);
_total_size += _fixed_keylen; _total_size += _fixed_keylen_aligned;
} }
void ftnode_pivot_keys::insert_at(const DBT *key, int i) { void ftnode_pivot_keys::insert_at(const DBT *key, int i) {
...@@ -303,7 +310,7 @@ void ftnode_pivot_keys::insert_at(const DBT *key, int i) { ...@@ -303,7 +310,7 @@ void ftnode_pivot_keys::insert_at(const DBT *key, int i) {
} }
void ftnode_pivot_keys::_append_dbt(const ftnode_pivot_keys &pivotkeys) { void ftnode_pivot_keys::_append_dbt(const ftnode_pivot_keys &pivotkeys) {
REALLOC_N(_num_pivots + pivotkeys._num_pivots, _dbt_keys); REALLOC_N_ALIGNED(64, _num_pivots + pivotkeys._num_pivots, _dbt_keys);
bool other_fixed = pivotkeys._fixed_format(); bool other_fixed = pivotkeys._fixed_format();
for (int i = 0; i < pivotkeys._num_pivots; i++) { for (int i = 0; i < pivotkeys._num_pivots; i++) {
toku_memdup_dbt(&_dbt_keys[_num_pivots + i], toku_memdup_dbt(&_dbt_keys[_num_pivots + i],
...@@ -317,7 +324,7 @@ void ftnode_pivot_keys::_append_dbt(const ftnode_pivot_keys &pivotkeys) { ...@@ -317,7 +324,7 @@ void ftnode_pivot_keys::_append_dbt(const ftnode_pivot_keys &pivotkeys) {
void ftnode_pivot_keys::_append_fixed(const ftnode_pivot_keys &pivotkeys) { void ftnode_pivot_keys::_append_fixed(const ftnode_pivot_keys &pivotkeys) {
if (pivotkeys._fixed_format() && pivotkeys._fixed_keylen == _fixed_keylen) { if (pivotkeys._fixed_format() && pivotkeys._fixed_keylen == _fixed_keylen) {
// other pivotkeys have the same fixed keylen // other pivotkeys have the same fixed keylen
REALLOC_N((_num_pivots + pivotkeys._num_pivots) * _fixed_keylen, _fixed_keys); REALLOC_N_ALIGNED(64, (_num_pivots + pivotkeys._num_pivots) * _fixed_keylen_aligned, _fixed_keys);
memcpy(_fixed_key(_num_pivots), pivotkeys._fixed_keys, pivotkeys._total_size); memcpy(_fixed_key(_num_pivots), pivotkeys._fixed_keys, pivotkeys._total_size);
} else { } else {
// must convert to dbt format, other pivotkeys have different length'd keys // must convert to dbt format, other pivotkeys have different length'd keys
...@@ -366,15 +373,15 @@ void ftnode_pivot_keys::replace_at(const DBT *key, int i) { ...@@ -366,15 +373,15 @@ void ftnode_pivot_keys::replace_at(const DBT *key, int i) {
} }
void ftnode_pivot_keys::_delete_at_fixed(int i) { void ftnode_pivot_keys::_delete_at_fixed(int i) {
memmove(_fixed_key(i), _fixed_key(i + 1), (_num_pivots - 1 - i) * _fixed_keylen); memmove(_fixed_key(i), _fixed_key(i + 1), (_num_pivots - 1 - i) * _fixed_keylen_aligned);
_total_size -= _fixed_keylen; _total_size -= _fixed_keylen_aligned;
} }
void ftnode_pivot_keys::_delete_at_dbt(int i) { void ftnode_pivot_keys::_delete_at_dbt(int i) {
// slide over existing keys, then shrink down to size // slide over existing keys, then shrink down to size
_destroy_key_dbt(i); _destroy_key_dbt(i);
memmove(&_dbt_keys[i], &_dbt_keys[i + 1], (_num_pivots - 1 - i) * sizeof(DBT)); memmove(&_dbt_keys[i], &_dbt_keys[i + 1], (_num_pivots - 1 - i) * sizeof(DBT));
REALLOC_N(_num_pivots - 1, _dbt_keys); REALLOC_N_ALIGNED(64, _num_pivots - 1, _dbt_keys);
} }
void ftnode_pivot_keys::delete_at(int i) { void ftnode_pivot_keys::delete_at(int i) {
...@@ -395,7 +402,7 @@ void ftnode_pivot_keys::_split_at_fixed(int i, ftnode_pivot_keys *other) { ...@@ -395,7 +402,7 @@ void ftnode_pivot_keys::_split_at_fixed(int i, ftnode_pivot_keys *other) {
// shrink down to size // shrink down to size
_total_size = i * _fixed_keylen; _total_size = i * _fixed_keylen;
REALLOC_N(_total_size, _fixed_keys); REALLOC_N_ALIGNED(64, _total_size, _fixed_keys);
} }
void ftnode_pivot_keys::_split_at_dbt(int i, ftnode_pivot_keys *other) { void ftnode_pivot_keys::_split_at_dbt(int i, ftnode_pivot_keys *other) {
...@@ -406,7 +413,7 @@ void ftnode_pivot_keys::_split_at_dbt(int i, ftnode_pivot_keys *other) { ...@@ -406,7 +413,7 @@ void ftnode_pivot_keys::_split_at_dbt(int i, ftnode_pivot_keys *other) {
for (int k = i; k < _num_pivots; k++) { for (int k = i; k < _num_pivots; k++) {
_destroy_key_dbt(k); _destroy_key_dbt(k);
} }
REALLOC_N(i, _dbt_keys); REALLOC_N_ALIGNED(64, i, _dbt_keys);
} }
void ftnode_pivot_keys::split_at(int i, ftnode_pivot_keys *other) { void ftnode_pivot_keys::split_at(int i, ftnode_pivot_keys *other) {
...@@ -434,12 +441,12 @@ void ftnode_pivot_keys::serialize_to_wbuf(struct wbuf *wb) const { ...@@ -434,12 +441,12 @@ void ftnode_pivot_keys::serialize_to_wbuf(struct wbuf *wb) const {
int ftnode_pivot_keys::num_pivots() const { int ftnode_pivot_keys::num_pivots() const {
// if we have fixed size keys, the number of pivots should be consistent // if we have fixed size keys, the number of pivots should be consistent
paranoid_invariant(_fixed_keys == nullptr || (_total_size == _fixed_keylen * _num_pivots)); paranoid_invariant(_fixed_keys == nullptr || (_total_size == _fixed_keylen_aligned * _num_pivots));
return _num_pivots; return _num_pivots;
} }
size_t ftnode_pivot_keys::total_size() const { size_t ftnode_pivot_keys::total_size() const {
// if we have fixed size keys, the total size should be consistent // if we have fixed size keys, the total size should be consistent
paranoid_invariant(_fixed_keys == nullptr || (_total_size == _fixed_keylen * _num_pivots)); paranoid_invariant(_fixed_keys == nullptr || (_total_size == _fixed_keylen_aligned * _num_pivots));
return _total_size; return _total_size;
} }
...@@ -352,14 +352,8 @@ void toku_set_func_pread (ssize_t (*)(int, void *, size_t, off_t)); ...@@ -352,14 +352,8 @@ void toku_set_func_pread (ssize_t (*)(int, void *, size_t, off_t));
int toku_portability_init(void); int toku_portability_init(void);
void toku_portability_destroy(void); void toku_portability_destroy(void);
static inline uint64_t roundup_to_multiple(uint64_t alignment, uint64_t v)
// Effect: Return X, where X the smallest multiple of ALIGNMENT such that X>=V. // Effect: Return X, where X the smallest multiple of ALIGNMENT such that X>=V.
// Requires: ALIGNMENT is a power of two // Requires: ALIGNMENT is a power of two
{ static inline uint64_t roundup_to_multiple(uint64_t alignment, uint64_t v) {
assert(0==(alignment&(alignment-1))); // alignment must be a power of two return (v + alignment - 1) & ~(alignment - 1);
uint64_t result = (v+alignment-1)&~(alignment-1);
assert(result>=v); // The result is >=V.
assert(result%alignment==0); // The result is a multiple of alignment.
assert(result<v+alignment); // The result is the smallest such multiple of alignment.
return result;
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment