Commit bf3c862f authored by Marko Mäkelä

MDEV-22871: Clean up btr_search_sys

btr_search_sys::parts[]: A single structure for the partitions of
the adaptive hash index. Replaces the 3 separate arrays:
btr_search_latches[], btr_search_sys->hash_tables,
btr_search_sys->hash_tables[i]->heap.

hash_table_t::heap, hash_table_t::adaptive: Remove.

ha0ha.cc: Remove. Move all code to btr0sea.cc.
parent 9159b897
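
The restructuring is easiest to see as a data-layout change: three separately allocated arrays, all indexed by the same partition number, become one array of self-contained, cache-line-padded partition objects, so a lookup touches one allocation and one latch. Below is a minimal, self-contained C++17 sketch of that pattern; every name in it is an illustrative stand-in (std::shared_mutex for rw_lock_t, std::unordered_map for hash_table_t, a made-up mixing constant instead of ut_fold_ulint_pair), not the InnoDB definitions that follow in the diff.

#include <cstddef>
#include <cstdint>
#include <memory>
#include <shared_mutex>
#include <unordered_map>

// One self-contained partition replaces one slot of each of the three old
// arrays: btr_search_latches[i], btr_search_sys->hash_tables[i], and
// hash_tables[i]->heap. alignas(64) plays the role of the pad[] member:
// it keeps two partitions from sharing a cache line.
struct alignas(64) ahi_partition
{
  std::shared_mutex latch;                               // ~ rw_lock_t
  std::unordered_map<std::uint64_t, const void*> table;  // ~ hash_table_t
  // a real partition also owns a mem_heap_t for its chain nodes
};

struct ahi_system
{
  std::size_t n_parts;
  std::unique_ptr<ahi_partition[]> parts;                // ~ partition *parts

  explicit ahi_system(std::size_t n)
    : n_parts(n), parts(new ahi_partition[n]) {}

  // Partition selection mirrors get_part(): fold the (index id, space id)
  // pair into one value, then reduce it modulo the partition count.
  ahi_partition &get_part(std::uint64_t index_id, std::uint32_t space_id)
  {
    std::uint64_t fold = index_id * 0x9E3779B97F4A7C15ULL + space_id;
    return parts[fold % n_parts];
  }
};
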
--- a/storage/innobase/CMakeLists.txt
+++ b/storage/innobase/CMakeLists.txt
@@ -56,7 +56,6 @@ SET(INNOBASE_SOURCES
 	fsp/fsp0space.cc
 	fsp/fsp0sysspace.cc
 	fut/fut0lst.cc
-	ha/ha0ha.cc
 	ha/ha0storage.cc
 	ha/hash0hash.cc
 	fts/fts0fts.cc
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -3561,7 +3561,7 @@ btr_cur_optimistic_insert(
 		ut_ad(index->is_instant());
 		ut_ad(flags == BTR_NO_LOCKING_FLAG);
 	} else {
-		rw_lock_t* ahi_latch = btr_get_search_latch(index);
+		rw_lock_t* ahi_latch = btr_search_sys.get_latch(*index);
 		if (!reorg && cursor->flag == BTR_CUR_HASH) {
 			btr_search_update_hash_node_on_insert(
 				cursor, ahi_latch);
@@ -3772,7 +3772,7 @@ btr_cur_pessimistic_insert(
 		ut_ad(!(flags & BTR_CREATE_FLAG));
 	} else {
 		btr_search_update_hash_on_insert(
-			cursor, btr_get_search_latch(index));
+			cursor, btr_search_sys.get_latch(*index));
 	}
 #endif /* BTR_CUR_HASH_ADAPT */
 	if (inherit && !(flags & BTR_NO_LOCKING_FLAG)) {
@@ -4274,7 +4274,7 @@ btr_cur_update_in_place(
 #ifdef BTR_CUR_HASH_ADAPT
 	{
 		rw_lock_t* ahi_latch = block->index
-			? btr_get_search_latch(index) : NULL;
+			? btr_search_sys.get_latch(*index) : NULL;
 		if (ahi_latch) {
 			/* TO DO: Can we skip this if none of the fields
 			index->search_info->curr_n_fields
--- a/storage/innobase/btr/btr0sea.cc
+++ b/storage/innobase/btr/btr0sea.cc
@@ -41,7 +41,6 @@ Created 2/17/1996 Heikki Tuuri
 #include "btr0btr.h"
 #include "ha0ha.h"
 #include "srv0mon.h"
-#include "sync0sync.h"

 /** Is search system enabled.
 Search system is protected by array of latches. */
@@ -57,25 +56,8 @@ ulint btr_search_n_succ = 0;
 ulint btr_search_n_hash_fail = 0;
 #endif /* UNIV_SEARCH_PERF_STAT */

-/** padding to prevent other memory update
-hotspots from residing on the same memory
-cache line as btr_search_latches */
-UNIV_INTERN byte btr_sea_pad1[CACHE_LINE_SIZE];
-
-/** The latches protecting the adaptive search system: this latches protects the
-(1) positions of records on those pages where a hash index has been built.
-NOTE: It does not protect values of non-ordering fields within a record from
-being updated in-place! We can use fact (1) to perform unique searches to
-indexes. We will allocate the latches from dynamic memory to get it to the
-same DRAM page as other hotspot semaphores */
-rw_lock_t** btr_search_latches;
-
-/** padding to prevent other memory update hotspots from residing on
-the same memory cache line */
-UNIV_INTERN byte btr_sea_pad2[CACHE_LINE_SIZE];
-
 /** The adaptive hash index */
-btr_search_sys_t* btr_search_sys;
+btr_search_sys_t btr_search_sys;

 /** If the number of records on the page divided by this parameter
 would have been successfully accessed using a hash index, the index
@@ -187,104 +169,23 @@ probable that, when have reserved the btr search system latch and we need to
 allocate a new node to the hash table, it will succeed. However, the check
 will not guarantee success.
 @param[in]	index	index handler */
-static
-void
-btr_search_check_free_space_in_heap(const dict_index_t* index)
+static void btr_search_check_free_space_in_heap(const dict_index_t *index)
 {
 	/* Note that we peek the value of heap->free_block without reserving
 	the latch: this is ok, because we will not guarantee that there will
 	be enough free space in the hash table. */
-	buf_block_t* block = buf_block_alloc();
-	rw_lock_t* latch = btr_get_search_latch(index);
-	hash_table_t* table;
-	mem_heap_t* heap;
-
-	rw_lock_x_lock(latch);
-
-	if (!btr_search_enabled) {
-		goto func_exit;
-	}
-
-	table = btr_get_search_table(index);
-	heap = table->heap;
-
-	if (heap->free_block == NULL) {
-		heap->free_block = block;
-	} else {
-func_exit:
-		buf_block_free(block);
-	}
-
-	rw_lock_x_unlock(latch);
-}
-
-/** Creates and initializes the adaptive search system at a database start.
-@param[in]	hash_size	hash table size. */
-void btr_search_sys_create(ulint hash_size)
-{
-	/* Search System is divided into n parts.
-	Each part controls access to distinct set of hash buckets from
-	hash table through its own latch. */
-
-	/* Step-1: Allocate latches (1 per part). */
-	btr_search_latches = reinterpret_cast<rw_lock_t**>(
-		ut_malloc(sizeof(rw_lock_t*) * btr_ahi_parts, mem_key_ahi));
-
-	for (ulint i = 0; i < btr_ahi_parts; ++i) {
-		btr_search_latches[i] = reinterpret_cast<rw_lock_t*>(
-			ut_malloc(sizeof(rw_lock_t), mem_key_ahi));
-
-		rw_lock_create(btr_search_latch_key,
-			       btr_search_latches[i], SYNC_SEARCH_SYS);
-	}
-
-	/* Step-2: Allocate hash tablees. */
-	btr_search_sys = reinterpret_cast<btr_search_sys_t*>(
-		ut_malloc(sizeof(btr_search_sys_t), mem_key_ahi));
-
-	btr_search_sys->hash_tables = NULL;
-
-	if (btr_search_enabled) {
-		btr_search_enable();
-	}
-}
-
-/** Frees the adaptive search system at a database shutdown. */
-void btr_search_sys_free()
-{
-	if (!btr_search_sys)
-	{
-		ut_ad(!btr_search_latches);
-		return;
-	}
-
-	ut_ad(btr_search_sys);
-	ut_ad(btr_search_latches);
-
-	if (btr_search_sys->hash_tables)
-	{
-		for (ulint i= 0; i < btr_ahi_parts; ++i)
-		{
-			mem_heap_free(btr_search_sys->hash_tables[i]->heap);
-			hash_table_free(btr_search_sys->hash_tables[i]);
-		}
-		ut_free(btr_search_sys->hash_tables);
-	}
-
-	ut_free(btr_search_sys);
-	btr_search_sys= nullptr;
-
-	/* Free all latches. */
-	for (ulint i= 0; i < btr_ahi_parts; ++i)
-	{
-		rw_lock_free(btr_search_latches[i]);
-		ut_free(btr_search_latches[i]);
-	}
-
-	ut_free(btr_search_latches);
-	btr_search_latches= nullptr;
+	buf_block_t *block= buf_block_alloc();
+	auto part= btr_search_sys.get_part(*index);
+
+	rw_lock_x_lock(&part->latch);
+
+	if (!btr_search_enabled || part->heap->free_block)
+		buf_block_free(block);
+	else
+		part->heap->free_block= block;
+
+	rw_lock_x_unlock(&part->latch);
 }

 /** Set index->ref_count = 0 on all indexes of a table.
@@ -351,12 +252,7 @@ void btr_search_disable()
 	buf_pool.clear_hash_index();

 	/* Clear the adaptive hash index. */
-	for (ulint i = 0; i < btr_ahi_parts; ++i) {
-		mem_heap_free(btr_search_sys->hash_tables[i]->heap);
-		hash_table_free(btr_search_sys->hash_tables[i]);
-	}
-	ut_free(btr_search_sys->hash_tables);
-	btr_search_sys->hash_tables = NULL;
+	btr_search_sys.clear();

 	btr_search_x_unlock_all();
 }
@@ -377,27 +273,13 @@ void btr_search_enable(bool resize)
 	btr_search_x_lock_all();
 	ulint hash_size = buf_pool_get_curr_size() / sizeof(void *) / 64;

-	if (btr_search_sys->hash_tables) {
+	if (btr_search_sys.parts[0].heap) {
 		ut_ad(btr_search_enabled);
 		btr_search_x_unlock_all();
 		return;
 	}

-	btr_search_sys->hash_tables = reinterpret_cast<hash_table_t**>(
-		ut_malloc(sizeof(hash_table_t*) * btr_ahi_parts, mem_key_ahi));
-	for (ulint i = 0; i < btr_ahi_parts; ++i) {
-		btr_search_sys->hash_tables[i] =
-			hash_create(hash_size / btr_ahi_parts);
-		btr_search_sys->hash_tables[i]->heap = mem_heap_create_typed(
-			std::min<ulong>(4096,
-					MEM_MAX_ALLOC_IN_BUF / 2
-					- MEM_BLOCK_HEADER_SIZE
-					- MEM_SPACE_NEEDED(0)),
-			MEM_HEAP_FOR_BTR_SEARCH);
-#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
-		btr_search_sys->hash_tables[i]->adaptive = TRUE;
-#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-	}
+	btr_search_sys.alloc(hash_size);

 	btr_search_enabled = true;
 	btr_search_x_unlock_all();
@@ -581,6 +463,221 @@ btr_search_update_block_hash_info(btr_search_t* info, buf_block_t* block)
 	return(false);
 }

+#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
+/** Maximum number of records in a page */
+constexpr ulint MAX_N_POINTERS = UNIV_PAGE_SIZE_MAX / REC_N_NEW_EXTRA_BYTES;
+#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
+
+__attribute__((nonnull))
+/**
+Insert an entry into the hash table. If an entry with the same fold number
+is found, its node is updated to point to the new data, and no new node
+is inserted.
+@param table hash table
+@param heap  memory heap
+@param fold  folded value of the record
+@param block buffer block containing the record
+@param data  the record
+@retval true on success
+@retval false if no more memory could be allocated */
+static bool ha_insert_for_fold(hash_table_t *table, mem_heap_t* heap,
+                               ulint fold,
+#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
+                               buf_block_t *block, /*!< buffer block of data */
+#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
+                               const rec_t *data)
+{
+#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
+  ut_a(block->frame == page_align(data));
+#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
+  ut_ad(btr_search_enabled);
+
+  ulint hash = hash_calc_hash(fold, table);
+  hash_cell_t *cell= hash_get_nth_cell(table, hash);
+
+  for (ha_node_t *prev= static_cast<ha_node_t*>(cell->node); prev;
+       prev= prev->next)
+  {
+    if (prev->fold == fold)
+    {
+#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
+      buf_block_t *prev_block= prev->block;
+      ut_a(prev_block->frame == page_align(prev->data));
+      ut_a(prev_block->n_pointers-- < MAX_N_POINTERS);
+      ut_a(block->n_pointers++ < MAX_N_POINTERS);
+      prev->block= block;
+#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
+      prev->data= data;
+      return true;
+    }
+  }
+
+  /* We have to allocate a new chain node */
+  ha_node_t *node= static_cast<ha_node_t*>(mem_heap_alloc(heap, sizeof *node));
+  if (!node)
+    return false;
+
+  ha_node_set_data(node, block, data);
+
+#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
+  ut_a(block->n_pointers++ < MAX_N_POINTERS);
+#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
+
+  node->fold= fold;
+  node->next= nullptr;
+
+  ha_node_t *prev= static_cast<ha_node_t*>(cell->node);
+  if (!prev)
+    cell->node= node;
+  else
+  {
+    while (prev->next)
+      prev= prev->next;
+    prev->next= node;
+  }
+  return true;
+}
+
+__attribute__((nonnull))
+/** Delete a record.
+@param table hash table
+@param heap  memory heap
+@param del_node record to be deleted */
+static void ha_delete_hash_node(hash_table_t *table, mem_heap_t *heap,
+                                ha_node_t *del_node)
+{
+  ut_ad(btr_search_enabled);
+#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
+  ut_a(del_node->block->frame == page_align(del_node->data));
+  ut_a(del_node->block->n_pointers-- < MAX_N_POINTERS);
+#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
+
+  const ulint fold= del_node->fold;
+
+  HASH_DELETE(ha_node_t, next, table, fold, del_node);
+
+  ha_node_t *top= static_cast<ha_node_t*>(mem_heap_get_top(heap, sizeof *top));
+
+  if (del_node != top)
+  {
+    /* Compact the heap of nodes by moving the top in the place of del_node. */
+    *del_node= *top;
+    hash_cell_t *cell= hash_get_nth_cell(table, table->calc_hash(top->fold));
+
+    /* Look for the pointer to the top node, to update it */
+    if (cell->node == top)
+      /* The top node is the first in the chain */
+      cell->node= del_node;
+    else
+    {
+      /* We have to look for the predecessor */
+      ha_node_t *node= static_cast<ha_node_t*>(cell->node);
+      while (top != HASH_GET_NEXT(next, node))
+        node= static_cast<ha_node_t*>(HASH_GET_NEXT(next, node));
+
+      /* Now we have the predecessor node */
+      node->next= del_node;
+    }
+  }
+
+  /* Free the occupied space */
+  mem_heap_free_top(heap, sizeof *top);
+}
+
+__attribute__((nonnull))
+/** Delete all pointers to a page.
+@param table hash table
+@param heap  memory heap
+@param page  record to be deleted */
+static void ha_remove_all_nodes_to_page(hash_table_t *table, mem_heap_t *heap,
+                                        ulint fold, const page_t *page)
+{
+  for (ha_node_t *node= ha_chain_get_first(table, fold); node; )
+  {
+    if (page_align(ha_node_get_data(node)) == page)
+    {
+      ha_delete_hash_node(table, heap, node);
+
+      /* The deletion may compact the heap of nodes and move other nodes! */
+      node= ha_chain_get_first(table, fold);
+    }
+    else
+      node= ha_chain_get_next(node);
+  }
+#ifdef UNIV_DEBUG
+  /* Check that all nodes really got deleted */
+  for (ha_node_t *node= ha_chain_get_first(table, fold); node;
+       node= ha_chain_get_next(node))
+    ut_ad(page_align(ha_node_get_data(node)) != page);
+#endif /* UNIV_DEBUG */
+}
+
+/** Delete a record if found.
+@param table hash table
+@param heap  memory heap for the hash bucket chain
+@param fold  folded value of the searched data
+@param data  pointer to the record
+@return whether the record was found */
+static bool ha_search_and_delete_if_found(hash_table_t *table,
+                                          mem_heap_t *heap,
+                                          ulint fold, const rec_t *data)
+{
+  if (ha_node_t *node= ha_search_with_data(table, fold, data))
+  {
+    ha_delete_hash_node(table, heap, node);
+    return true;
+  }
+
+  return false;
+}
+
+__attribute__((nonnull))
+/** Looks for an element when we know the pointer to the data and
+updates the pointer to data if found.
+@param table    hash table
+@param fold     folded value of the searched data
+@param data     pointer to the data
+@param new_data new pointer to the data
+@return whether the element was found */
+static bool ha_search_and_update_if_found(hash_table_t *table, ulint fold,
+                                          const rec_t *data,
+#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
+                                          /** block containing new_data */
+                                          buf_block_t *new_block,
+#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
+                                          const rec_t *new_data)
+{
+#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
+  ut_a(new_block->frame == page_align(new_data));
+#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
+
+  if (!btr_search_enabled)
+    return false;
+
+  if (ha_node_t *node= ha_search_with_data(table, fold, data))
+  {
+#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
+    ut_a(node->block->n_pointers-- < MAX_N_POINTERS);
+    ut_a(new_block->n_pointers++ < MAX_N_POINTERS);
+    node->block= new_block;
+#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
+    node->data= new_data;
+    return true;
+  }
+
+  return false;
+}
+
+#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
+#else
+# define ha_insert_for_fold(t,h,f,b,d) ha_insert_for_fold(t,h,f,d)
+# define ha_search_and_update_if_found(table,fold,data,new_block,new_data) \
+	ha_search_and_update_if_found(table,fold,data,new_data)
+#endif
+
 /** Updates a hash node reference when it has been unsuccessfully used in a
 search which could have succeeded with the used hash parameters. This can
 happen because when building a hash index for a page, we do not check
@@ -615,8 +712,8 @@ btr_search_update_hash_ref(
 	ut_ad(block->page.id().space() == index->table->space_id);
 	ut_ad(index == cursor->index);
 	ut_ad(!dict_index_is_ibuf(index));
-	rw_lock_t* const latch = btr_get_search_latch(index);
-	rw_lock_x_lock(latch);
+	auto part = btr_search_sys.get_part(*index);
+	rw_lock_x_lock(&part->latch);
 	ut_ad(!block->index || block->index == index);
 	if (block->index
@@ -644,14 +741,13 @@ btr_search_update_hash_ref(
 			mem_heap_free(heap);
 		}
-		ha_insert_for_fold(btr_get_search_table(index), fold,
-				   block, rec);
+		ha_insert_for_fold(&part->table, part->heap, fold, block, rec);

 		MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED);
 	}

 func_exit:
-	rw_lock_x_unlock(latch);
+	rw_lock_x_unlock(&part->latch);
 }

 /** Checks if a guessed position for a tree cursor is right. Note that if
@@ -926,7 +1022,8 @@ btr_search_guess_on_hash(
 	}

 	ut_ad(!index->is_ibuf());
-	ut_ad(!ahi_latch || ahi_latch == btr_get_search_latch(index));
+	ut_ad(!ahi_latch
+	      || ahi_latch == &btr_search_sys.get_part(*index)->latch);
 	ut_ad((latch_mode == BTR_SEARCH_LEAF)
 	      || (latch_mode == BTR_MODIFY_LEAF));
 	compile_time_assert(ulint{BTR_SEARCH_LEAF} == ulint{RW_S_LATCH});
@@ -959,11 +1056,11 @@ btr_search_guess_on_hash(
 	cursor->fold = fold;
 	cursor->flag = BTR_CUR_HASH;

-	rw_lock_t* use_latch = ahi_latch ? NULL : btr_get_search_latch(index);
+	auto part = btr_search_sys.get_part(*index);
 	const rec_t* rec;

-	if (use_latch) {
-		rw_lock_s_lock(use_latch);
+	if (!ahi_latch) {
+		rw_lock_s_lock(&part->latch);

 		if (!btr_search_enabled) {
 			goto fail;
@@ -974,12 +1071,12 @@ btr_search_guess_on_hash(
 	}

 	rec = static_cast<const rec_t*>(
-		ha_search_and_get_data(btr_get_search_table(index), fold));
+		ha_search_and_get_data(&part->table, fold));

 	if (!rec) {
-		if (use_latch) {
+		if (!ahi_latch) {
 fail:
-			rw_lock_s_unlock(use_latch);
+			rw_lock_s_unlock(&part->latch);
 		}

 		btr_search_failure(info, cursor);
@@ -988,7 +1085,7 @@ btr_search_guess_on_hash(

 	buf_block_t* block = buf_pool.block_from_ahi(rec);

-	if (use_latch) {
+	if (!ahi_latch) {
 		rw_lock_t* hash_lock = buf_pool.hash_lock_get(
 			block->page.id());
 		rw_lock_s_lock(hash_lock);
@@ -1032,7 +1129,7 @@ btr_search_guess_on_hash(

 		buf_pool.stat.n_page_gets++;

-		rw_lock_s_unlock(use_latch);
+		rw_lock_s_unlock(&part->latch);

 		buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
 		if (UNIV_UNLIKELY(fail)) {
@@ -1149,7 +1246,6 @@ void btr_search_drop_page_hash_index(buf_block_t* block)
 	ulint			i;
 	mem_heap_t*		heap;
 	rec_offs*		offsets;
-	rw_lock_t*		latch;

 retry:
 	/* This debug check uses a dirty read that could theoretically cause
@@ -1175,17 +1271,15 @@ void btr_search_drop_page_hash_index(buf_block_t* block)
 	const index_id_t	index_id
 		= btr_page_get_index_id(block->frame);

-	const ulint		ahi_slot
-		= ut_fold_ulint_pair(static_cast<ulint>(index_id),
-				     block->page.id().space())
-		% btr_ahi_parts;
-	latch = btr_search_latches[ahi_slot];
-
-	rw_lock_s_lock(latch);
+	auto part = btr_search_sys.get_part(index_id,
+					    block->page.id().space());
+
+	rw_lock_s_lock(&part->latch);
 	assert_block_ahi_valid(block);

 	if (!block->index || !btr_search_enabled) {
-		rw_lock_s_unlock(latch);
+		rw_lock_s_unlock(&part->latch);
 		return;
 	}
@@ -1225,7 +1319,7 @@ void btr_search_drop_page_hash_index(buf_block_t* block)
 	/* NOTE: The AHI fields of block must not be accessed after
 	releasing search latch, as the index page might only be s-latched! */

-	rw_lock_s_unlock(latch);
+	rw_lock_s_unlock(&part->latch);

 	ut_a(n_fields > 0 || n_bytes > 0);
@@ -1276,7 +1370,7 @@ void btr_search_drop_page_hash_index(buf_block_t* block)
 		mem_heap_free(heap);
 	}

-	rw_lock_x_lock(latch);
+	rw_lock_x_lock(&part->latch);

 	if (UNIV_UNLIKELY(!block->index)) {
 		/* Someone else has meanwhile dropped the hash index */
@@ -1292,16 +1386,14 @@ void btr_search_drop_page_hash_index(buf_block_t* block)
 		/* Someone else has meanwhile built a new hash index on the
 		page, with different parameters */

-		rw_lock_x_unlock(latch);
+		rw_lock_x_unlock(&part->latch);

 		ut_free(folds);
 		goto retry;
 	}

 	for (i = 0; i < n_cached; i++) {
-
-		ha_remove_all_nodes_to_page(
-			btr_search_sys->hash_tables[ahi_slot],
-			folds[i], page);
+		ha_remove_all_nodes_to_page(&part->table, part->heap,
+					    folds[i], page);
 	}
@@ -1321,7 +1413,7 @@ void btr_search_drop_page_hash_index(buf_block_t* block)
 cleanup:
 	assert_block_ahi_valid(block);
-	rw_lock_x_unlock(latch);
+	rw_lock_x_unlock(&part->latch);

 	ut_free(folds);
 }
@@ -1397,7 +1489,6 @@ btr_search_build_page_hash_index(
 	ulint		n_recs;
 	ulint*		folds;
 	const rec_t**	recs;
-	ulint		i;
 	mem_heap_t*	heap		= NULL;
 	rec_offs	offsets_[REC_OFFS_NORMAL_SIZE];
 	rec_offs*	offsets		= offsets_;
@@ -1410,7 +1501,7 @@ btr_search_build_page_hash_index(
 	}
 	rec_offs_init(offsets_);

-	ut_ad(ahi_latch == btr_get_search_latch(index));
+	ut_ad(ahi_latch == &btr_search_sys.get_part(*index)->latch);
 	ut_ad(index);
 	ut_ad(block->page.id().space() == index->table->space_id);
 	ut_ad(!dict_index_is_ibuf(index));
@@ -1534,20 +1625,12 @@ btr_search_build_page_hash_index(

 	btr_search_check_free_space_in_heap(index);

-	hash_table_t*	table = btr_get_search_table(index);
 	rw_lock_x_lock(ahi_latch);

 	if (!btr_search_enabled) {
 		goto exit_func;
 	}

-	table = btr_get_search_table(index);
-	if (block->index && ((block->curr_n_fields != n_fields)
-			     || (block->curr_n_bytes != n_bytes)
-			     || (block->curr_left_side != left_side))) {
-		goto exit_func;
-	}
-
 	/* This counter is decremented every time we drop page
 	hash index entries and is incremented here. Since we can
 	rebuild hash index for a page that is already hashed, we
@@ -1556,6 +1639,10 @@ btr_search_build_page_hash_index(
 	if (!block->index) {
 		assert_block_ahi_empty(block);
 		index->search_info->ref_count++;
+	} else if (block->curr_n_fields != n_fields
+		   || block->curr_n_bytes != n_bytes
+		   || block->curr_left_side != left_side) {
+		goto exit_func;
 	}

 	block->n_hash_helps = 0;
@@ -1565,9 +1652,13 @@ btr_search_build_page_hash_index(
 	block->curr_left_side = left_side;
 	block->index = index;

-	for (i = 0; i < n_cached; i++) {
-
-		ha_insert_for_fold(table, folds[i], block, recs[i]);
+	{
+		auto part = btr_search_sys.get_part(*index);
+		for (ulint i = 0; i < n_cached; i++) {
+			ha_insert_for_fold(&part->table, part->heap,
+					   folds[i], block, recs[i]);
+			MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED);
+		}
 	}

 	MONITOR_INC(MONITOR_ADAPTIVE_HASH_PAGE_ADDED);
@@ -1589,8 +1680,8 @@ btr_search_build_page_hash_index(
 void
 btr_search_info_update_slow(btr_search_t* info, btr_cur_t* cursor)
 {
-	rw_lock_t*	ahi_latch = btr_get_search_latch(cursor->index);
+	rw_lock_t*	ahi_latch = &btr_search_sys.get_part(*cursor->index)
+		->latch;

 	ut_ad(!rw_lock_own_flagged(ahi_latch,
 				   RW_LOCK_FLAG_X | RW_LOCK_FLAG_S));
@@ -1659,7 +1750,9 @@ btr_search_move_or_delete_hash_entries(
 	assert_block_ahi_valid(block);
 	assert_block_ahi_valid(new_block);

-	rw_lock_t* ahi_latch = index ? btr_get_search_latch(index) : NULL;
+	rw_lock_t* ahi_latch = index
+		? &btr_search_sys.get_part(*index)->latch
+		: nullptr;

 	if (new_block->index) {
 		btr_search_drop_page_hash_index(block);
@@ -1745,17 +1838,16 @@ void btr_search_update_hash_on_delete(btr_cur_t* cursor)
 		mem_heap_free(heap);
 	}

-	rw_lock_t* ahi_latch = btr_get_search_latch(index);
-	rw_lock_x_lock(ahi_latch);
+	auto part = btr_search_sys.get_part(*index);
+	rw_lock_x_lock(&part->latch);
 	assert_block_ahi_valid(block);

-	if (btr_search_enabled) {
-		hash_table_t* table = btr_get_search_table(index);
-		if (block->index) {
+	if (block->index && btr_search_enabled) {
 		ut_a(block->index == index);

-		if (ha_search_and_delete_if_found(table, fold, rec)) {
+		if (ha_search_and_delete_if_found(&part->table, part->heap,
+						  fold, rec)) {
 			MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_REMOVED);
 		} else {
 			MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_REMOVE_NOT_FOUND);
@@ -1763,9 +1855,8 @@ void btr_search_update_hash_on_delete(btr_cur_t* cursor)

 		assert_block_ahi_valid(block);
 	}
-	}

-	rw_lock_x_unlock(ahi_latch);
+	rw_lock_x_unlock(&part->latch);
 }

 /** Updates the page hash index when a single record is inserted on a page.
@@ -1776,12 +1867,11 @@ void btr_search_update_hash_on_delete(btr_cur_t* cursor)
 void
 btr_search_update_hash_node_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
 {
-	hash_table_t*	table;
 	buf_block_t*	block;
 	dict_index_t*	index;
 	rec_t*		rec;

-	ut_ad(ahi_latch == btr_get_search_latch(cursor->index));
+	ut_ad(ahi_latch == &btr_search_sys.get_part(*cursor->index)->latch);
 	ut_ad(!btr_search_own_any(RW_LOCK_S));
 	ut_ad(!btr_search_own_any(RW_LOCK_X));
 #ifdef MYSQL_INDEX_DISABLE_AHI
@@ -1820,10 +1910,9 @@ btr_search_update_hash_node_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
 	    && (cursor->n_bytes == block->curr_n_bytes)
 	    && !block->curr_left_side) {

-		table = btr_get_search_table(index);
-
 		if (ha_search_and_update_if_found(
-			    table, cursor->fold, rec, block,
+			    &btr_search_sys.get_part(*cursor->index)->table,
+			    cursor->fold, rec, block,
 			    page_rec_get_next(rec))) {
 			MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_UPDATED);
 		}
@@ -1847,7 +1936,7 @@ btr_search_update_hash_node_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
 void
 btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
 {
-	hash_table_t*	table;
+	btr_search_sys_t::partition*	part;
 	buf_block_t*	block;
 	dict_index_t*	index;
 	const rec_t*	rec;
@@ -1863,7 +1952,7 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
 	rec_offs*	offsets		= offsets_;
 	rec_offs_init(offsets_);

-	ut_ad(ahi_latch == btr_get_search_latch(cursor->index));
+	ut_ad(ahi_latch == &btr_search_sys.get_part(*cursor->index)->latch);
 	ut_ad(page_is_leaf(btr_cur_get_page(cursor)));
 	ut_ad(!btr_search_own_any(RW_LOCK_S));
 	ut_ad(!btr_search_own_any(RW_LOCK_X));
@@ -1932,8 +2021,10 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
 			goto function_exit;
 		}

-		table = btr_get_search_table(index);
-		ha_insert_for_fold(table, ins_fold, block, ins_rec);
+		part = btr_search_sys.get_part(*index);
+		ha_insert_for_fold(&part->table, part->heap,
+				   ins_fold, block, ins_rec);
+		MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED);
 	}

 	goto check_next_rec;
@@ -1948,14 +2039,17 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
 		if (!btr_search_enabled || !block->index) {
 			goto function_exit;
 		}

-		table = btr_get_search_table(index);
+		part = btr_search_sys.get_part(*index);
 	}

 	if (!left_side) {
-		ha_insert_for_fold(table, fold, block, rec);
+		ha_insert_for_fold(&part->table, part->heap,
+				   fold, block, rec);
 	} else {
-		ha_insert_for_fold(table, ins_fold, block, ins_rec);
+		ha_insert_for_fold(&part->table, part->heap,
+				   ins_fold, block, ins_rec);
 	}
+	MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED);
 	}

 check_next_rec:
@@ -1969,10 +2063,12 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
 		if (!btr_search_enabled || !block->index) {
 			goto function_exit;
 		}

-		table = btr_get_search_table(index);
+		part = btr_search_sys.get_part(*index);
 	}

-	ha_insert_for_fold(table, ins_fold, block, ins_rec);
+	ha_insert_for_fold(&part->table, part->heap,
+			   ins_fold, block, ins_rec);
+	MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED);
 	}

 	goto function_exit;
@@ -1986,14 +2082,17 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
 		if (!btr_search_enabled || !block->index) {
 			goto function_exit;
 		}

-		table = btr_get_search_table(index);
+		part = btr_search_sys.get_part(*index);
 	}

 	if (!left_side) {
-		ha_insert_for_fold(table, ins_fold, block, ins_rec);
+		ha_insert_for_fold(&part->table, part->heap,
+				   ins_fold, block, ins_rec);
 	} else {
-		ha_insert_for_fold(table, next_fold, block, next_rec);
+		ha_insert_for_fold(&part->table, part->heap,
+				   next_fold, block, next_rec);
 	}
+	MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED);
 	}

 function_exit:
@@ -2007,6 +2106,31 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
 }

 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
+__attribute__((nonnull))
+/** @return whether a range of the cells is valid */
+static bool ha_validate(const hash_table_t *table,
+                        ulint start_index, ulint end_index)
+{
+  ut_a(start_index <= end_index);
+  ut_a(end_index < table->n_cells);
+
+  bool ok= true;
+
+  for (ulint i= start_index; i <= end_index; i++)
+  {
+    for (auto node= static_cast<const ha_node_t*>(table->array[i].node); node;
+         node= node->next)
+    {
+      if (table->calc_hash(node->fold) != i) {
+        ib::error() << "Hash table node fold value " << node->fold
+                    << " does not match the cell number " << i;
+        ok= false;
+      }
+    }
+  }
+
+  return ok;
+}
+
 /** Validates the search system for given hash table.
 @param[in]	hash_table_id	hash table to validate
@@ -2037,8 +2161,9 @@ btr_search_hash_table_validate(ulint hash_table_id)

 	mutex_enter(&buf_pool.mutex);

-	cell_count = hash_get_n_cells(
-		btr_search_sys->hash_tables[hash_table_id]);
+	auto &part = btr_search_sys.parts[hash_table_id];
+
+	cell_count = hash_get_n_cells(&part.table);

 	for (i = 0; i < cell_count; i++) {
 		/* We release search latches every once in a while to
@@ -2059,8 +2184,7 @@ btr_search_hash_table_validate(ulint hash_table_id)

 			mutex_enter(&buf_pool.mutex);

-			ulint curr_cell_count = hash_get_n_cells(
-				btr_search_sys->hash_tables[hash_table_id]);
+			ulint curr_cell_count = hash_get_n_cells(&part.table);

 			if (cell_count != curr_cell_count) {
@@ -2072,8 +2196,7 @@ btr_search_hash_table_validate(ulint hash_table_id)
 			}
 		}

-		node = (ha_node_t*) hash_get_nth_cell(
-			btr_search_sys->hash_tables[hash_table_id], i)->node;
+		node = (ha_node_t*) hash_get_nth_cell(&part.table, i)->node;

 		for (; node != NULL; node = node->next) {
 			const buf_block_t*	block
@@ -2169,8 +2292,7 @@ btr_search_hash_table_validate(ulint hash_table_id)

 			mutex_enter(&buf_pool.mutex);

-			ulint curr_cell_count = hash_get_n_cells(
-				btr_search_sys->hash_tables[hash_table_id]);
+			ulint curr_cell_count = hash_get_n_cells(&part.table);

 			if (cell_count != curr_cell_count) {
@@ -2184,8 +2306,7 @@ btr_search_hash_table_validate(ulint hash_table_id)

 		ulint end_index = ut_min(i + chunk_size - 1, cell_count - 1);

-		if (!ha_validate(btr_search_sys->hash_tables[hash_table_id],
-				 i, end_index)) {
+		if (!ha_validate(&part.table, i, end_index)) {
 			ok = FALSE;
 		}
 	}
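One subtlety in the hash-node management that moved into btr0sea.cc above: ha_delete_hash_node() never leaves a hole in the node heap. The chain nodes are bump-allocated from a mem_heap_t, so deletion copies the heap's topmost node into the deleted slot, re-points the single chain pointer that still referenced the top node, and only then releases the top with mem_heap_free_top(). A compilable toy model of the trick follows; the container types and the pointer-to-pointer chain walk are simplifications, not the InnoDB code.

#include <cstddef>
#include <vector>

struct node { std::size_t fold; node* next; };

struct toy_table
{
  std::vector<node*> cells; // heads of the hash chains
  std::vector<node>  heap;  // dense arena; heap.back() is the "top" node
                            // (reserve() it up front so nodes never move
                            // behind our back)

  void unlink(node* del)    // ~ HASH_DELETE()
  {
    node** ref = &cells[del->fold % cells.size()];
    while (*ref != del)
      ref = &(*ref)->next;
    *ref = del->next;
  }

  // ~ ha_delete_hash_node(): unlink, then fill the hole with the top node
  // and shrink the arena by one node so it stays dense.
  void erase(node* del)
  {
    unlink(del);
    node* top = &heap.back();
    if (del != top)
    {
      *del = *top;                                // copy top into the hole
      node** ref = &cells[top->fold % cells.size()];
      while (*ref != top)                         // find the one pointer
        ref = &(*ref)->next;                      //   still naming top
      *ref = del;                                 // re-point it at the copy
    }
    heap.pop_back();                              // ~ mem_heap_free_top()
  }
};

This compaction is also why ha_remove_all_nodes_to_page() restarts from the head of the chain after every deletion: as its own comment notes, the deletion may relocate other nodes, including the one it would otherwise have advanced to.
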
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -1552,7 +1552,7 @@ bool buf_pool_t::create()
   chunk_t::map_ref= chunk_t::map_reg;
   buf_LRU_old_ratio_update(100 * 3 / 8, false);

-  btr_search_sys_create(srv_buf_pool_curr_size / sizeof(void*) / 64);
+  btr_search_sys_create();

   ut_ad(is_initialised());
   return false;
 }
--- a/storage/innobase/ha/ha0ha.cc
+++ /dev/null
-/*****************************************************************************
-
-Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
-
-This program is free software; you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free Software
-Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc.,
-51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
-
-*****************************************************************************/
-
-/********************************************************************//**
-@file ha/ha0ha.cc
-The hash table with external chains
-
-Created 8/22/1994 Heikki Tuuri
-*************************************************************************/
-
-#include "ha0ha.h"
-
-#ifdef UNIV_DEBUG
-# include "buf0buf.h"
-#endif /* UNIV_DEBUG */
-#include "btr0sea.h"
-#include "page0page.h"
-
-#ifdef BTR_CUR_HASH_ADAPT
-# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
-/** Maximum number of records in a page */
-static const ulint MAX_N_POINTERS
-	= UNIV_PAGE_SIZE_MAX / REC_N_NEW_EXTRA_BYTES;
-# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-
-/*************************************************************//**
-Inserts an entry into a hash table. If an entry with the same fold number
-is found, its node is updated to point to the new data, and no new node
-is inserted. If btr_search_enabled is set to FALSE, we will only allow
-updating existing nodes, but no new node is allowed to be added.
-@return TRUE if succeed, FALSE if no more memory could be allocated */
-ibool
-ha_insert_for_fold_func(
-/*====================*/
-	hash_table_t*	table,	/*!< in: hash table */
-	ulint		fold,	/*!< in: folded value of data; if a node with
-				the same fold value already exists, it is
-				updated to point to the same data, and no new
-				node is created! */
-#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
-	buf_block_t*	block,	/*!< in: buffer block containing the data */
-#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-	const rec_t*	data)	/*!< in: data, must not be NULL */
-{
-	hash_cell_t*	cell;
-	ha_node_t*	node;
-	ha_node_t*	prev_node;
-	ulint		hash;
-
-	ut_ad(data);
-	ut_ad(table);
-	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-	ut_ad(table->heap->type & MEM_HEAP_BTR_SEARCH);
-#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
-	ut_a(block->frame == page_align(data));
-#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-	ut_ad(btr_search_enabled);
-
-	hash = hash_calc_hash(fold, table);
-
-	cell = hash_get_nth_cell(table, hash);
-
-	prev_node = static_cast<ha_node_t*>(cell->node);
-
-	while (prev_node != NULL) {
-		if (prev_node->fold == fold) {
-#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
-			if (table->adaptive) {
-				buf_block_t* prev_block = prev_node->block;
-				ut_a(prev_block->frame
-				     == page_align(prev_node->data));
-				ut_a(prev_block->n_pointers-- < MAX_N_POINTERS);
-				ut_a(block->n_pointers++ < MAX_N_POINTERS);
-			}
-
-			prev_node->block = block;
-#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-			prev_node->data = data;
-
-			return(TRUE);
-		}
-
-		prev_node = prev_node->next;
-	}
-
-	/* We have to allocate a new chain node */
-
-	node = static_cast<ha_node_t*>(
-		mem_heap_alloc(table->heap, sizeof(ha_node_t)));
-
-	if (node == NULL) {
-		/* It was a btr search type memory heap and at the moment
-		no more memory could be allocated: return */
-
-		return(FALSE);
-	}
-
-	ha_node_set_data(node, block, data);
-
-#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
-	if (table->adaptive) {
-		ut_a(block->n_pointers++ < MAX_N_POINTERS);
-	}
-#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-
-	node->fold = fold;
-	node->next = NULL;
-
-	prev_node = static_cast<ha_node_t*>(cell->node);
-
-	if (prev_node == NULL) {
-		cell->node = node;
-
-		return(TRUE);
-	}
-
-	while (prev_node->next != NULL) {
-		prev_node = prev_node->next;
-	}
-
-	prev_node->next = node;
-
-	return(TRUE);
-}
-
-#ifdef UNIV_DEBUG
-/** Verify if latch corresponding to the hash table is x-latched
-@param table	hash table */
-void ha_btr_search_latch_x_locked(const hash_table_t* table)
-{
-	ulint	i;
-	for (i = 0; i < btr_ahi_parts; ++i) {
-		if (btr_search_sys->hash_tables[i] == table) {
-			break;
-		}
-	}
-
-	ut_ad(i < btr_ahi_parts);
-	ut_ad(rw_lock_own(btr_search_latches[i], RW_LOCK_X));
-}
-#endif /* UNIV_DEBUG */
-
-/***********************************************************//**
-Deletes a hash node. */
-void
-ha_delete_hash_node(
-/*================*/
-	hash_table_t*	table,		/*!< in: hash table */
-	ha_node_t*	del_node)	/*!< in: node to be deleted */
-{
-	ut_ad(table);
-	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-	ut_d(ha_btr_search_latch_x_locked(table));
-	ut_ad(btr_search_enabled);
-#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
-	ut_a(table->adaptive);
-	ut_a(del_node->block->frame == page_align(del_node->data));
-	ut_a(del_node->block->n_pointers-- < MAX_N_POINTERS);
-#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-
-	ha_node_t*	node;
-	const ulint	fold = del_node->fold;
-
-	HASH_DELETE(ha_node_t, next, table, fold, del_node);
-
-	ha_node_t*	top_node = (ha_node_t*) mem_heap_get_top(table->heap,
-							sizeof(ha_node_t));
-
-	/* If the node to remove is not the top node in the heap, compact the
-	heap of nodes by moving the top node in the place of del_node. */
-
-	if (del_node != top_node) {
-		/* Copy the top node in place of del_node */
-
-		*del_node = *top_node;
-
-		hash_cell_t*	cell = hash_get_nth_cell(
-			table, hash_calc_hash(top_node->fold, table));
-
-		/* Look for the pointer to the top node, to update it */
-
-		if (cell->node == top_node) {
-			/* The top node is the first in the chain */
-			cell->node = del_node;
-		} else {
-			/* We have to look for the predecessor */
-			node = static_cast<ha_node_t*>(cell->node);
-
-			while (top_node != HASH_GET_NEXT(next, node)) {
-				node = static_cast<ha_node_t*>(
-					HASH_GET_NEXT(next, node));
-			}
-
-			/* Now we have the predecessor node */
-			node->next = del_node;
-		}
-	}
-
-	/* Free the space occupied by the top node */
-	mem_heap_free_top(table->heap, sizeof(ha_node_t));
-}
-
-/*********************************************************//**
-Looks for an element when we know the pointer to the data, and updates
-the pointer to data, if found.
-@return TRUE if found */
-ibool
-ha_search_and_update_if_found_func(
-/*===============================*/
-	hash_table_t*	table,	/*!< in/out: hash table */
-	ulint		fold,	/*!< in: folded value of the searched data */
-	const rec_t*	data,	/*!< in: pointer to the data */
-#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
-	buf_block_t*	new_block,/*!< in: block containing new_data */
-#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-	const rec_t*	new_data)/*!< in: new pointer to the data */
-{
-	ha_node_t*	node;
-
-	ut_ad(table);
-	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
-	ut_a(new_block->frame == page_align(new_data));
-#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-
-	ut_d(ha_btr_search_latch_x_locked(table));
-
-	if (!btr_search_enabled) {
-		return(FALSE);
-	}
-
-	node = ha_search_with_data(table, fold, data);
-
-	if (node) {
-#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
-		if (table->adaptive) {
-			ut_a(node->block->n_pointers-- < MAX_N_POINTERS);
-			ut_a(new_block->n_pointers++ < MAX_N_POINTERS);
-		}
-
-		node->block = new_block;
-#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-		node->data = new_data;
-
-		return(TRUE);
-	}
-
-	return(FALSE);
-}
-
-/*****************************************************************//**
-Removes from the chain determined by fold all nodes whose data pointer
-points to the page given. */
-void
-ha_remove_all_nodes_to_page(
-/*========================*/
-	hash_table_t*	table,	/*!< in: hash table */
-	ulint		fold,	/*!< in: fold value */
-	const page_t*	page)	/*!< in: buffer page */
-{
-	ha_node_t*	node;
-
-	ut_ad(table);
-	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-	ut_ad(btr_search_enabled);
-	ut_d(ha_btr_search_latch_x_locked(table));
-
-	node = ha_chain_get_first(table, fold);
-
-	while (node) {
-		if (page_align(ha_node_get_data(node)) == page) {
-
-			/* Remove the hash node */
-
-			ha_delete_hash_node(table, node);
-
-			/* Start again from the first node in the chain
-			because the deletion may compact the heap of
-			nodes and move other nodes! */
-
-			node = ha_chain_get_first(table, fold);
-		} else {
-			node = ha_chain_get_next(node);
-		}
-	}
-#ifdef UNIV_DEBUG
-	/* Check that all nodes really got deleted */
-
-	node = ha_chain_get_first(table, fold);
-
-	while (node) {
-		ut_a(page_align(ha_node_get_data(node)) != page);
-
-		node = ha_chain_get_next(node);
-	}
-#endif /* UNIV_DEBUG */
-}
-
-#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
-/*************************************************************//**
-Validates a given range of the cells in hash table.
-@return TRUE if ok */
-ibool
-ha_validate(
-/*========*/
-	hash_table_t*	table,		/*!< in: hash table */
-	ulint		start_index,	/*!< in: start index */
-	ulint		end_index)	/*!< in: end index */
-{
-	ibool		ok	= TRUE;
-	ulint		i;
-
-	ut_ad(table);
-	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-	ut_a(start_index <= end_index);
-	ut_a(start_index < hash_get_n_cells(table));
-	ut_a(end_index < hash_get_n_cells(table));
-
-	for (i = start_index; i <= end_index; i++) {
-		ha_node_t*	node;
-		hash_cell_t*	cell;
-
-		cell = hash_get_nth_cell(table, i);
-
-		for (node = static_cast<ha_node_t*>(cell->node);
-		     node != 0;
-		     node = node->next) {
-
-			if (hash_calc_hash(node->fold, table) != i) {
-				ib::error() << "Hash table node fold value "
-					<< node->fold << " does not match the"
-					" cell number " << i << ".";
-
-				ok = FALSE;
-			}
-		}
-	}
-
-	return(ok);
-}
-#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */
-#endif /* BTR_CUR_HASH_ADAPT */
--- a/storage/innobase/ha/hash0hash.cc
+++ b/storage/innobase/ha/hash0hash.cc
@@ -28,20 +28,23 @@ Created 5/20/1997 Heikki Tuuri
 #include "mem0mem.h"
 #include "sync0sync.h"

+/** Create the hash table.
+@param n  the lower bound of n_cells */
+void hash_table_t::create(ulint n)
+{
+  n_cells= ut_find_prime(n);
+  array= static_cast<hash_cell_t*>(ut_zalloc_nokey(n_cells * sizeof *array));
+}
+
 /**
 Create a hash table.
 @param n  the minimum number of hash array elements
 @return created table (with n_cells being a prime, at least n) */
 hash_table_t *hash_create(ulint n)
 {
-  ulint prime= ut_find_prime(n);
-
   hash_table_t *table= static_cast<hash_table_t*>
     (ut_zalloc_nokey(sizeof *table));
-  table->array= static_cast<hash_cell_t*>(ut_zalloc_nokey(sizeof(hash_cell_t) *
-                                                          prime));
-  table->n_cells= prime;
-  ut_d(table->magic_n= HASH_TABLE_MAGIC_N);
+  table->create(n);
   return table;
 }

@@ -52,8 +55,6 @@ hash_table_free(
 /*============*/
 	hash_table_t*	table)	/*!< in, own: hash table */
 {
-	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-
 	ut_free(table->array);
 	ut_free(table);
 }
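
hash_table_t::create() still rounds the requested size up to a prime with ut_find_prime(). The prime cell count matters because fold values derived from index pages tend to share arithmetic structure; any factor that the folds' stride shares with n_cells collapses many folds into few cells. A tiny standalone illustration (the numbers are arbitrary):

#include <cstdio>

int main()
{
  // Folds striding by 50 land in only two distinct cells of a 100-cell
  // table, but spread over eight cells of a 101-cell (prime) table.
  for (unsigned long i = 0; i < 8; i++)
  {
    unsigned long fold = i * 50;
    std::printf("fold %3lu -> cell %3lu of 100, cell %3lu of 101\n",
                fold, fold % 100, fold % 101);
  }
  return 0;
}
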
--- a/storage/innobase/include/btr0sea.h
+++ b/storage/innobase/include/btr0sea.h
@@ -30,13 +30,10 @@ Created 2/17/1996 Heikki Tuuri
 #include "dict0dict.h"
 #ifdef BTR_CUR_HASH_ADAPT
 #include "ha0ha.h"
-#include "sync0sync.h"

-/** Creates and initializes the adaptive search system at a database start.
-@param[in]	hash_size	hash table size. */
-void btr_search_sys_create(ulint hash_size);
-
-/** Frees the adaptive search system at a database shutdown. */
-void btr_search_sys_free();
+#define btr_search_sys_create() btr_search_sys.create()
+#define btr_search_sys_free() btr_search_sys.free()

 /** Disable the adaptive hash search system and empty the index. */
 void btr_search_disable();
@@ -162,19 +159,8 @@ static inline bool btr_search_own_any();
 /** Unlock all search latches from shared mode. */
 static inline void btr_search_s_unlock_all();

-/** Get the latch based on index attributes.
-A latch is selected from an array of latches using pair of index-id, space-id.
-@param[in]	index	index handler
-@return latch */
-static inline rw_lock_t* btr_get_search_latch(const dict_index_t* index);
-
-/** Get the hash-table based on index attributes.
-A table is selected from an array of tables using pair of index-id, space-id.
-@param[in]	index	index handler
-@return hash table */
-static inline hash_table_t* btr_get_search_table(const dict_index_t* index);
 #else /* BTR_CUR_HASH_ADAPT */
-# define btr_search_sys_create(size)
+# define btr_search_sys_create()
 # define btr_search_sys_free()
 # define btr_search_drop_page_hash_index(block)
 # define btr_search_s_lock_all(index)
@@ -259,31 +245,119 @@ struct btr_search_t{
 };

 #ifdef BTR_CUR_HASH_ADAPT
+/** The hash index system */
+struct btr_search_sys_t
+{
+  /** Partition of the hash table */
+  struct partition
+  {
+    /** latches protecting hash_table */
+    rw_lock_t latch;
+    /** mapping of dtuple_fold() to rec_t* in buf_block_t::frame */
+    hash_table_t table;
+    /** memory heap for table */
+    mem_heap_t *heap;
+    char pad[(CPU_LEVEL1_DCACHE_LINESIZE - sizeof(rw_lock_t) -
+              sizeof(hash_table_t) - sizeof(mem_heap_t*)) &
+             (CPU_LEVEL1_DCACHE_LINESIZE - 1)];
+
+    void init()
+    {
+      memset((void*) this, 0, sizeof *this);
+      rw_lock_create(btr_search_latch_key, &latch, SYNC_SEARCH_SYS);
+    }
+
+    void alloc(ulint hash_size)
+    {
+      table.create(hash_size);
+      heap= mem_heap_create_typed(std::min<ulong>(4096,
+                                                  MEM_MAX_ALLOC_IN_BUF / 2
+                                                  - MEM_BLOCK_HEADER_SIZE
+                                                  - MEM_SPACE_NEEDED(0)),
+                                  MEM_HEAP_FOR_BTR_SEARCH);
+    }
+
+    void clear()
+    {
+      mem_heap_free(heap);
+      heap= nullptr;
+      ut_free(table.array);
+    }
+
+    void free()
+    {
+      rw_lock_free(&latch);
+      if (heap)
+        clear();
+    }
+  };
+
+  /** Partitions of the adaptive hash index */
+  partition *parts;
+
+  /** Get an adaptive hash index partition */
+  partition *get_part(index_id_t id, ulint space_id) const
+  {
+    return parts + ut_fold_ulint_pair(ulint(id), space_id) % btr_ahi_parts;
+  }
+
+  /** Get an adaptive hash index partition */
+  partition *get_part(const dict_index_t &index) const
+  {
+    ut_ad(index.table->space->id == index.table->space_id);
+    return get_part(ulint(index.id), index.table->space_id);
+  }
+
+  /** Get the search latch for the adaptive hash index partition */
+  rw_lock_t *get_latch(const dict_index_t &index) const
+  { return &get_part(index)->latch; }
+
+  /** Create and initialize at startup */
+  void create()
+  {
+    parts= static_cast<partition*>(ut_malloc(btr_ahi_parts * sizeof *parts,
+                                             mem_key_ahi));
+    for (ulong i= 0; i < btr_ahi_parts; ++i)
+      parts[i].init();
+    if (btr_search_enabled)
+      btr_search_enable();
+  }
+
+  void alloc(ulint hash_size)
+  {
+    hash_size/= btr_ahi_parts;
+    for (ulong i= 0; i < btr_ahi_parts; ++i)
+      parts[i].alloc(hash_size);
+  }
+
+  /** Clear when disabling the adaptive hash index */
+  void clear() { for (ulong i= 0; i < btr_ahi_parts; ++i) parts[i].clear(); }
+
+  /** Free at shutdown */
+  void free()
+  {
+    if (parts)
+      for (ulong i= 0; i < btr_ahi_parts; ++i)
+        parts[i].free();
+  }
+};
+
+/** The adaptive hash index */
+extern btr_search_sys_t btr_search_sys;
+
 /** @return number of leaf pages pointed to by the adaptive hash index */
 inline ulint dict_index_t::n_ahi_pages() const
 {
   if (!btr_search_enabled)
     return 0;
-  rw_lock_t *latch = btr_get_search_latch(this);
+  rw_lock_t *latch = &btr_search_sys.get_part(*this)->latch;
   rw_lock_s_lock(latch);
   ulint ref_count= search_info->ref_count;
   rw_lock_s_unlock(latch);
   return ref_count;
 }

-/** The hash index system */
-struct btr_search_sys_t{
-	hash_table_t**	hash_tables;	/*!< the adaptive hash tables,
-					mapping dtuple_fold values
-					to rec_t pointers on index pages */
-};
-
-/** Latches protecting access to adaptive hash index. */
-extern rw_lock_t**		btr_search_latches;
-
-/** The adaptive hash index */
-extern btr_search_sys_t*	btr_search_sys;
-
 #ifdef UNIV_SEARCH_PERF_STAT
 /** Number of successful adaptive hash index lookups */
 extern ulint btr_search_n_succ;
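The pad[] expression in btr_search_sys_t::partition above deserves a note: (LINE - size) & (LINE - 1) is the distance from size up to the next multiple of the cache line, and because the subtraction wraps as unsigned arithmetic before the mask is applied, it stays correct even when the payload exceeds one line. A standalone check with made-up sizes (the real expression uses sizeof(rw_lock_t), sizeof(hash_table_t) and sizeof(mem_heap_t*)):

#include <cstddef>
#include <cstdio>

constexpr std::size_t LINE = 64;   // stand-in for CPU_LEVEL1_DCACHE_LINESIZE

constexpr std::size_t pad_for(std::size_t payload)
{
  return (LINE - payload) & (LINE - 1);
}

static_assert(pad_for(40) == 24, "40 + 24 fills one 64-byte line");
static_assert(pad_for(64) == 0, "a whole line needs no padding");
static_assert(pad_for(100) == 28, "100 + 28 fills two 64-byte lines");

int main()
{
  for (std::size_t payload : {40, 64, 100, 200})
    std::printf("payload %3zu -> pad %2zu -> total %3zu (%zu lines)\n",
                payload, pad_for(payload), payload + pad_for(payload),
                (payload + pad_for(payload)) / LINE);
  return 0;
}
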
...@@ -88,7 +88,7 @@ btr_search_info_update( ...@@ -88,7 +88,7 @@ btr_search_info_update(
static inline void btr_search_x_lock_all() static inline void btr_search_x_lock_all()
{ {
for (ulint i = 0; i < btr_ahi_parts; ++i) { for (ulint i = 0; i < btr_ahi_parts; ++i) {
rw_lock_x_lock(btr_search_latches[i]); rw_lock_x_lock(&btr_search_sys.parts[i].latch);
} }
} }
...@@ -96,7 +96,7 @@ static inline void btr_search_x_lock_all() ...@@ -96,7 +96,7 @@ static inline void btr_search_x_lock_all()
static inline void btr_search_x_unlock_all() static inline void btr_search_x_unlock_all()
{ {
for (ulint i = 0; i < btr_ahi_parts; ++i) { for (ulint i = 0; i < btr_ahi_parts; ++i) {
rw_lock_x_unlock(btr_search_latches[i]); rw_lock_x_unlock(&btr_search_sys.parts[i].latch);
} }
} }
...@@ -104,7 +104,7 @@ static inline void btr_search_x_unlock_all() ...@@ -104,7 +104,7 @@ static inline void btr_search_x_unlock_all()
static inline void btr_search_s_lock_all() static inline void btr_search_s_lock_all()
{ {
for (ulint i = 0; i < btr_ahi_parts; ++i) { for (ulint i = 0; i < btr_ahi_parts; ++i) {
rw_lock_s_lock(btr_search_latches[i]); rw_lock_s_lock(&btr_search_sys.parts[i].latch);
} }
} }
...@@ -112,7 +112,7 @@ static inline void btr_search_s_lock_all() ...@@ -112,7 +112,7 @@ static inline void btr_search_s_lock_all()
static inline void btr_search_s_unlock_all() static inline void btr_search_s_unlock_all()
{ {
for (ulint i = 0; i < btr_ahi_parts; ++i) { for (ulint i = 0; i < btr_ahi_parts; ++i) {
rw_lock_s_unlock(btr_search_latches[i]); rw_lock_s_unlock(&btr_search_sys.parts[i].latch);
} }
} }
...@@ -124,7 +124,7 @@ static inline void btr_search_s_unlock_all() ...@@ -124,7 +124,7 @@ static inline void btr_search_s_unlock_all()
static inline bool btr_search_own_all(ulint mode) static inline bool btr_search_own_all(ulint mode)
{ {
for (ulint i = 0; i < btr_ahi_parts; ++i) { for (ulint i = 0; i < btr_ahi_parts; ++i) {
if (!rw_lock_own(btr_search_latches[i], mode)) { if (!rw_lock_own(&btr_search_sys.parts[i].latch, mode)) {
return(false); return(false);
} }
} }
...@@ -138,7 +138,7 @@ static inline bool btr_search_own_all(ulint mode) ...@@ -138,7 +138,7 @@ static inline bool btr_search_own_all(ulint mode)
static inline bool btr_search_own_any(ulint mode) static inline bool btr_search_own_any(ulint mode)
{ {
for (ulint i = 0; i < btr_ahi_parts; ++i) { for (ulint i = 0; i < btr_ahi_parts; ++i) {
if (rw_lock_own(btr_search_latches[i], mode)) { if (rw_lock_own(&btr_search_sys.parts[i].latch, mode)) {
return(true); return(true);
} }
} }
...@@ -149,7 +149,7 @@ static inline bool btr_search_own_any(ulint mode) ...@@ -149,7 +149,7 @@ static inline bool btr_search_own_any(ulint mode)
static inline bool btr_search_own_any() static inline bool btr_search_own_any()
{ {
for (ulint i = btr_ahi_parts; i--; ) { for (ulint i = btr_ahi_parts; i--; ) {
if (rw_lock_own_flagged(btr_search_latches[i], if (rw_lock_own_flagged(&btr_search_sys.parts[i].latch,
RW_LOCK_FLAG_X | RW_LOCK_FLAG_S)) { RW_LOCK_FLAG_X | RW_LOCK_FLAG_S)) {
return true; return true;
} }
...@@ -157,34 +157,4 @@ static inline bool btr_search_own_any() ...@@ -157,34 +157,4 @@ static inline bool btr_search_own_any()
return false; return false;
} }
#endif /* UNIV_DEBUG */ #endif /* UNIV_DEBUG */
/** Get the adaptive hash search index latch for a b-tree.
@param[in] index b-tree index
@return latch */
static inline rw_lock_t* btr_get_search_latch(const dict_index_t* index)
{
ut_ad(index != NULL);
ut_ad(!index->table->space
|| index->table->space->id == index->table->space_id);
ulint ifold = ut_fold_ulint_pair(ulint(index->id),
index->table->space_id);
return(btr_search_latches[ifold % btr_ahi_parts]);
}
/** Get the hash-table based on index attributes.
A table is selected from the array of tables using the pair (index id, space id). A table is selected from the array of tables using the pair (index id, space id).
@param[in] index index handler
@return hash table */
static inline hash_table_t* btr_get_search_table(const dict_index_t* index)
{
ut_ad(index != NULL);
ut_ad(index->table->space->id == index->table->space_id);
ulint ifold = ut_fold_ulint_pair(ulint(index->id),
index->table->space_id);
return(btr_search_sys->hash_tables[ifold % btr_ahi_parts]);
}
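With both removed accessors replaced by members of btr_search_sys, call sites reduce to the following; a hedged illustration matching the usage elsewhere in this diff:

	rw_lock_t*	latch = btr_search_sys.get_latch(*index);
	hash_table_t*	table = &btr_search_sys.get_part(*index)->table;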
#endif /* BTR_CUR_HASH_ADAPT */ #endif /* BTR_CUR_HASH_ADAPT */
...@@ -1131,7 +1131,7 @@ struct buf_block_t{ ...@@ -1131,7 +1131,7 @@ struct buf_block_t{
assigning block->index = NULL (and block->n_pointers = 0) assigning block->index = NULL (and block->n_pointers = 0)
is allowed whenever btr_search_own_all(RW_LOCK_X). is allowed whenever btr_search_own_all(RW_LOCK_X).
Another exception is that ha_insert_for_fold_func() may Another exception is that ha_insert_for_fold() may
decrement n_pointers without holding the appropriate latch decrement n_pointers without holding the appropriate latch
in btr_search_latches[]. Thus, n_pointers must be in btr_search_latches[]. Thus, n_pointers must be
protected by atomic memory access. protected by atomic memory access.
......
...@@ -43,125 +43,6 @@ ha_search_and_get_data( ...@@ -43,125 +43,6 @@ ha_search_and_get_data(
/*===================*/ /*===================*/
hash_table_t* table, /*!< in: hash table */ hash_table_t* table, /*!< in: hash table */
ulint fold); /*!< in: folded value of the searched data */ ulint fold); /*!< in: folded value of the searched data */
/*********************************************************//**
Looks for an element when we know the pointer to the data and updates
the pointer to data if found.
@return TRUE if found */
ibool
ha_search_and_update_if_found_func(
/*===============================*/
hash_table_t* table, /*!< in/out: hash table */
ulint fold, /*!< in: folded value of the searched data */
const rec_t* data, /*!< in: pointer to the data */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* new_block,/*!< in: block containing new_data */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
const rec_t* new_data);/*!< in: new pointer to the data */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/** Looks for an element when we know the pointer to the data and
updates the pointer to data if found.
@param table in/out: hash table
@param fold in: folded value of the searched data
@param data in: pointer to the data
@param new_block in: block containing new_data
@param new_data in: new pointer to the data */
# define ha_search_and_update_if_found(table,fold,data,new_block,new_data) \
ha_search_and_update_if_found_func(table,fold,data,new_block,new_data)
#else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
/** Looks for an element when we know the pointer to the data and
updates the pointer to data if found.
@param table in/out: hash table
@param fold in: folded value of the searched data
@param data in: pointer to the data
@param new_block ignored: block containing new_data
@param new_data in: new pointer to the data */
# define ha_search_and_update_if_found(table,fold,data,new_block,new_data) \
ha_search_and_update_if_found_func(table,fold,data,new_data)
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
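The behaviour documented above is a chain walk plus one pointer swap. A minimal sketch, assuming ha_search_with_data() returns the matching ha_node_t* (non-debug signature, without the new_block parameter):

ibool
ha_search_and_update_if_found_sketch(
	hash_table_t*	table,		/*!< in/out: hash table */
	ulint		fold,		/*!< in: folded value of the data */
	const rec_t*	data,		/*!< in: current pointer to the data */
	const rec_t*	new_data)	/*!< in: new pointer to the data */
{
	/* find the node that currently points to data */
	if (ha_node_t* node = ha_search_with_data(table, fold, data)) {
		node->data = new_data;	/* repoint; the chain is unchanged */
		return(TRUE);
	}

	return(FALSE);
}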
#endif /* BTR_CUR_HASH_ADAPT */
#ifdef BTR_CUR_HASH_ADAPT
/*************************************************************//**
Inserts an entry into a hash table. If an entry with the same fold number
is found, its node is updated to point to the new data, and no new node
is inserted.
@return TRUE on success, FALSE if no more memory could be allocated */
ibool
ha_insert_for_fold_func(
/*====================*/
hash_table_t* table, /*!< in: hash table */
ulint fold, /*!< in: folded value of data; if a node with
the same fold value already exists, it is
updated to point to the same data, and no new
node is created! */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* block, /*!< in: buffer block containing the data */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
const rec_t* data); /*!< in: data, must not be NULL */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/**
Inserts an entry into a hash table. If an entry with the same fold number
is found, its node is updated to point to the new data, and no new node
is inserted.
@return TRUE on success, FALSE if no more memory could be allocated
@param t in: hash table
@param f in: folded value of data
@param b in: buffer block containing the data
@param d in: data, must not be NULL */
# define ha_insert_for_fold(t,f,b,d) do { \
ha_insert_for_fold_func(t,f,b,d); \
MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED); \
} while(0)
#else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
/**
Inserts an entry into a hash table. If an entry with the same fold number
is found, its node is updated to point to the new data, and no new node
is inserted.
@return TRUE on success, FALSE if no more memory could be allocated
@param t in: hash table
@param f in: folded value of data
@param b ignored: buffer block containing the data
@param d in: data, must not be NULL */
# define ha_insert_for_fold(t,f,b,d) do { \
ha_insert_for_fold_func(t,f,d); \
MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED); \
} while (0)
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
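Both variants of the macro reduce to the same insert-or-update loop. A hedged sketch of that logic, with the node heap passed explicitly (this commit moves the heap out of hash_table_t) and with the assumption, for the sketch only, that an exhausted heap yields NULL:

ibool
ha_insert_for_fold_sketch(
	hash_table_t*	table,	/*!< in/out: hash table */
	mem_heap_t*	heap,	/*!< in/out: node heap of the AHI partition */
	ulint		fold,	/*!< in: folded value of data */
	const rec_t*	data)	/*!< in: data, must not be NULL */
{
	hash_cell_t*	cell = hash_get_nth_cell(
		table, table->calc_hash(fold));

	/* if a node with the same fold exists, update it in place
	and do not create a new node */
	for (ha_node_t* node = static_cast<ha_node_t*>(cell->node);
	     node; node = node->next) {
		if (node->fold == fold) {
			node->data = data;
			return(TRUE);
		}
	}

	/* otherwise prepend a new node to the chain */
	ha_node_t*	node = static_cast<ha_node_t*>(
		mem_heap_alloc(heap, sizeof(ha_node_t)));
	if (!node) {
		return(FALSE);	/* no more memory */
	}

	node->fold = fold;
	node->data = data;
	node->next = static_cast<ha_node_t*>(cell->node);
	cell->node = node;
	return(TRUE);
}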
/*********************************************************//**
Looks for an element when we know the pointer to the data and deletes
it from the hash table if found.
@return TRUE if found */
UNIV_INLINE
ibool
ha_search_and_delete_if_found(
/*==========================*/
hash_table_t* table, /*!< in: hash table */
ulint fold, /*!< in: folded value of the searched data */
const rec_t* data); /*!< in: pointer to the data */
/*****************************************************************//**
Removes from the chain determined by fold all nodes whose data pointer
points to the page given. */
void
ha_remove_all_nodes_to_page(
/*========================*/
hash_table_t* table, /*!< in: hash table */
ulint fold, /*!< in: fold value */
const page_t* page); /*!< in: buffer page */
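A hedged sketch of that cleanup: page_align() recovers the page frame from a record pointer, and the scan restarts after every deletion because ha_delete_hash_node() relinks the chain:

void
ha_remove_all_nodes_to_page_sketch(
	hash_table_t*	table,	/*!< in/out: hash table */
	ulint		fold,	/*!< in: fold value */
	const page_t*	page)	/*!< in: buffer page */
{
rescan:
	for (ha_node_t* node = static_cast<ha_node_t*>(
		     hash_get_nth_cell(table,
				       table->calc_hash(fold))->node);
	     node; node = node->next) {
		if (page_align(node->data) == page) {
			ha_delete_hash_node(table, node);
			goto rescan;
		}
	}
}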
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/*************************************************************//**
Validates a given range of the cells in hash table.
@return TRUE if ok */
ibool
ha_validate(
/*========*/
hash_table_t* table, /*!< in: hash table */
ulint start_index, /*!< in: start index */
ulint end_index); /*!< in: end index */
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */
/** The hash table external chain node */ /** The hash table external chain node */
struct ha_node_t { struct ha_node_t {
......
...@@ -25,8 +25,6 @@ Created 8/18/1994 Heikki Tuuri ...@@ -25,8 +25,6 @@ Created 8/18/1994 Heikki Tuuri
*************************************************************************/ *************************************************************************/
#ifdef BTR_CUR_HASH_ADAPT #ifdef BTR_CUR_HASH_ADAPT
#include "ut0rnd.h"
#include "mem0mem.h"
#include "btr0types.h" #include "btr0types.h"
/******************************************************************//** /******************************************************************//**
...@@ -154,45 +152,4 @@ ha_search_with_data( ...@@ -154,45 +152,4 @@ ha_search_with_data(
return(NULL); return(NULL);
} }
/***********************************************************//**
Deletes a hash node. */
void
ha_delete_hash_node(
/*================*/
hash_table_t* table, /*!< in: hash table */
ha_node_t* del_node); /*!< in: node to be deleted */
#ifdef UNIV_DEBUG
/** Verify if latch corresponding to the hash table is x-latched
@param table hash table */
void ha_btr_search_latch_x_locked(const hash_table_t* table);
#endif /* UNIV_DEBUG */
/*********************************************************//**
Looks for an element when we know the pointer to the data, and deletes
it from the hash table, if found.
@return TRUE if found */
UNIV_INLINE
ibool
ha_search_and_delete_if_found(
/*==========================*/
hash_table_t* table, /*!< in: hash table */
ulint fold, /*!< in: folded value of the searched data */
const rec_t* data) /*!< in: pointer to the data */
{
ha_node_t* node;
ut_d(ha_btr_search_latch_x_locked(table));
ut_ad(btr_search_enabled);
node = ha_search_with_data(table, fold, data);
if (node) {
ha_delete_hash_node(table, node);
return(TRUE);
}
return(FALSE);
}
#endif /* BTR_CUR_HASH_ADAPT */ #endif /* BTR_CUR_HASH_ADAPT */
...@@ -24,11 +24,8 @@ The simple hash table utility ...@@ -24,11 +24,8 @@ The simple hash table utility
Created 5/20/1997 Heikki Tuuri Created 5/20/1997 Heikki Tuuri
*******************************************************/ *******************************************************/
#ifndef hash0hash_h #pragma once
#define hash0hash_h #include "ut0rnd.h"
#include "mem0mem.h"
#include "sync0rw.h"
struct hash_table_t; struct hash_table_t;
struct hash_cell_t{ struct hash_cell_t{
...@@ -259,26 +256,19 @@ do {\ ...@@ -259,26 +256,19 @@ do {\
}\ }\
} while (0) } while (0)
/* The hash table structure */ /** Hash table with singly-linked overflow lists */
struct hash_table_t { struct hash_table_t
#ifdef BTR_CUR_HASH_ADAPT {
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG /** number of elements in array (a prime number) */
ibool adaptive;/* TRUE if this is the hash ulint n_cells;
table of the adaptive hash /** the hash array */
index */ hash_cell_t *array;
# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
#endif /* BTR_CUR_HASH_ADAPT */ /** Create the hash table.
ulint n_cells;/* number of cells in the hash table */ @param n the lower bound of n_cells */
hash_cell_t* array; /*!< pointer to cell array */ void create(ulint n);
mem_heap_t* heap;
#ifdef UNIV_DEBUG
ulint magic_n;
# define HASH_TABLE_MAGIC_N 76561114
#endif /* UNIV_DEBUG */
ulint calc_hash(ulint fold) const { return ut_hash_ulint(fold, n_cells); } ulint calc_hash(ulint fold) const { return ut_hash_ulint(fold, n_cells); }
}; };
#include "hash0hash.ic" #include "hash0hash.ic"
#endif
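A brief usage sketch of the slimmed-down interface; the prime rounding is implied by the n_cells comment above and by create() taking only a lower bound:

	hash_table_t	table;
	table.create(1000);	/* n_cells becomes a prime >= 1000 */

	ulint		fold = 12345;	/* any fold value */
	hash_cell_t*	cell = hash_get_nth_cell(
		&table, table.calc_hash(fold));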
...@@ -35,7 +35,6 @@ hash_get_nth_cell( ...@@ -35,7 +35,6 @@ hash_get_nth_cell(
ulint n) /*!< in: cell index */ ulint n) /*!< in: cell index */
{ {
ut_ad(table); ut_ad(table);
ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
ut_ad(n < table->n_cells); ut_ad(n < table->n_cells);
return(table->array + n); return(table->array + n);
...@@ -50,7 +49,6 @@ hash_table_clear( ...@@ -50,7 +49,6 @@ hash_table_clear(
hash_table_t* table) /*!< in/out: hash table */ hash_table_t* table) /*!< in/out: hash table */
{ {
ut_ad(table); ut_ad(table);
ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
memset(table->array, 0x0, memset(table->array, 0x0,
table->n_cells * sizeof(*table->array)); table->n_cells * sizeof(*table->array));
} }
...@@ -65,6 +63,5 @@ hash_get_n_cells( ...@@ -65,6 +63,5 @@ hash_get_n_cells(
hash_table_t* table) /*!< in: table */ hash_table_t* table) /*!< in: table */
{ {
ut_ad(table); ut_ad(table);
ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
return(table->n_cells); return(table->n_cells);
} }
...@@ -859,7 +859,6 @@ constexpr const char* const auto_event_names[] = ...@@ -859,7 +859,6 @@ constexpr const char* const auto_event_names[] =
"fts0tlex", "fts0tlex",
"gis0sea", "gis0sea",
"ha_innodb", "ha_innodb",
"ha0ha",
"handler0alter", "handler0alter",
"hash0hash", "hash0hash",
"i_s", "i_s",
......
...@@ -3823,7 +3823,7 @@ row_sel_try_search_shortcut_for_mysql( ...@@ -3823,7 +3823,7 @@ row_sel_try_search_shortcut_for_mysql(
ut_ad(dict_index_is_clust(index)); ut_ad(dict_index_is_clust(index));
ut_ad(!prebuilt->templ_contains_blob); ut_ad(!prebuilt->templ_contains_blob);
rw_lock_t* ahi_latch = btr_get_search_latch(index); rw_lock_t* ahi_latch = btr_search_sys.get_latch(*index);
rw_lock_s_lock(ahi_latch); rw_lock_s_lock(ahi_latch);
btr_pcur_open_with_no_init(index, search_tuple, PAGE_CUR_GE, btr_pcur_open_with_no_init(index, search_tuple, PAGE_CUR_GE,
BTR_SEARCH_LEAF, pcur, ahi_latch, mtr); BTR_SEARCH_LEAF, pcur, ahi_latch, mtr);
......
...@@ -965,29 +965,16 @@ srv_printf_innodb_monitor( ...@@ -965,29 +965,16 @@ srv_printf_innodb_monitor(
ibuf_print(file); ibuf_print(file);
#ifdef BTR_CUR_HASH_ADAPT #ifdef BTR_CUR_HASH_ADAPT
btr_search_x_lock_all();
for (ulint i = 0; i < btr_ahi_parts && btr_search_enabled; ++i) { for (ulint i = 0; i < btr_ahi_parts && btr_search_enabled; ++i) {
const hash_table_t* table = btr_search_sys->hash_tables[i]; const auto part= &btr_search_sys.parts[i];
rw_lock_s_lock(&part->latch);
ut_ad(table->magic_n == HASH_TABLE_MAGIC_N); ut_ad(part->heap->type == MEM_HEAP_FOR_BTR_SEARCH);
ut_ad(table->heap->type == MEM_HEAP_FOR_BTR_SEARCH);
const mem_heap_t* heap = table->heap;
/* The heap may change during the following call,
so the data displayed may be garbage. We intentionally
avoid acquiring btr_search_latches[] so that the
diagnostic output will not stop here even in case another
thread hangs while holding btr_search_latches[].
This should be safe from crashes, because
table->heap will be pointing to the same object
for the full lifetime of the server. Even during
btr_search_disable() the heap will stay valid. */
fprintf(file, "Hash table size " ULINTPF fprintf(file, "Hash table size " ULINTPF
", node heap has " ULINTPF " buffer(s)\n", ", node heap has " ULINTPF " buffer(s)\n",
table->n_cells, heap->base.count - !heap->free_block); part->table.n_cells,
part->heap->base.count - !part->heap->free_block);
rw_lock_s_unlock(&part->latch);
} }
btr_search_x_unlock_all();
fprintf(file, fprintf(file,
"%.2f hash searches/s, %.2f non-hash searches/s\n", "%.2f hash searches/s, %.2f non-hash searches/s\n",
...@@ -1126,22 +1113,15 @@ srv_export_innodb_status(void) ...@@ -1126,22 +1113,15 @@ srv_export_innodb_status(void)
#ifdef BTR_CUR_HASH_ADAPT #ifdef BTR_CUR_HASH_ADAPT
ulint mem_adaptive_hash = 0; ulint mem_adaptive_hash = 0;
for (ulong i = 0; i < btr_ahi_parts; i++) { for (ulong i = 0; i < btr_ahi_parts; i++) {
rw_lock_s_lock(btr_search_latches[i]); const auto part= &btr_search_sys.parts[i];
if (!btr_search_sys->hash_tables) { rw_lock_s_lock(&part->latch);
next: if (part->heap) {
rw_lock_s_unlock(btr_search_latches[i]); ut_ad(part->heap->type == MEM_HEAP_FOR_BTR_SEARCH);
continue;
}
hash_table_t* ht = btr_search_sys->hash_tables[i];
ut_ad(ht); mem_adaptive_hash += mem_heap_get_size(part->heap)
ut_ad(ht->heap); + part->table.n_cells * sizeof(hash_cell_t);
ut_ad(ht->heap->type == MEM_HEAP_FOR_BTR_SEARCH); }
rw_lock_s_unlock(&part->latch);
mem_adaptive_hash += mem_heap_get_size(ht->heap)
+ ht->n_cells * sizeof(hash_cell_t);
goto next;
} }
export_vars.innodb_mem_adaptive_hash = mem_adaptive_hash; export_vars.innodb_mem_adaptive_hash = mem_adaptive_hash;
#endif #endif
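A hedged back-of-the-envelope reading of that formula: with btr_ahi_parts = 8 and, say, n_cells = 2,000,003 per partition on a 64-bit build (sizeof(hash_cell_t) == 8), the cell arrays alone contribute 8 * 2000003 * 8 = 128,000,192 bytes, roughly 128 MB, before the node heaps are added by mem_heap_get_size().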
......
...@@ -2127,9 +2127,6 @@ void innodb_shutdown() ...@@ -2127,9 +2127,6 @@ void innodb_shutdown()
|| srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO); || srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO);
ut_ad(lock_sys.is_initialised() || !srv_was_started); ut_ad(lock_sys.is_initialised() || !srv_was_started);
ut_ad(log_sys.is_initialised() || !srv_was_started); ut_ad(log_sys.is_initialised() || !srv_was_started);
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(btr_search_sys || !srv_was_started);
#endif /* BTR_CUR_HASH_ADAPT */
ut_ad(ibuf.index || !srv_was_started); ut_ad(ibuf.index || !srv_was_started);
dict_stats_deinit(); dict_stats_deinit();
......