Commit bf3c862f authored by Marko Mäkelä

MDEV-22871: Clean up btr_search_sys

btr_search_sys::parts[]: A single structure for the partitions of
the adaptive hash index. Replaces the 3 separate arrays:
btr_search_latches[], btr_search_sys->hash_tables,
btr_search_sys->hash_tables[i]->heap.

hash_table_t::heap, hash_table_t::adaptive: Remove.

ha0ha.cc: Remove. Move all code to btr0sea.cc.
parent 9159b897
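For orientation before the diff: the new layout keeps one `partition` object (latch, hash table, and memory heap together) per adaptive-hash-index part, and a caller picks a partition by folding the index id with the tablespace id and reducing the result modulo `btr_ahi_parts`. The standalone sketch below only models that selection rule; the stub types and the `mix()` helper are illustrative placeholders, not InnoDB APIs (the real definitions are in the `btr0sea.h` hunk further down).

```cpp
// Illustrative sketch, not InnoDB code: models how one AHI partition object
// (latch + hash table + heap) is picked for a given index in the new layout.
#include <cstddef>
#include <cstdint>
#include <vector>

struct ahi_partition
{
  int latch;  // stands in for rw_lock_t latch
  int table;  // stands in for hash_table_t table
  int heap;   // stands in for mem_heap_t* heap
};

struct ahi_system
{
  std::vector<ahi_partition> parts;  // replaces btr_search_latches[] and
                                     // btr_search_sys->hash_tables[]

  // Same selection rule as btr_search_sys_t::get_part(): fold the pair
  // (index id, tablespace id) and reduce it modulo the partition count.
  ahi_partition& get_part(uint64_t index_id, uint32_t space_id)
  {
    // mix() is a placeholder for InnoDB's ut_fold_ulint_pair().
    auto mix = [](uint64_t a, uint64_t b) { return a * 31 + b; };
    return parts[static_cast<size_t>(mix(index_id, space_id)) % parts.size()];
  }
};

int main()
{
  ahi_system sys;
  sys.parts.resize(8);                     // innodb_adaptive_hash_index_parts
  ahi_partition& p = sys.get_part(42, 4);  // everything for this index's AHI
  (void) p;                                // lives in this single partition
  return 0;
}
```

Compared with the old code, a lookup no longer has to touch three parallel arrays: everything belonging to one partition is reachable from a single object.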
@@ -56,7 +56,6 @@ SET(INNOBASE_SOURCES
 	fsp/fsp0space.cc
 	fsp/fsp0sysspace.cc
 	fut/fut0lst.cc
-	ha/ha0ha.cc
 	ha/ha0storage.cc
 	ha/hash0hash.cc
 	fts/fts0fts.cc
......
@@ -3561,7 +3561,7 @@ btr_cur_optimistic_insert(
 		ut_ad(index->is_instant());
 		ut_ad(flags == BTR_NO_LOCKING_FLAG);
 	} else {
-		rw_lock_t* ahi_latch = btr_get_search_latch(index);
+		rw_lock_t* ahi_latch = btr_search_sys.get_latch(*index);
 		if (!reorg && cursor->flag == BTR_CUR_HASH) {
 			btr_search_update_hash_node_on_insert(
 				cursor, ahi_latch);
@@ -3772,7 +3772,7 @@ btr_cur_pessimistic_insert(
 		ut_ad(!(flags & BTR_CREATE_FLAG));
 	} else {
 		btr_search_update_hash_on_insert(
-			cursor, btr_get_search_latch(index));
+			cursor, btr_search_sys.get_latch(*index));
 	}
 #endif /* BTR_CUR_HASH_ADAPT */
 	if (inherit && !(flags & BTR_NO_LOCKING_FLAG)) {
@@ -4274,7 +4274,7 @@ btr_cur_update_in_place(
 #ifdef BTR_CUR_HASH_ADAPT
 	{
 		rw_lock_t* ahi_latch = block->index
-			? btr_get_search_latch(index) : NULL;
+			? btr_search_sys.get_latch(*index) : NULL;
 		if (ahi_latch) {
 			/* TO DO: Can we skip this if none of the fields
 			index->search_info->curr_n_fields
......
This diff is collapsed.
@@ -1552,7 +1552,7 @@ bool buf_pool_t::create()
   chunk_t::map_ref= chunk_t::map_reg;
   buf_LRU_old_ratio_update(100 * 3 / 8, false);
-  btr_search_sys_create(srv_buf_pool_curr_size / sizeof(void*) / 64);
+  btr_search_sys_create();
   ut_ad(is_initialised());
   return false;
 }
......
This diff is collapsed.
@@ -28,20 +28,23 @@ Created 5/20/1997 Heikki Tuuri
 #include "mem0mem.h"
 #include "sync0sync.h"
+/** Create the hash table.
+@param n  the lower bound of n_cells */
+void hash_table_t::create(ulint n)
+{
+  n_cells= ut_find_prime(n);
+  array= static_cast<hash_cell_t*>(ut_zalloc_nokey(n_cells * sizeof *array));
+}
 /**
 Create a hash table.
 @param n  the minimum number of hash array elements
 @return created table (with n_cells being a prime, at least n) */
 hash_table_t *hash_create(ulint n)
 {
-  ulint prime= ut_find_prime(n);
   hash_table_t *table= static_cast<hash_table_t*>
     (ut_zalloc_nokey(sizeof *table));
-  table->array= static_cast<hash_cell_t*>(ut_zalloc_nokey(sizeof(hash_cell_t) *
-                                                          prime));
-  table->n_cells= prime;
-  ut_d(table->magic_n= HASH_TABLE_MAGIC_N);
+  table->create(n);
   return table;
 }
@@ -52,8 +55,6 @@ hash_table_free(
 /*============*/
 	hash_table_t*	table)	/*!< in, own: hash table */
 {
-	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
 	ut_free(table->array);
 	ut_free(table);
 }
@@ -30,13 +30,10 @@ Created 2/17/1996 Heikki Tuuri
 #include "dict0dict.h"
 #ifdef BTR_CUR_HASH_ADAPT
 #include "ha0ha.h"
-#include "sync0sync.h"
-/** Creates and initializes the adaptive search system at a database start.
-@param[in] hash_size hash table size. */
-void btr_search_sys_create(ulint hash_size);
-/** Frees the adaptive search system at a database shutdown. */
-void btr_search_sys_free();
+#define btr_search_sys_create() btr_search_sys.create()
+#define btr_search_sys_free() btr_search_sys.free()
 /** Disable the adaptive hash search system and empty the index. */
 void btr_search_disable();
@@ -162,19 +159,8 @@ static inline bool btr_search_own_any();
 /** Unlock all search latches from shared mode. */
 static inline void btr_search_s_unlock_all();
-/** Get the latch based on index attributes.
-A latch is selected from an array of latches using pair of index-id, space-id.
-@param[in] index index handler
-@return latch */
-static inline rw_lock_t* btr_get_search_latch(const dict_index_t* index);
-/** Get the hash-table based on index attributes.
-A table is selected from an array of tables using pair of index-id, space-id.
-@param[in] index index handler
-@return hash table */
-static inline hash_table_t* btr_get_search_table(const dict_index_t* index);
 #else /* BTR_CUR_HASH_ADAPT */
-# define btr_search_sys_create(size)
+# define btr_search_sys_create()
 # define btr_search_sys_free()
 # define btr_search_drop_page_hash_index(block)
 # define btr_search_s_lock_all(index)
@@ -259,31 +245,119 @@ struct btr_search_t{
 };
 #ifdef BTR_CUR_HASH_ADAPT
+/** The hash index system */
+struct btr_search_sys_t
+{
+  /** Partition of the hash table */
+  struct partition
+  {
+    /** latches protecting hash_table */
+    rw_lock_t latch;
+    /** mapping of dtuple_fold() to rec_t* in buf_block_t::frame */
+    hash_table_t table;
+    /** memory heap for table */
+    mem_heap_t *heap;
+    char pad[(CPU_LEVEL1_DCACHE_LINESIZE - sizeof(rw_lock_t) -
+              sizeof(hash_table_t) - sizeof(mem_heap_t)) &
+             (CPU_LEVEL1_DCACHE_LINESIZE - 1)];
+    void init()
+    {
+      memset((void*) this, 0, sizeof *this);
+      rw_lock_create(btr_search_latch_key, &latch, SYNC_SEARCH_SYS);
+    }
+    void alloc(ulint hash_size)
+    {
+      table.create(hash_size);
+      heap= mem_heap_create_typed(std::min<ulong>(4096,
+                                                  MEM_MAX_ALLOC_IN_BUF / 2
+                                                  - MEM_BLOCK_HEADER_SIZE
+                                                  - MEM_SPACE_NEEDED(0)),
+                                  MEM_HEAP_FOR_BTR_SEARCH);
+    }
+    void clear()
+    {
+      mem_heap_free(heap);
+      heap= nullptr;
+      ut_free(table.array);
+    }
+    void free()
+    {
+      rw_lock_free(&latch);
+      if (heap)
+        clear();
+    }
+  };
+  /** Partitions of the adaptive hash index */
+  partition *parts;
+  /** Get an adaptive hash index partition */
+  partition *get_part(index_id_t id, ulint space_id) const
+  {
+    return parts + ut_fold_ulint_pair(ulint(id), space_id) % btr_ahi_parts;
+  }
+  /** Get an adaptive hash index partition */
+  partition *get_part(const dict_index_t &index) const
+  {
+    ut_ad(index.table->space->id == index.table->space_id);
+    return get_part(ulint(index.id), index.table->space_id);
+  }
+  /** Get the search latch for the adaptive hash index partition */
+  rw_lock_t *get_latch(const dict_index_t &index) const
+  { return &get_part(index)->latch; }
+  /** Create and initialize at startup */
+  void create()
+  {
+    parts= static_cast<partition*>(ut_malloc(btr_ahi_parts * sizeof *parts,
+                                             mem_key_ahi));
+    for (ulong i= 0; i < btr_ahi_parts; ++i)
+      parts[i].init();
+    if (btr_search_enabled)
+      btr_search_enable();
+  }
+  void alloc(ulint hash_size)
+  {
+    hash_size/= btr_ahi_parts;
+    for (ulong i= 0; i < btr_ahi_parts; ++i)
+      parts[i].alloc(hash_size);
+  }
+  /** Clear when disabling the adaptive hash index */
+  void clear() { for (ulong i= 0; i < btr_ahi_parts; ++i) parts[i].clear(); }
+  /** Free at shutdown */
+  void free()
+  {
+    if (parts)
+      for (ulong i= 0; i < btr_ahi_parts; ++i)
+        parts[i].free();
+  }
+};
+/** The adaptive hash index */
+extern btr_search_sys_t btr_search_sys;
 /** @return number of leaf pages pointed to by the adaptive hash index */
 inline ulint dict_index_t::n_ahi_pages() const
 {
   if (!btr_search_enabled)
     return 0;
-  rw_lock_t *latch = btr_get_search_latch(this);
+  rw_lock_t *latch = &btr_search_sys.get_part(*this)->latch;
   rw_lock_s_lock(latch);
   ulint ref_count= search_info->ref_count;
   rw_lock_s_unlock(latch);
   return ref_count;
 }
-/** The hash index system */
-struct btr_search_sys_t{
-	hash_table_t**	hash_tables;	/*!< the adaptive hash tables,
-					mapping dtuple_fold values
-					to rec_t pointers on index pages */
-};
-/** Latches protecting access to adaptive hash index. */
-extern rw_lock_t**	btr_search_latches;
-/** The adaptive hash index */
-extern btr_search_sys_t*	btr_search_sys;
 #ifdef UNIV_SEARCH_PERF_STAT
 /** Number of successful adaptive hash index lookups */
 extern ulint btr_search_n_succ;
......
@@ -88,7 +88,7 @@ btr_search_info_update(
 static inline void btr_search_x_lock_all()
 {
 	for (ulint i = 0; i < btr_ahi_parts; ++i) {
-		rw_lock_x_lock(btr_search_latches[i]);
+		rw_lock_x_lock(&btr_search_sys.parts[i].latch);
 	}
 }
@@ -96,7 +96,7 @@ static inline void btr_search_x_lock_all()
 static inline void btr_search_x_unlock_all()
 {
 	for (ulint i = 0; i < btr_ahi_parts; ++i) {
-		rw_lock_x_unlock(btr_search_latches[i]);
+		rw_lock_x_unlock(&btr_search_sys.parts[i].latch);
 	}
 }
@@ -104,7 +104,7 @@ static inline void btr_search_x_unlock_all()
 static inline void btr_search_s_lock_all()
 {
 	for (ulint i = 0; i < btr_ahi_parts; ++i) {
-		rw_lock_s_lock(btr_search_latches[i]);
+		rw_lock_s_lock(&btr_search_sys.parts[i].latch);
 	}
 }
@@ -112,7 +112,7 @@ static inline void btr_search_s_lock_all()
 static inline void btr_search_s_unlock_all()
 {
 	for (ulint i = 0; i < btr_ahi_parts; ++i) {
-		rw_lock_s_unlock(btr_search_latches[i]);
+		rw_lock_s_unlock(&btr_search_sys.parts[i].latch);
 	}
 }
@@ -124,7 +124,7 @@ static inline void btr_search_s_unlock_all()
 static inline bool btr_search_own_all(ulint mode)
 {
 	for (ulint i = 0; i < btr_ahi_parts; ++i) {
-		if (!rw_lock_own(btr_search_latches[i], mode)) {
+		if (!rw_lock_own(&btr_search_sys.parts[i].latch, mode)) {
 			return(false);
 		}
 	}
@@ -138,7 +138,7 @@ static inline bool btr_search_own_all(ulint mode)
 static inline bool btr_search_own_any(ulint mode)
 {
 	for (ulint i = 0; i < btr_ahi_parts; ++i) {
-		if (rw_lock_own(btr_search_latches[i], mode)) {
+		if (rw_lock_own(&btr_search_sys.parts[i].latch, mode)) {
 			return(true);
 		}
 	}
@@ -149,7 +149,7 @@ static inline bool btr_search_own_any(ulint mode)
 static inline bool btr_search_own_any()
 {
 	for (ulint i = btr_ahi_parts; i--; ) {
-		if (rw_lock_own_flagged(btr_search_latches[i],
+		if (rw_lock_own_flagged(&btr_search_sys.parts[i].latch,
 					RW_LOCK_FLAG_X | RW_LOCK_FLAG_S)) {
 			return true;
 		}
@@ -157,34 +157,4 @@ static inline bool btr_search_own_any()
 	return false;
 }
 #endif /* UNIV_DEBUG */
-/** Get the adaptive hash search index latch for a b-tree.
-@param[in] index b-tree index
-@return latch */
-static inline rw_lock_t* btr_get_search_latch(const dict_index_t* index)
-{
-	ut_ad(index != NULL);
-	ut_ad(!index->table->space
-	      || index->table->space->id == index->table->space_id);
-	ulint ifold = ut_fold_ulint_pair(ulint(index->id),
-					 index->table->space_id);
-	return(btr_search_latches[ifold % btr_ahi_parts]);
-}
-/** Get the hash-table based on index attributes.
-A table is selected from an array of tables using pair of index-id, space-id.
-@param[in] index index handler
-@return hash table */
-static inline hash_table_t* btr_get_search_table(const dict_index_t* index)
-{
-	ut_ad(index != NULL);
-	ut_ad(index->table->space->id == index->table->space_id);
-	ulint ifold = ut_fold_ulint_pair(ulint(index->id),
-					 index->table->space_id);
-	return(btr_search_sys->hash_tables[ifold % btr_ahi_parts]);
-}
 #endif /* BTR_CUR_HASH_ADAPT */
@@ -1131,7 +1131,7 @@ struct buf_block_t{
 	assigning block->index = NULL (and block->n_pointers = 0)
 	is allowed whenever btr_search_own_all(RW_LOCK_X).
-	Another exception is that ha_insert_for_fold_func() may
+	Another exception is that ha_insert_for_fold() may
 	decrement n_pointers without holding the appropriate latch
 	in btr_search_latches[]. Thus, n_pointers must be
 	protected by atomic memory access.
......
@@ -43,125 +43,6 @@ ha_search_and_get_data(
 /*===================*/
 	hash_table_t*	table,	/*!< in: hash table */
 	ulint		fold);	/*!< in: folded value of the searched data */
-/*********************************************************//**
-Looks for an element when we know the pointer to the data and updates
-the pointer to data if found.
-@return TRUE if found */
-ibool
-ha_search_and_update_if_found_func(
-/*===============================*/
-	hash_table_t*	table,	/*!< in/out: hash table */
-	ulint		fold,	/*!< in: folded value of the searched data */
-	const rec_t*	data,	/*!< in: pointer to the data */
-#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
-	buf_block_t*	new_block,/*!< in: block containing new_data */
-#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-	const rec_t*	new_data);/*!< in: new pointer to the data */
-#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
-/** Looks for an element when we know the pointer to the data and
-updates the pointer to data if found.
-@param table in/out: hash table
-@param fold in: folded value of the searched data
-@param data in: pointer to the data
-@param new_block in: block containing new_data
-@param new_data in: new pointer to the data */
-# define ha_search_and_update_if_found(table,fold,data,new_block,new_data) \
-	ha_search_and_update_if_found_func(table,fold,data,new_block,new_data)
-#else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-/** Looks for an element when we know the pointer to the data and
-updates the pointer to data if found.
-@param table in/out: hash table
-@param fold in: folded value of the searched data
-@param data in: pointer to the data
-@param new_block ignored: block containing new_data
-@param new_data in: new pointer to the data */
-# define ha_search_and_update_if_found(table,fold,data,new_block,new_data) \
-	ha_search_and_update_if_found_func(table,fold,data,new_data)
-#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-#endif /* BTR_CUR_HASH_ADAPT */
-#ifdef BTR_CUR_HASH_ADAPT
-/*************************************************************//**
-Inserts an entry into a hash table. If an entry with the same fold number
-is found, its node is updated to point to the new data, and no new node
-is inserted.
-@return TRUE if succeed, FALSE if no more memory could be allocated */
-ibool
-ha_insert_for_fold_func(
-/*====================*/
-	hash_table_t*	table,	/*!< in: hash table */
-	ulint		fold,	/*!< in: folded value of data; if a node with
-				the same fold value already exists, it is
-				updated to point to the same data, and no new
-				node is created! */
-#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
-	buf_block_t*	block,	/*!< in: buffer block containing the data */
-#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-	const rec_t*	data);	/*!< in: data, must not be NULL */
-#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
-/**
-Inserts an entry into a hash table. If an entry with the same fold number
-is found, its node is updated to point to the new data, and no new node
-is inserted.
-@return TRUE if succeed, FALSE if no more memory could be allocated
-@param t in: hash table
-@param f in: folded value of data
-@param b in: buffer block containing the data
-@param d in: data, must not be NULL */
-# define ha_insert_for_fold(t,f,b,d) do { \
-	ha_insert_for_fold_func(t,f,b,d); \
-	MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED); \
-} while(0)
-#else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-/**
-Inserts an entry into a hash table. If an entry with the same fold number
-is found, its node is updated to point to the new data, and no new node
-is inserted.
-@return TRUE if succeed, FALSE if no more memory could be allocated
-@param t in: hash table
-@param f in: folded value of data
-@param b ignored: buffer block containing the data
-@param d in: data, must not be NULL */
-# define ha_insert_for_fold(t,f,b,d) do { \
-	ha_insert_for_fold_func(t,f,d); \
-	MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED); \
-} while (0)
-#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-/*********************************************************//**
-Looks for an element when we know the pointer to the data and deletes
-it from the hash table if found.
-@return TRUE if found */
-UNIV_INLINE
-ibool
-ha_search_and_delete_if_found(
-/*==========================*/
-	hash_table_t*	table,	/*!< in: hash table */
-	ulint		fold,	/*!< in: folded value of the searched data */
-	const rec_t*	data);	/*!< in: pointer to the data */
-/*****************************************************************//**
-Removes from the chain determined by fold all nodes whose data pointer
-points to the page given. */
-void
-ha_remove_all_nodes_to_page(
-/*========================*/
-	hash_table_t*	table,	/*!< in: hash table */
-	ulint		fold,	/*!< in: fold value */
-	const page_t*	page);	/*!< in: buffer page */
-#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
-/*************************************************************//**
-Validates a given range of the cells in hash table.
-@return TRUE if ok */
-ibool
-ha_validate(
-/*========*/
-	hash_table_t*	table,		/*!< in: hash table */
-	ulint		start_index,	/*!< in: start index */
-	ulint		end_index);	/*!< in: end index */
-#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */
 /** The hash table external chain node */
 struct ha_node_t {
......
@@ -25,8 +25,6 @@ Created 8/18/1994 Heikki Tuuri
 *************************************************************************/
 #ifdef BTR_CUR_HASH_ADAPT
-#include "ut0rnd.h"
-#include "mem0mem.h"
 #include "btr0types.h"
 /******************************************************************//**
@@ -154,45 +152,4 @@ ha_search_with_data(
 	return(NULL);
 }
-/***********************************************************//**
-Deletes a hash node. */
-void
-ha_delete_hash_node(
-/*================*/
-	hash_table_t*	table,		/*!< in: hash table */
-	ha_node_t*	del_node);	/*!< in: node to be deleted */
-#ifdef UNIV_DEBUG
-/** Verify if latch corresponding to the hash table is x-latched
-@param table hash table */
-void ha_btr_search_latch_x_locked(const hash_table_t* table);
-#endif /* UNIV_DEBUG */
-/*********************************************************//**
-Looks for an element when we know the pointer to the data, and deletes
-it from the hash table, if found.
-@return TRUE if found */
-UNIV_INLINE
-ibool
-ha_search_and_delete_if_found(
-/*==========================*/
-	hash_table_t*	table,	/*!< in: hash table */
-	ulint		fold,	/*!< in: folded value of the searched data */
-	const rec_t*	data)	/*!< in: pointer to the data */
-{
-	ha_node_t*	node;
-	ut_d(ha_btr_search_latch_x_locked(table));
-	ut_ad(btr_search_enabled);
-	node = ha_search_with_data(table, fold, data);
-	if (node) {
-		ha_delete_hash_node(table, node);
-		return(TRUE);
-	}
-	return(FALSE);
-}
 #endif /* BTR_CUR_HASH_ADAPT */
@@ -24,11 +24,8 @@ The simple hash table utility
 Created 5/20/1997 Heikki Tuuri
 *******************************************************/
-#ifndef hash0hash_h
-#define hash0hash_h
-#include "mem0mem.h"
-#include "sync0rw.h"
+#pragma once
+#include "ut0rnd.h"
 struct hash_table_t;
 struct hash_cell_t{
@@ -259,26 +256,19 @@ do {\
 }\
 } while (0)
-/* The hash table structure */
-struct hash_table_t {
-#ifdef BTR_CUR_HASH_ADAPT
-# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
-	ibool		adaptive;/* TRUE if this is the hash
-				table of the adaptive hash
-				index */
-# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-#endif /* BTR_CUR_HASH_ADAPT */
-	ulint		n_cells;/* number of cells in the hash table */
-	hash_cell_t*	array;	/*!< pointer to cell array */
-	mem_heap_t*	heap;
-#ifdef UNIV_DEBUG
-	ulint		magic_n;
-# define HASH_TABLE_MAGIC_N	76561114
-#endif /* UNIV_DEBUG */
+/** Hash table with singly-linked overflow lists */
+struct hash_table_t
+{
+  /** number of elements in array (a prime number) */
+  ulint n_cells;
+  /** the hash array */
+  hash_cell_t *array;
+  /** Create the hash table.
+  @param n  the lower bound of n_cells */
+  void create(ulint n);
   ulint calc_hash(ulint fold) const { return ut_hash_ulint(fold, n_cells); }
 };
 #include "hash0hash.ic"
-#endif
@@ -35,7 +35,6 @@ hash_get_nth_cell(
 	ulint		n)	/*!< in: cell index */
 {
 	ut_ad(table);
-	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
 	ut_ad(n < table->n_cells);
 	return(table->array + n);
@@ -50,7 +49,6 @@ hash_table_clear(
 	hash_table_t*	table)	/*!< in/out: hash table */
 {
 	ut_ad(table);
-	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
 	memset(table->array, 0x0,
 	       table->n_cells * sizeof(*table->array));
 }
@@ -65,6 +63,5 @@ hash_get_n_cells(
 	hash_table_t*	table)	/*!< in: table */
 {
 	ut_ad(table);
-	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
 	return(table->n_cells);
 }
@@ -859,7 +859,6 @@ constexpr const char* const auto_event_names[] =
 	"fts0tlex",
 	"gis0sea",
 	"ha_innodb",
-	"ha0ha",
 	"handler0alter",
 	"hash0hash",
 	"i_s",
......
@@ -3823,7 +3823,7 @@ row_sel_try_search_shortcut_for_mysql(
 	ut_ad(dict_index_is_clust(index));
 	ut_ad(!prebuilt->templ_contains_blob);
-	rw_lock_t*	ahi_latch = btr_get_search_latch(index);
+	rw_lock_t*	ahi_latch = btr_search_sys.get_latch(*index);
 	rw_lock_s_lock(ahi_latch);
 	btr_pcur_open_with_no_init(index, search_tuple, PAGE_CUR_GE,
 				   BTR_SEARCH_LEAF, pcur, ahi_latch, mtr);
......
@@ -965,29 +965,16 @@ srv_printf_innodb_monitor(
 	ibuf_print(file);
 #ifdef BTR_CUR_HASH_ADAPT
-	btr_search_x_lock_all();
 	for (ulint i = 0; i < btr_ahi_parts && btr_search_enabled; ++i) {
-		const hash_table_t* table = btr_search_sys->hash_tables[i];
-		ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-		ut_ad(table->heap->type == MEM_HEAP_FOR_BTR_SEARCH);
-		const mem_heap_t* heap = table->heap;
-		/* The heap may change during the following call,
-		so the data displayed may be garbage. We intentionally
-		avoid acquiring btr_search_latches[] so that the
-		diagnostic output will not stop here even in case another
-		thread hangs while holding btr_search_latches[].
-		This should be safe from crashes, because
-		table->heap will be pointing to the same object
-		for the full lifetime of the server. Even during
-		btr_search_disable() the heap will stay valid. */
+		const auto part= &btr_search_sys.parts[i];
+		rw_lock_s_lock(&part->latch);
+		ut_ad(part->heap->type == MEM_HEAP_FOR_BTR_SEARCH);
 		fprintf(file, "Hash table size " ULINTPF
 			", node heap has " ULINTPF " buffer(s)\n",
-			table->n_cells, heap->base.count - !heap->free_block);
+			part->table.n_cells,
+			part->heap->base.count - !part->heap->free_block);
+		rw_lock_s_unlock(&part->latch);
 	}
-	btr_search_x_unlock_all();
 	fprintf(file,
 		"%.2f hash searches/s, %.2f non-hash searches/s\n",
@@ -1126,22 +1113,15 @@ srv_export_innodb_status(void)
 #ifdef BTR_CUR_HASH_ADAPT
 	ulint mem_adaptive_hash = 0;
 	for (ulong i = 0; i < btr_ahi_parts; i++) {
-		rw_lock_s_lock(btr_search_latches[i]);
-		if (!btr_search_sys->hash_tables) {
-next:
-			rw_lock_s_unlock(btr_search_latches[i]);
-			continue;
-		}
-		hash_table_t* ht = btr_search_sys->hash_tables[i];
-		ut_ad(ht);
-		ut_ad(ht->heap);
-		ut_ad(ht->heap->type == MEM_HEAP_FOR_BTR_SEARCH);
-		mem_adaptive_hash += mem_heap_get_size(ht->heap)
-			+ ht->n_cells * sizeof(hash_cell_t);
-		goto next;
+		const auto part= &btr_search_sys.parts[i];
+		rw_lock_s_lock(&part->latch);
+		if (part->heap) {
+			ut_ad(part->heap->type == MEM_HEAP_FOR_BTR_SEARCH);
+			mem_adaptive_hash += mem_heap_get_size(part->heap)
+				+ part->table.n_cells * sizeof(hash_cell_t);
+		}
+		rw_lock_s_unlock(&part->latch);
 	}
 	export_vars.innodb_mem_adaptive_hash = mem_adaptive_hash;
 #endif
......
@@ -2127,9 +2127,6 @@ void innodb_shutdown()
 	       || srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO);
 	ut_ad(lock_sys.is_initialised() || !srv_was_started);
 	ut_ad(log_sys.is_initialised() || !srv_was_started);
-#ifdef BTR_CUR_HASH_ADAPT
-	ut_ad(btr_search_sys || !srv_was_started);
-#endif /* BTR_CUR_HASH_ADAPT */
 	ut_ad(ibuf.index || !srv_was_started);
 	dict_stats_deinit();
......