Commit 0485328d authored by Monty's avatar Monty

Cache check_table_binlog_row_based and mark_trx_read_write

Benefits:
- Speeds up insert, write and delete by avoiding 1-2 function calls per write/update/delete.
- Avoids calling write_locked_table_maps() if not needed.
- The inlined code is much smaller than before
- Updating of table->s->cached_row_logging_check moved to when table is opened
- Moved some bool values together in handler class to get better alignment.
parent b436db98
This diff is collapsed.
...@@ -2581,11 +2581,6 @@ class handler :public Sql_alloc ...@@ -2581,11 +2581,6 @@ class handler :public Sql_alloc
RANGE_SEQ_IF mrr_funcs; /* Range sequence traversal functions */ RANGE_SEQ_IF mrr_funcs; /* Range sequence traversal functions */
HANDLER_BUFFER *multi_range_buffer; /* MRR buffer info */ HANDLER_BUFFER *multi_range_buffer; /* MRR buffer info */
uint ranges_in_seq; /* Total number of ranges in the traversed sequence */ uint ranges_in_seq; /* Total number of ranges in the traversed sequence */
/* TRUE <=> source MRR ranges and the output are ordered */
bool mrr_is_output_sorted;
/** TRUE <=> we're currently traversing a range in mrr_cur_range. */
bool mrr_have_range;
/** Current range (the one we're now returning rows from) */ /** Current range (the one we're now returning rows from) */
KEY_MULTI_RANGE mrr_cur_range; KEY_MULTI_RANGE mrr_cur_range;
...@@ -2593,23 +2588,32 @@ class handler :public Sql_alloc ...@@ -2593,23 +2588,32 @@ class handler :public Sql_alloc
key_range save_end_range, *end_range; key_range save_end_range, *end_range;
KEY_PART_INFO *range_key_part; KEY_PART_INFO *range_key_part;
int key_compare_result_on_equal; int key_compare_result_on_equal;
bool eq_range;
bool internal_tmp_table; /* If internal tmp table */
uint errkey; /* Last dup key */ /* TRUE <=> source MRR ranges and the output are ordered */
uint key_used_on_scan; bool mrr_is_output_sorted;
uint active_index; /** TRUE <=> we're currently traversing a range in mrr_cur_range. */
bool mrr_have_range;
bool eq_range;
bool internal_tmp_table; /* If internal tmp table */
bool implicit_emptied; /* Can be !=0 only if HEAP */
bool mark_trx_read_write_done; /* mark_trx_read_write was called */
bool check_table_binlog_row_based_done; /* check_table_binlog.. was called */
bool check_table_binlog_row_based_result; /* cached check_table_binlog... */
/* /*
TRUE <=> the engine guarantees that returned records are within the range TRUE <=> the engine guarantees that returned records are within the range
being scanned. being scanned.
*/ */
bool in_range_check_pushed_down; bool in_range_check_pushed_down;
uint errkey; /* Last dup key */
uint key_used_on_scan;
uint active_index;
/** Length of ref (1-8 or the clustered key length) */ /** Length of ref (1-8 or the clustered key length) */
uint ref_length; uint ref_length;
FT_INFO *ft_handler; FT_INFO *ft_handler;
enum {NONE=0, INDEX, RND} inited; enum {NONE=0, INDEX, RND} inited;
bool implicit_emptied; /* Can be !=0 only if HEAP */
const COND *pushed_cond; const COND *pushed_cond;
/** /**
next_insert_id is the next value which should be inserted into the next_insert_id is the next value which should be inserted into the
...@@ -2693,11 +2697,16 @@ class handler :public Sql_alloc ...@@ -2693,11 +2697,16 @@ class handler :public Sql_alloc
handler(handlerton *ht_arg, TABLE_SHARE *share_arg) handler(handlerton *ht_arg, TABLE_SHARE *share_arg)
:table_share(share_arg), table(0), :table_share(share_arg), table(0),
estimation_rows_to_insert(0), ht(ht_arg), estimation_rows_to_insert(0), ht(ht_arg),
ref(0), end_range(NULL), key_used_on_scan(MAX_KEY), active_index(MAX_KEY), ref(0), end_range(NULL),
implicit_emptied(0),
mark_trx_read_write_done(0),
check_table_binlog_row_based_done(0),
check_table_binlog_row_based_result(0),
in_range_check_pushed_down(FALSE), in_range_check_pushed_down(FALSE),
key_used_on_scan(MAX_KEY),
active_index(MAX_KEY),
ref_length(sizeof(my_off_t)), ref_length(sizeof(my_off_t)),
ft_handler(0), inited(NONE), ft_handler(0), inited(NONE),
implicit_emptied(0),
pushed_cond(0), next_insert_id(0), insert_id_for_cur_row(0), pushed_cond(0), next_insert_id(0), insert_id_for_cur_row(0),
tracker(NULL), tracker(NULL),
pushed_idx_cond(NULL), pushed_idx_cond(NULL),
...@@ -3875,10 +3884,22 @@ class handler :public Sql_alloc ...@@ -3875,10 +3884,22 @@ class handler :public Sql_alloc
*/ */
virtual int delete_table(const char *name); virtual int delete_table(const char *name);
public:
inline bool check_table_binlog_row_based(bool binlog_row);
private: private:
/* Cache result to avoid extra calls */
inline void mark_trx_read_write()
{
if (unlikely(!mark_trx_read_write_done))
{
mark_trx_read_write_done= 1;
mark_trx_read_write_internal();
}
}
void mark_trx_read_write_internal();
bool check_table_binlog_row_based_internal(bool binlog_row);
/* Private helpers */ /* Private helpers */
inline void mark_trx_read_write();
private:
inline void increment_statistics(ulong SSV::*offset) const; inline void increment_statistics(ulong SSV::*offset) const;
inline void decrement_statistics(ulong SSV::*offset) const; inline void decrement_statistics(ulong SSV::*offset) const;
......
...@@ -40,6 +40,7 @@ ...@@ -40,6 +40,7 @@
#include "discover.h" #include "discover.h"
#include "mdl.h" // MDL_wait_for_graph_visitor #include "mdl.h" // MDL_wait_for_graph_visitor
#include "sql_view.h" #include "sql_view.h"
#include "rpl_filter.h"
/* INFORMATION_SCHEMA name */ /* INFORMATION_SCHEMA name */
LEX_STRING INFORMATION_SCHEMA_NAME= {C_STRING_WITH_LEN("information_schema")}; LEX_STRING INFORMATION_SCHEMA_NAME= {C_STRING_WITH_LEN("information_schema")};
...@@ -316,7 +317,8 @@ TABLE_SHARE *alloc_table_share(const char *db, const char *table_name, ...@@ -316,7 +317,8 @@ TABLE_SHARE *alloc_table_share(const char *db, const char *table_name,
share->normalized_path.length= path_length; share->normalized_path.length= path_length;
share->table_category= get_table_category(& share->db, & share->table_name); share->table_category= get_table_category(& share->db, & share->table_name);
share->open_errno= ENOENT; share->open_errno= ENOENT;
share->cached_row_logging_check= -1; /* The following will be fixed in open_table_from_share */
share->cached_row_logging_check= 1;
init_sql_alloc(&share->stats_cb.mem_root, TABLE_ALLOC_BLOCK_SIZE, 0, MYF(0)); init_sql_alloc(&share->stats_cb.mem_root, TABLE_ALLOC_BLOCK_SIZE, 0, MYF(0));
...@@ -381,7 +383,7 @@ void init_tmp_table_share(THD *thd, TABLE_SHARE *share, const char *key, ...@@ -381,7 +383,7 @@ void init_tmp_table_share(THD *thd, TABLE_SHARE *share, const char *key,
share->path.length= share->normalized_path.length= strlen(path); share->path.length= share->normalized_path.length= strlen(path);
share->frm_version= FRM_VER_TRUE_VARCHAR; share->frm_version= FRM_VER_TRUE_VARCHAR;
share->cached_row_logging_check= -1; share->cached_row_logging_check= 0; // No row logging
/* /*
table_map_id is also used for MERGE tables to suppress repeated table_map_id is also used for MERGE tables to suppress repeated
...@@ -2974,6 +2976,9 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share, ...@@ -2974,6 +2976,9 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
outparam->no_replicate= FALSE; outparam->no_replicate= FALSE;
} }
if (outparam->no_replicate || !binlog_filter->db_ok(outparam->s->db.str))
outparam->s->cached_row_logging_check= 0; // No row based replication
/* Increment the opened_tables counter, only when open flags set. */ /* Increment the opened_tables counter, only when open flags set. */
if (db_stat) if (db_stat)
thd->status_var.opened_tables++; thd->status_var.opened_tables++;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment