Commit 2f09b28e authored by Monty

Adding Full Text Search support to partitions

Contains Spiral patches:
007_mariadb-10.2.0.partition_fulltext.diff  MDEV-7705
038_mariadb-10.2.0.partition_fulltext2.diff MDEV-7734

This commit has the following differences compared to the original
patches:

- Added necessary full text search cleanup at the storage engine layer
  that was omitted in the original patch.
- Added test case.
- A lot of code cleanups to make the code notably smaller.
- Changed SQL code to use ha_ft_end() instead of ft_end().
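
As a rough illustration only (hypothetical table name, engine and data; this is not the test case included below): the change removes HA_CAN_FULLTEXT from PARTITION_DISABLED_TABLE_FLAGS, so a partitioned table can now carry a FULLTEXT index and MATCH ... AGAINST is dispatched to each partition's own full text handler.

-- A FULLTEXT index on a partitioned table was rejected before this patch.
CREATE TABLE ft_part_demo (
  pkey INT NOT NULL,
  words TEXT NOT NULL,
  PRIMARY KEY (pkey),
  FULLTEXT (words)
) ENGINE=MyISAM
PARTITION BY KEY (pkey) PARTITIONS 3;

INSERT INTO ft_part_demo (pkey, words)
VALUES (1, 'abc'), (2, 'def'), (3, 'ghi');

-- ha_partition::ft_init_ext() opens an FT handler per partition and
-- ft_read() scans the matching partitions one after another.
SELECT pkey, words FROM ft_part_demo
WHERE MATCH(words) AGAINST('+ghi' IN BOOLEAN MODE);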

Original author: Kentoku SHIBA
First reviewer:  Jacob Mathew
Second reviewer: Michael Widenius
parent 5b409843
......@@ -74,7 +74,6 @@
HA_REC_NOT_IN_SEQ | \
HA_CAN_REPAIR)
#define PARTITION_DISABLED_TABLE_FLAGS (HA_CAN_GEOMETRY | \
HA_CAN_FULLTEXT | \
HA_DUPLICATE_POS | \
HA_CAN_INSERT_DELAYED | \
HA_READ_BEFORE_WRITE_REMOVAL |\
......@@ -126,7 +125,6 @@ static void init_partition_psi_keys(void)
static int partition_initialize(void *p)
{
handlerton *partition_hton;
partition_hton= (handlerton *)p;
......@@ -245,12 +243,19 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share)
:handler(hton, share)
{
DBUG_ENTER("ha_partition::ha_partition(table)");
init_alloc_root(&m_mem_root, 512, 512, MYF(0));
init_handler_variables();
ha_partition_init();
DBUG_VOID_RETURN;
}
/* Initialize all partition variables */
void ha_partition::ha_partition_init()
{
init_alloc_root(&m_mem_root, 512, 512, MYF(0));
init_handler_variables();
}
/*
Constructor method
......@@ -267,8 +272,7 @@ ha_partition::ha_partition(handlerton *hton, partition_info *part_info)
{
DBUG_ENTER("ha_partition::ha_partition(part_info)");
DBUG_ASSERT(part_info);
init_alloc_root(&m_mem_root, 512, 512, MYF(0));
init_handler_variables();
ha_partition_init();
m_part_info= part_info;
m_create_handler= TRUE;
m_is_sub_partitioned= m_part_info->is_sub_partitioned();
......@@ -294,8 +298,7 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share,
:handler(hton, share)
{
DBUG_ENTER("ha_partition::ha_partition(clone)");
init_alloc_root(&m_mem_root, 512, 512, MYF(0));
init_handler_variables();
ha_partition_init();
m_part_info= part_info_arg;
m_create_handler= TRUE;
m_is_sub_partitioned= m_part_info->is_sub_partitioned();
......@@ -383,6 +386,9 @@ void ha_partition::init_handler_variables()
m_pre_calling= FALSE;
m_pre_call_use_parallel= FALSE;
ft_first= ft_current= NULL;
bulk_access_executing= FALSE; // For future
/*
Clear bitmaps to allow anyone to call my_bitmap_free() on them at any time
*/
......@@ -3759,14 +3765,20 @@ int ha_partition::close(void)
bool first= TRUE;
handler **file;
uint i;
st_partition_ft_info *tmp_ft_info;
DBUG_ENTER("ha_partition::close");
DBUG_ASSERT(table->s == table_share);
DBUG_ASSERT(m_part_info);
destroy_record_priority_queue();
free_partition_bitmaps();
for (; ft_first ; ft_first= tmp_ft_info)
{
tmp_ft_info= ft_first->next;
my_free(ft_first);
}
/* Free active mrr_ranges */
for (i= 0; i < m_tot_parts; i++)
{
......@@ -6460,6 +6472,450 @@ int ha_partition::multi_range_read_explain_info(uint mrr_mode, char *str,
}
/**
Find and retrieve the Full Text Search relevance ranking for a search string
in a full text index.
@param handler Full Text Search handler
@param record Search string
@param length Length of the search string
@retval Relevance value
*/
float partition_ft_find_relevance(FT_INFO *handler,
uchar *record, uint length)
{
st_partition_ft_info *info= (st_partition_ft_info *)handler;
uint m_last_part= ((ha_partition*) info->file)->last_part();
FT_INFO *m_handler= info->part_ft_info[m_last_part];
DBUG_ENTER("partition_ft_find_relevance");
if (!m_handler)
DBUG_RETURN((float)-1.0);
DBUG_RETURN(m_handler->please->find_relevance(m_handler, record, length));
}
/**
Retrieve the Full Text Search relevance ranking for the current
full text search.
@param handler Full Text Search handler
@retval Relevance value
*/
float partition_ft_get_relevance(FT_INFO *handler)
{
st_partition_ft_info *info= (st_partition_ft_info *)handler;
uint m_last_part= ((ha_partition*) info->file)->last_part();
FT_INFO *m_handler= info->part_ft_info[m_last_part];
DBUG_ENTER("partition_ft_get_relevance");
if (!m_handler)
DBUG_RETURN((float)-1.0);
DBUG_RETURN(m_handler->please->get_relevance(m_handler));
}
/**
Free the memory for a full text search handler.
@param handler Full Text Search handler
*/
void partition_ft_close_search(FT_INFO *handler)
{
st_partition_ft_info *info= (st_partition_ft_info *)handler;
info->file->ft_close_search(handler);
}
/**
Free the memory for a full text search handler.
@param handler Full Text Search handler
*/
void ha_partition::ft_close_search(FT_INFO *handler)
{
uint i;
st_partition_ft_info *info= (st_partition_ft_info *)handler;
DBUG_ENTER("ha_partition::ft_close_search");
for (i= 0; i < m_tot_parts; i++)
{
FT_INFO *m_handler= info->part_ft_info[i];
DBUG_ASSERT(!m_handler ||
(m_handler->please && m_handler->please->close_search));
if (m_handler &&
m_handler->please &&
m_handler->please->close_search)
m_handler->please->close_search(m_handler);
}
DBUG_VOID_RETURN;
}
/* Partition Full Text search function table */
_ft_vft partition_ft_vft =
{
NULL, // partition_ft_read_next
partition_ft_find_relevance,
partition_ft_close_search,
partition_ft_get_relevance,
NULL // partition_ft_reinit_search
};
/**
Initialize a full text search.
*/
int ha_partition::ft_init()
{
int error;
uint i= 0;
uint32 part_id;
DBUG_ENTER("ha_partition::ft_init");
DBUG_PRINT("info", ("partition this: %p", this));
/*
For operations that may need to change data, we may need to extend
read_set.
*/
if (get_lock_type() == F_WRLCK)
{
/*
If write_set contains any of the fields used in partition and
subpartition expression, we need to set all bits in read_set because
the row may need to be inserted in a different [sub]partition. In
other words update_row() can be converted into write_row(), which
requires a complete record.
*/
if (bitmap_is_overlapping(&m_part_info->full_part_field_set,
table->write_set))
bitmap_set_all(table->read_set);
else
{
/*
Some handlers only read fields as specified by the bitmap for the
read set. For partitioned handlers we always require that the
fields of the partition functions are read such that we can
calculate the partition id to place updated and deleted records.
*/
bitmap_union(table->read_set, &m_part_info->full_part_field_set);
}
}
/* Find the index of the first partition we need to scan */
DBUG_PRINT("info", ("m_part_info->read_partitions: %p",
(void *) m_part_info->read_partitions.bitmap));
part_id= bitmap_get_first_set(&(m_part_info->read_partitions));
DBUG_PRINT("info", ("m_part_spec.start_part %d", part_id));
if (part_id == MY_BIT_NONE)
{
error= 0;
goto err1;
}
DBUG_PRINT("info", ("ft_init on partition %d", part_id));
/*
ft_end() is needed for partitioning to reset internal data if scan
is already in use
*/
if (m_pre_calling)
{
if ((error= pre_ft_end()))
goto err1;
}
else
ft_end();
m_index_scan_type= partition_ft_read;
for (i= part_id; i < m_tot_parts; i++)
{
if (bitmap_is_set(&(m_part_info->read_partitions), i))
{
error= m_pre_calling ? m_file[i]->pre_ft_init() : m_file[i]->ft_init();
if (error)
goto err2;
}
}
m_scan_value= 1;
m_part_spec.start_part= part_id;
m_part_spec.end_part= m_tot_parts - 1;
m_ft_init_and_first= TRUE;
DBUG_PRINT("info", ("m_scan_value: %d", m_scan_value));
DBUG_RETURN(0);
err2:
late_extra_no_cache(part_id);
while ((int)--i >= (int)part_id)
{
if (bitmap_is_set(&(m_part_info->read_partitions), i))
{
if (m_pre_calling)
m_file[i]->pre_ft_end();
else
m_file[i]->ft_end();
}
}
err1:
m_scan_value= 2;
m_part_spec.start_part= NO_CURRENT_PART_ID;
DBUG_RETURN(error);
}
/**
Initialize a full text search during a bulk access request.
*/
int ha_partition::pre_ft_init()
{
bool save_m_pre_calling;
int error;
DBUG_ENTER("ha_partition::pre_ft_init");
save_m_pre_calling= m_pre_calling;
m_pre_calling= TRUE;
error= ft_init();
m_pre_calling= save_m_pre_calling;
DBUG_RETURN(error);
}
/**
Terminate a full text search.
*/
void ha_partition::ft_end()
{
handler **file;
DBUG_ENTER("ha_partition::ft_end");
DBUG_PRINT("info", ("partition this: %p", this));
switch (m_scan_value) {
case 2: // Error
break;
case 1: // Table scan
if (NO_CURRENT_PART_ID != m_part_spec.start_part)
late_extra_no_cache(m_part_spec.start_part);
file= m_file;
do
{
if (bitmap_is_set(&(m_part_info->read_partitions), (uint)(file - m_file)))
{
if (m_pre_calling)
(*file)->pre_ft_end();
else
(*file)->ft_end();
}
} while (*(++file));
break;
}
m_scan_value= 2;
m_part_spec.start_part= NO_CURRENT_PART_ID;
ft_current= 0;
DBUG_VOID_RETURN;
}
/**
Terminate a full text search during a bulk access request.
*/
int ha_partition::pre_ft_end()
{
bool save_m_pre_calling;
DBUG_ENTER("ha_partition::pre_ft_end");
save_m_pre_calling= m_pre_calling;
m_pre_calling= TRUE;
ft_end();
m_pre_calling= save_m_pre_calling;
DBUG_RETURN(0);
}
/**
Initialize a full text search using the extended API.
@param flags Search flags
@param inx Key number
@param key Key value
@return FT_INFO structure if successful
NULL otherwise
*/
FT_INFO *ha_partition::ft_init_ext(uint flags, uint inx, String *key)
{
FT_INFO *ft_handler;
handler **file;
st_partition_ft_info *ft_target, **parent;
DBUG_ENTER("ha_partition::ft_init_ext");
if (ft_current)
parent= &ft_current->next;
else
parent= &ft_first;
if (!(ft_target= *parent))
{
FT_INFO **tmp_ft_info;
if (!(ft_target= (st_partition_ft_info *)
my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
&ft_target,
sizeof(st_partition_ft_info),
&tmp_ft_info,
sizeof(FT_INFO *) * m_tot_parts,
NullS)))
{
my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATALERROR));
DBUG_RETURN(NULL);
}
ft_target->part_ft_info= tmp_ft_info;
(*parent)= ft_target;
}
ft_current= ft_target;
file= m_file;
do
{
if (bitmap_is_set(&(m_part_info->read_partitions), (uint)(file - m_file)))
{
if ((ft_handler= (*file)->ft_init_ext(flags, inx, key)))
(*file)->ft_handler= ft_handler;
else
(*file)->ft_handler= NULL;
ft_target->part_ft_info[file - m_file]= ft_handler;
}
else
{
(*file)->ft_handler= NULL;
ft_target->part_ft_info[file - m_file]= NULL;
}
} while (*(++file));
ft_target->please= &partition_ft_vft;
ft_target->file= this;
DBUG_RETURN((FT_INFO*)ft_target);
}
/**
Return the next record from the FT result set during an ordered index
pre-scan
@param use_parallel Is it a parallel search
@return >0 Error code
0 Success
*/
int ha_partition::pre_ft_read(bool use_parallel)
{
bool save_m_pre_calling;
int error;
DBUG_ENTER("ha_partition::pre_ft_read");
DBUG_PRINT("info", ("partition this: %p", this));
save_m_pre_calling= m_pre_calling;
m_pre_calling= TRUE;
m_pre_call_use_parallel= use_parallel;
error= ft_read(table->record[0]);
m_pre_calling= save_m_pre_calling;
DBUG_RETURN(error);
}
/**
Return the first or next record in a full text search.
@param buf Buffer where the record should be returned
@return >0 Error code
0 Success
*/
int ha_partition::ft_read(uchar *buf)
{
handler *file;
int result= HA_ERR_END_OF_FILE, error;
uint part_id= m_part_spec.start_part;
DBUG_ENTER("ha_partition::ft_read");
DBUG_PRINT("info", ("partition this: %p", this));
DBUG_PRINT("info", ("part_id: %u", part_id));
if (part_id == NO_CURRENT_PART_ID)
{
/*
The original set of partitions to scan was empty and thus we report
the result here.
*/
DBUG_PRINT("info", ("NO_CURRENT_PART_ID"));
goto end;
}
DBUG_ASSERT(m_scan_value == 1);
if (m_ft_init_and_first) // First call to ft_read()
{
m_ft_init_and_first= FALSE;
if (!bulk_access_executing)
{
error= handle_pre_scan(FALSE, check_parallel_search());
if (m_pre_calling || error)
DBUG_RETURN(error);
}
late_extra_cache(part_id);
}
file= m_file[part_id];
while (TRUE)
{
if (!(result= file->ft_read(buf)))
{
/* Found row: remember position and return it. */
m_part_spec.start_part= m_last_part= part_id;
table->status= 0;
DBUG_RETURN(0);
}
/*
If we get here, the current partition's ft_read() returned a failure
*/
if (result == HA_ERR_RECORD_DELETED)
continue; // Probably MyISAM
if (result != HA_ERR_END_OF_FILE)
goto end_dont_reset_start_part; // Return error
/* End current partition */
late_extra_no_cache(part_id);
DBUG_PRINT("info", ("stopping using partition %d", part_id));
/* Shift to next partition */
while (++part_id < m_tot_parts &&
!bitmap_is_set(&(m_part_info->read_partitions), part_id))
;
if (part_id >= m_tot_parts)
{
result= HA_ERR_END_OF_FILE;
break;
}
m_part_spec.start_part= m_last_part= part_id;
file= m_file[part_id];
DBUG_PRINT("info", ("now using partition %d", part_id));
late_extra_cache(part_id);
}
end:
m_part_spec.start_part= NO_CURRENT_PART_ID;
end_dont_reset_start_part:
table->status= STATUS_NOT_FOUND;
DBUG_RETURN(result);
}
/*
Common routine to set up index scans
......
......@@ -67,6 +67,17 @@ class Parts_share_refs
}
};
class ha_partition;
/* Partition Full Text Search info */
struct st_partition_ft_info
{
struct _ft_vft *please;
st_partition_ft_info *next;
ha_partition *file;
FT_INFO **part_ft_info;
};
/**
Partition specific Handler_share.
......@@ -164,6 +175,8 @@ class ha_partition :public handler
partition_info *m_part_info; // local reference to partition
Field **m_part_field_array; // Part field array locally to save acc
uchar *m_ordered_rec_buffer; // Row and key buffer for ord. idx scan
st_partition_ft_info *ft_first;
st_partition_ft_info *ft_current;
/*
Current index.
When used in key_rec_cmp: If clustered pk, index compare
......@@ -229,6 +242,7 @@ class ha_partition :public handler
bool m_is_sub_partitioned; // Is subpartitioned
bool m_ordered_scan_ongoing;
bool m_rnd_init_and_first;
bool m_ft_init_and_first;
/*
If set, this object was created with ha_partition::clone and doesn't
......@@ -283,6 +297,9 @@ class ha_partition :public handler
enum_monotonicity_info m_part_func_monotonicity_info;
bool m_pre_calling;
bool m_pre_call_use_parallel;
/* Keep track of bulk access requests */
bool bulk_access_executing;
/** keep track of locked partitions */
MY_BITMAP m_locked_partitions;
/** Stores shared auto_increment etc. */
......@@ -333,6 +350,7 @@ class ha_partition :public handler
ha_partition *clone_arg,
MEM_ROOT *clone_mem_root_arg);
~ha_partition();
void ha_partition_init();
/*
A partition handler has no characteristics in itself. It only inherits
those from the underlying handlers. Here we set-up those constants to
......@@ -693,7 +711,7 @@ class ha_partition :public handler
virtual int multi_range_read_next(range_id_t *range_info);
virtual int multi_range_read_explain_info(uint mrr_mode, char *str,
size_t size);
uint last_part() { return m_last_part; }
private:
bool init_record_priority_queue();
......@@ -992,7 +1010,7 @@ class ha_partition :public handler
special file for handling names of partitions, engine types.
HA_REC_NOT_IN_SEQ is always set for partition handler since we cannot
guarantee that the records will be returned in sequence.
HA_CAN_FULLTEXT, HA_DUPLICATE_POS,
HA_DUPLICATE_POS,
HA_CAN_INSERT_DELAYED, HA_PRIMARY_KEY_REQUIRED_FOR_POSITION is disabled
until further investigated.
*/
......@@ -1205,14 +1223,15 @@ class ha_partition :public handler
-------------------------------------------------------------------------
MODULE fulltext index
-------------------------------------------------------------------------
Fulltext stuff not yet.
-------------------------------------------------------------------------
virtual int ft_init() { return HA_ERR_WRONG_COMMAND; }
virtual FT_INFO *ft_init_ext(uint flags,uint inx,const uchar *key,
uint keylen)
{ return NULL; }
virtual int ft_read(uchar *buf) { return HA_ERR_WRONG_COMMAND; }
*/
void ft_close_search(FT_INFO *handler);
virtual int ft_init();
virtual int pre_ft_init();
virtual void ft_end();
virtual int pre_ft_end();
virtual FT_INFO *ft_init_ext(uint flags, uint inx, String *key);
virtual int ft_read(uchar *buf);
virtual int pre_ft_read(bool use_parallel);
/*
-------------------------------------------------------------------------
......
......@@ -3312,7 +3312,9 @@ class handler :public Sql_alloc
int compare_key(key_range *range);
int compare_key2(key_range *range) const;
virtual int ft_init() { return HA_ERR_WRONG_COMMAND; }
void ft_end() { ft_handler=NULL; }
virtual int pre_ft_init() { return HA_ERR_WRONG_COMMAND; }
virtual void ft_end() {}
virtual int pre_ft_end() { return 0; }
virtual FT_INFO *ft_init_ext(uint flags, uint inx,String *key)
{ return NULL; }
private:
......@@ -3335,6 +3337,7 @@ class handler :public Sql_alloc
/* Same as above, but with statistics */
inline int ha_ft_read(uchar *buf);
inline void ha_ft_end() { ft_end(); ft_handler=NULL; }
int ha_rnd_next(uchar *buf);
int ha_rnd_pos(uchar *buf, uchar *pos);
inline int ha_rnd_pos_by_record(uchar *buf);
......
......@@ -22144,7 +22144,7 @@ create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort)
table->file->ha_end_keyread();
if (tab->type == JT_FT)
table->file->ft_end();
table->file->ha_ft_end();
else
table->file->ha_index_or_rnd_end();
......
--let $MASTER_1_COMMENT_2_1= $MASTER_1_COMMENT_2_1_BACKUP
--let $MASTER_1_COMMENT_2_2= $MASTER_1_COMMENT_2_2_BACKUP
--let $CHILD2_1_DROP_TABLES= $CHILD2_1_DROP_TABLES_BACKUP
--let $CHILD2_1_CREATE_TABLES= $CHILD2_1_CREATE_TABLES_BACKUP
--let $CHILD2_1_SELECT_TABLES= $CHILD2_1_SELECT_TABLES_BACKUP
--let $CHILD2_2_DROP_TABLES= $CHILD2_2_DROP_TABLES_BACKUP
--let $CHILD2_2_CREATE_TABLES= $CHILD2_2_CREATE_TABLES_BACKUP
--let $CHILD2_2_SELECT_TABLES= $CHILD2_2_SELECT_TABLES_BACKUP
--let $CHILD2_3_DROP_TABLES= $CHILD2_3_DROP_TABLES_BACKUP
--let $CHILD2_3_CREATE_TABLES= $CHILD2_3_CREATE_TABLES_BACKUP
--let $CHILD2_3_SELECT_TABLES= $CHILD2_3_SELECT_TABLES_BACKUP
--let $OUTPUT_CHILD_GROUP2= $OUTPUT_CHILD_GROUP2_BACKUP
--let $USE_GENERAL_LOG= $USE_GENERAL_LOG_BACKUP
--connection master_1
set session join_cache_level= @old_join_cache_level;
set session optimizer_switch= @old_optimizer_switch;
--disable_warnings
--disable_query_log
--disable_result_log
--source ../t/test_deinit.inc
--enable_result_log
--enable_query_log
--enable_warnings
--disable_warnings
--disable_query_log
--disable_result_log
--source ../t/test_init.inc
--enable_result_log
--enable_query_log
--enable_warnings
--let $MASTER_1_COMMENT_2_1_BACKUP= $MASTER_1_COMMENT_2_1
let $MASTER_1_COMMENT_2_1=
COMMENT='table "tbl_a", bka_mode "1"'
PARTITION BY KEY(pkey) (
PARTITION pt1 COMMENT='srv "s_2_1"',
PARTITION pt2 COMMENT='srv "s_2_2"',
PARTITION pt3 COMMENT='srv "s_2_3"'
);
--let $CHILD2_1_DROP_TABLES_BACKUP= $CHILD2_1_DROP_TABLES
let $CHILD2_1_DROP_TABLES=
DROP TABLE IF EXISTS tbl_a;
--let $CHILD2_1_CREATE_TABLES_BACKUP= $CHILD2_1_CREATE_TABLES
let $CHILD2_1_CREATE_TABLES=
CREATE TABLE tbl_a (
pkey int NOT NULL,
words text NOT NULL,
PRIMARY KEY (pkey),
FULLTEXT (words)
) $CHILD2_1_ENGINE $CHILD2_1_CHARSET;
--let $CHILD2_1_SELECT_TABLES_BACKUP= $CHILD2_1_SELECT_TABLES
let $CHILD2_1_SELECT_TABLES=
SELECT pkey FROM tbl_a ORDER BY pkey;
let $CHILD2_1_SELECT_ARGUMENT1=
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
--let $CHILD2_2_DROP_TABLES_BACKUP= $CHILD2_2_DROP_TABLES
let $CHILD2_2_DROP_TABLES=
DROP TABLE IF EXISTS tbl_a;
--let $CHILD2_2_CREATE_TABLES_BACKUP= $CHILD2_2_CREATE_TABLES
let $CHILD2_2_CREATE_TABLES=
CREATE TABLE tbl_a (
pkey int NOT NULL,
words text NOT NULL,
PRIMARY KEY (pkey),
FULLTEXT (words)
) $CHILD2_2_ENGINE $CHILD2_2_CHARSET;
--let $CHILD2_2_SELECT_TABLES_BACKUP= $CHILD2_2_SELECT_TABLES
let $CHILD2_2_SELECT_TABLES=
SELECT pkey FROM tbl_a ORDER BY pkey;
let $CHILD2_2_SELECT_ARGUMENT1=
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
--let $CHILD2_3_DROP_TABLES_BACKUP= $CHILD2_3_DROP_TABLES
let $CHILD2_3_DROP_TABLES=
DROP TABLE IF EXISTS tbl_a;
--let $CHILD2_3_CREATE_TABLES_BACKUP= $CHILD2_3_CREATE_TABLES
let $CHILD2_3_CREATE_TABLES=
CREATE TABLE tbl_a (
pkey int NOT NULL,
words text NOT NULL,
PRIMARY KEY (pkey),
FULLTEXT (words)
) $CHILD2_3_ENGINE $CHILD2_3_CHARSET;
--let $CHILD2_3_SELECT_TABLES_BACKUP= $CHILD2_3_SELECT_TABLES
let $CHILD2_3_SELECT_TABLES=
SELECT pkey FROM tbl_a ORDER BY pkey;
let $CHILD2_3_SELECT_ARGUMENT1=
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
--let $OUTPUT_CHILD_GROUP2_BACKUP= $OUTPUT_CHILD_GROUP2
--let $OUTPUT_CHILD_GROUP2= 1
--let $USE_GENERAL_LOG_BACKUP= $USE_GENERAL_LOG
--let $USE_GENERAL_LOG= 1
--connection master_1
set @old_join_cache_level= @@join_cache_level;
set session join_cache_level= 5;
set @old_optimizer_switch= @@optimizer_switch;
set session optimizer_switch= 'mrr=on';
for master_1
for child2
child2_1
child2_2
child2_3
for child3
child3_1
child3_2
child3_3
connection master_1;
set @old_join_cache_level= @@join_cache_level;
set session join_cache_level= 5;
set @old_optimizer_switch= @@optimizer_switch;
set session optimizer_switch= 'mrr=on';
drop and create databases
connection master_1;
DROP DATABASE IF EXISTS auto_test_local;
CREATE DATABASE auto_test_local;
USE auto_test_local;
connection child2_1;
SET @old_log_output = @@global.log_output;
SET GLOBAL log_output = 'TABLE,FILE';
DROP DATABASE IF EXISTS auto_test_remote;
CREATE DATABASE auto_test_remote;
USE auto_test_remote;
connection child2_2;
SET @old_log_output = @@global.log_output;
SET GLOBAL log_output = 'TABLE,FILE';
DROP DATABASE IF EXISTS auto_test_remote2;
CREATE DATABASE auto_test_remote2;
USE auto_test_remote2;
connection child2_3;
SET @old_log_output = @@global.log_output;
SET GLOBAL log_output = 'TABLE,FILE';
DROP DATABASE IF EXISTS auto_test_remote3;
CREATE DATABASE auto_test_remote3;
USE auto_test_remote3;
create table and insert
connection child2_1;
CHILD2_1_DROP_TABLES
CHILD2_1_CREATE_TABLES
TRUNCATE TABLE mysql.general_log;
connection child2_2;
CHILD2_2_DROP_TABLES
CHILD2_2_CREATE_TABLES
TRUNCATE TABLE mysql.general_log;
connection child2_3;
CHILD2_3_DROP_TABLES
CHILD2_3_CREATE_TABLES
TRUNCATE TABLE mysql.general_log;
connection master_1;
DROP TABLE IF EXISTS tbl_a;
DROP TABLE IF EXISTS tbl_b;
CREATE TABLE tbl_a (
pkey int NOT NULL,
words text NOT NULL,
PRIMARY KEY (pkey),
FULLTEXT (words)
) MASTER_1_ENGINE MASTER_1_CHARSET MASTER_1_COMMENT_2_1
INSERT INTO tbl_a (pkey, words) VALUES (0, 'abc'),(1, 'def'),(2, 'ghi'),(3, 'jkl'),(4, 'mno'),(5, 'pqr'),(6, 'stu'),(7, 'vwx');
select test
connection child2_1;
TRUNCATE TABLE mysql.general_log;
connection master_1;
SELECT pkey, words FROM tbl_a WHERE match(words) against('+ghi' in boolean mode);
pkey words
2 ghi
connection child2_1;
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
argument
select match(`words`)against('+ghi' in boolean mode),`pkey`,`words` from `auto_test_remote`.`tbl_a` where match(`words`)against('+ghi' in boolean mode) and (match(`words`)against('+ghi' in boolean mode))
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %'
SELECT pkey FROM tbl_a ORDER BY pkey;
pkey
4
5
connection child2_2;
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
argument
select match(`words`)against('+ghi' in boolean mode),`pkey`,`words` from `auto_test_remote2`.`tbl_a` where match(`words`)against('+ghi' in boolean mode) and (match(`words`)against('+ghi' in boolean mode))
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %'
SELECT pkey FROM tbl_a ORDER BY pkey;
pkey
0
1
6
7
connection child2_3;
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
argument
select match(`words`)against('+ghi' in boolean mode),`pkey`,`words` from `auto_test_remote3`.`tbl_a` where match(`words`)against('+ghi' in boolean mode) and (match(`words`)against('+ghi' in boolean mode))
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %'
SELECT pkey FROM tbl_a ORDER BY pkey;
pkey
2
3
deinit
connection master_1;
DROP DATABASE IF EXISTS auto_test_local;
connection child2_1;
DROP DATABASE IF EXISTS auto_test_remote;
SET GLOBAL log_output = @old_log_output;
connection child2_2;
DROP DATABASE IF EXISTS auto_test_remote2;
SET GLOBAL log_output = @old_log_output;
connection child2_3;
DROP DATABASE IF EXISTS auto_test_remote3;
SET GLOBAL log_output = @old_log_output;
connection master_1;
set session join_cache_level= @old_join_cache_level;
set session optimizer_switch= @old_optimizer_switch;
for master_1
for child2
child2_1
child2_2
child2_3
for child3
child3_1
child3_2
child3_3
end of test
--source ../include/partition_fulltext_init.inc
if (!$HAVE_PARTITION)
{
--source ../include/partition_fulltext_deinit.inc
skip Test requires partitioning;
}
--echo
--echo drop and create databases
--connection master_1
--disable_warnings
DROP DATABASE IF EXISTS auto_test_local;
CREATE DATABASE auto_test_local;
USE auto_test_local;
if ($USE_CHILD_GROUP2)
{
--connection child2_1
if ($USE_GENERAL_LOG)
{
SET @old_log_output = @@global.log_output;
SET GLOBAL log_output = 'TABLE,FILE';
}
DROP DATABASE IF EXISTS auto_test_remote;
CREATE DATABASE auto_test_remote;
USE auto_test_remote;
--connection child2_2
if ($USE_GENERAL_LOG)
{
SET @old_log_output = @@global.log_output;
SET GLOBAL log_output = 'TABLE,FILE';
}
DROP DATABASE IF EXISTS auto_test_remote2;
CREATE DATABASE auto_test_remote2;
USE auto_test_remote2;
--connection child2_3
if ($USE_GENERAL_LOG)
{
SET @old_log_output = @@global.log_output;
SET GLOBAL log_output = 'TABLE,FILE';
}
DROP DATABASE IF EXISTS auto_test_remote3;
CREATE DATABASE auto_test_remote3;
USE auto_test_remote3;
}
--enable_warnings
--echo
--echo create table and insert
if ($USE_CHILD_GROUP2)
{
if (!$OUTPUT_CHILD_GROUP2)
{
--disable_query_log
--disable_result_log
}
--connection child2_1
if ($OUTPUT_CHILD_GROUP2)
{
--disable_query_log
echo CHILD2_1_DROP_TABLES;
echo CHILD2_1_CREATE_TABLES;
}
--disable_warnings
eval $CHILD2_1_DROP_TABLES;
--enable_warnings
eval $CHILD2_1_CREATE_TABLES;
if ($OUTPUT_CHILD_GROUP2)
{
--enable_query_log
}
if ($USE_GENERAL_LOG)
{
TRUNCATE TABLE mysql.general_log;
}
--connection child2_2
if ($OUTPUT_CHILD_GROUP2)
{
--disable_query_log
echo CHILD2_2_DROP_TABLES;
echo CHILD2_2_CREATE_TABLES;
}
--disable_warnings
eval $CHILD2_2_DROP_TABLES;
--enable_warnings
eval $CHILD2_2_CREATE_TABLES;
if ($OUTPUT_CHILD_GROUP2)
{
--enable_query_log
}
if ($USE_GENERAL_LOG)
{
TRUNCATE TABLE mysql.general_log;
}
--connection child2_3
if ($OUTPUT_CHILD_GROUP2)
{
--disable_query_log
echo CHILD2_3_DROP_TABLES;
echo CHILD2_3_CREATE_TABLES;
}
--disable_warnings
eval $CHILD2_3_DROP_TABLES;
--enable_warnings
eval $CHILD2_3_CREATE_TABLES;
if ($OUTPUT_CHILD_GROUP2)
{
--enable_query_log
}
if ($USE_GENERAL_LOG)
{
TRUNCATE TABLE mysql.general_log;
}
if (!$OUTPUT_CHILD_GROUP2)
{
--enable_query_log
--enable_result_log
}
}
--connection master_1
--disable_warnings
DROP TABLE IF EXISTS tbl_a;
DROP TABLE IF EXISTS tbl_b;
--enable_warnings
--disable_query_log
echo CREATE TABLE tbl_a (
pkey int NOT NULL,
words text NOT NULL,
PRIMARY KEY (pkey),
FULLTEXT (words)
) MASTER_1_ENGINE MASTER_1_CHARSET MASTER_1_COMMENT_2_1;
eval CREATE TABLE tbl_a (
pkey int NOT NULL,
words text NOT NULL,
PRIMARY KEY (pkey),
FULLTEXT (words)
) $MASTER_1_ENGINE $MASTER_1_CHARSET $MASTER_1_COMMENT_2_1;
--enable_query_log
INSERT INTO tbl_a (pkey, words) VALUES (0, 'abc'),(1, 'def'),(2, 'ghi'),(3, 'jkl'),(4, 'mno'),(5, 'pqr'),(6, 'stu'),(7, 'vwx');
--echo
--echo select test
if ($USE_CHILD_GROUP2)
{
if (!$OUTPUT_CHILD_GROUP2)
{
--disable_query_log
--disable_result_log
}
--connection child2_1
if ($USE_GENERAL_LOG)
{
TRUNCATE TABLE mysql.general_log;
}
if (!$OUTPUT_CHILD_GROUP2)
{
--enable_query_log
--enable_result_log
}
}
--connection master_1
SELECT pkey, words FROM tbl_a WHERE match(words) against('+ghi' in boolean mode);
if ($USE_CHILD_GROUP2)
{
if (!$OUTPUT_CHILD_GROUP2)
{
--disable_query_log
--disable_result_log
}
--connection child2_1
if ($USE_GENERAL_LOG)
{
eval $CHILD2_1_SELECT_ARGUMENT1;
}
eval $CHILD2_1_SELECT_TABLES;
--connection child2_2
if ($USE_GENERAL_LOG)
{
eval $CHILD2_2_SELECT_ARGUMENT1;
}
eval $CHILD2_2_SELECT_TABLES;
--connection child2_3
if ($USE_GENERAL_LOG)
{
eval $CHILD2_3_SELECT_ARGUMENT1;
}
eval $CHILD2_3_SELECT_TABLES;
if (!$OUTPUT_CHILD_GROUP2)
{
--enable_query_log
--enable_result_log
}
}
--echo
--echo deinit
--disable_warnings
--connection master_1
DROP DATABASE IF EXISTS auto_test_local;
if ($USE_CHILD_GROUP2)
{
--connection child2_1
DROP DATABASE IF EXISTS auto_test_remote;
if ($USE_GENERAL_LOG)
{
SET GLOBAL log_output = @old_log_output;
}
--connection child2_2
DROP DATABASE IF EXISTS auto_test_remote2;
if ($USE_GENERAL_LOG)
{
SET GLOBAL log_output = @old_log_output;
}
--connection child2_3
DROP DATABASE IF EXISTS auto_test_remote3;
if ($USE_GENERAL_LOG)
{
SET GLOBAL log_output = @old_log_output;
}
}
--enable_warnings
--source ../include/partition_fulltext_deinit.inc
--echo
--echo end of test