Commit d6594847 authored by Jacob Mathew's avatar Jacob Mathew

MDEV-16246: insert timestamp into spider table from mysqldump gets wrong time zone.

The problem occurred because the Spider node was incorrectly handling
timestamp values sent to and received from the data nodes.

The problem has been corrected as follows:
- Added logic to set and maintain the UTC time zone on the data nodes.
  To prevent timestamp ambiguity, it is necessary for the data nodes to use
  a time zone such as UTC, which does not have daylight saving time.
- Removed the spider_sync_time_zone configuration variable, which did not
  solve the problem and which interfered with the solution.
- Added logic to convert to the UTC time zone all timestamp values sent to
  and received from the data nodes.  This is done for both unique and
  non-unique timestamp columns.  It is done for WHERE clauses, applying to
  SELECT, UPDATE and DELETE statements, and for UPDATE columns.
- Disabled Spider's use of direct update when any of the columns to update is
  a timestamp column.  This is necessary to prevent false duplicate key value
  errors.
- Added a new test spider.timestamp to thoroughly test Spider's handling of
  timestamp values.

Author:
  Jacob Mathew.

Reviewer:
  Kentoku Shiba.

Merged:
  Commit 97cc9d34 on branch bb-10.3-MDEV-16246
parent a78d1aaa
......@@ -11141,13 +11141,14 @@ int ha_partition::end_bulk_delete()
SYNOPSIS
direct_update_rows_init()
update_fields Pointer to the list of fields to update
RETURN VALUE
>0 Error
0 Success
*/
int ha_partition::direct_update_rows_init()
int ha_partition::direct_update_rows_init(List<Item> *update_fields)
{
int error;
uint i, found;
......@@ -11173,8 +11174,8 @@ int ha_partition::direct_update_rows_init()
{
file= m_file[i];
if (unlikely((error= (m_pre_calling ?
file->pre_direct_update_rows_init() :
file->direct_update_rows_init()))))
file->pre_direct_update_rows_init(update_fields) :
file->direct_update_rows_init(update_fields)))))
{
DBUG_PRINT("info", ("partition FALSE by storage engine"));
DBUG_RETURN(error);
......@@ -11212,20 +11213,21 @@ int ha_partition::direct_update_rows_init()
SYNOPSIS
pre_direct_update_rows_init()
update_fields Pointer to the list of fields to update
RETURN VALUE
>0 Error
0 Success
*/
int ha_partition::pre_direct_update_rows_init()
int ha_partition::pre_direct_update_rows_init(List<Item> *update_fields)
{
bool save_m_pre_calling;
int error;
DBUG_ENTER("ha_partition::pre_direct_update_rows_init");
save_m_pre_calling= m_pre_calling;
m_pre_calling= TRUE;
error= direct_update_rows_init();
error= direct_update_rows_init(update_fields);
m_pre_calling= save_m_pre_calling;
DBUG_RETURN(error);
}
......
......@@ -620,8 +620,8 @@ class ha_partition :public handler
virtual int bulk_update_row(const uchar *old_data, const uchar *new_data,
ha_rows *dup_key_found);
virtual int update_row(const uchar * old_data, const uchar * new_data);
virtual int direct_update_rows_init();
virtual int pre_direct_update_rows_init();
virtual int direct_update_rows_init(List<Item> *update_fields);
virtual int pre_direct_update_rows_init(List<Item> *update_fields);
virtual int direct_update_rows(ha_rows *update_rows);
virtual int pre_direct_update_rows();
virtual bool start_bulk_delete();
......
......@@ -4407,12 +4407,12 @@ class handler :public Sql_alloc
/* Perform initialization for a direct update request */
public:
int ha_direct_update_rows(ha_rows *update_rows);
virtual int direct_update_rows_init()
/*
  Default implementation: the storage engine cannot execute the UPDATE
  directly (engine-side), so the server falls back to row-by-row update.
  Engines that support direct update (e.g. Spider, ha_partition in this
  commit) override this.
  @param update_fields  Pointer to the list of fields the UPDATE assigns.
  @return HA_ERR_WRONG_COMMAND always (direct update unsupported here).
*/
virtual int direct_update_rows_init(List<Item> *update_fields)
{
return HA_ERR_WRONG_COMMAND;
}
private:
virtual int pre_direct_update_rows_init()
/*
  Default implementation: pre-initialization (bulk-access pre-call) of a
  direct update is not supported; overriding engines provide it.
  @param update_fields  Pointer to the list of fields the UPDATE assigns.
  @return HA_ERR_WRONG_COMMAND always (direct update unsupported here).
*/
virtual int pre_direct_update_rows_init(List<Item> *update_fields)
{
return HA_ERR_WRONG_COMMAND;
}
......
......@@ -615,6 +615,9 @@ int mysql_update(THD *thd,
- Note that Spider can handle ORDER BY and LIMIT in a cluster with
one data node. These conditions are therefore checked in
direct_update_rows_init().
- Update fields include a unique timestamp field
- The storage engine may not be able to avoid false duplicate key
errors. This condition is checked in direct_update_rows_init().
Direct update does not require a WHERE clause
......@@ -637,7 +640,7 @@ int mysql_update(THD *thd,
if (!table->file->info_push(INFO_KIND_UPDATE_FIELDS, &fields) &&
!table->file->info_push(INFO_KIND_UPDATE_VALUES, &values) &&
!table->file->direct_update_rows_init())
!table->file->direct_update_rows_init(&fields))
{
do_direct_update= TRUE;
......
......@@ -10064,13 +10064,11 @@ int ha_spider::update_row(
#ifdef HANDLER_HAS_DIRECT_UPDATE_ROWS
#ifdef HANDLER_HAS_DIRECT_UPDATE_ROWS_WITH_HS
int ha_spider::direct_update_rows_init(
uint mode,
KEY_MULTI_RANGE *ranges,
uint range_count,
bool sorted,
const uchar *new_data
) {
int ha_spider::direct_update_rows_init(List<Item> *update_fields, uint mode,
KEY_MULTI_RANGE *ranges,
uint range_count, bool sorted,
const uchar *new_data)
{
#if defined(HS_HAS_SQLCOM) && defined(HAVE_HANDLERSOCKET)
int error_num;
#endif
......@@ -10098,7 +10096,7 @@ int ha_spider::direct_update_rows_init(
DBUG_RETURN(pre_direct_init_result);
}
DBUG_RETURN(bulk_access_link_exec_tgt->spider->direct_update_rows_init(
mode, ranges, range_count, sorted, new_data));
update_fields, mode, ranges, range_count, sorted, new_data));
}
#endif
direct_update_init(
......@@ -10202,14 +10200,46 @@ int ha_spider::direct_update_rows_init(
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
#else
int ha_spider::direct_update_rows_init()
/**
Perform initialization for a direct update request.
@param update_fields Pointer to the list of fields to update.
@return >0 Error.
0 Success.
*/
int ha_spider::direct_update_rows_init(List<Item> *update_fields)
{
st_select_lex *select_lex;
longlong select_limit;
longlong offset_limit;
List_iterator<Item> it(*update_fields);
Item *item;
Field *field;
THD *thd = trx->thd;
DBUG_ENTER("ha_spider::direct_update_rows_init");
DBUG_PRINT("info",("spider this=%p", this));
while ((item = it++))
{
if (item->type() == Item::FIELD_ITEM)
{
field = ((Item_field *)item)->field;
if (field->type() == FIELD_TYPE_TIMESTAMP &&
field->flags & UNIQUE_KEY_FLAG)
{
/*
Spider cannot perform direct update on unique timestamp fields.
To avoid false duplicate key errors, the table needs to be
updated one row at a time.
*/
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
}
}
#ifdef HA_CAN_BULK_ACCESS
if (
bulk_access_executing &&
......@@ -10227,7 +10257,8 @@ int ha_spider::direct_update_rows_init()
pre_direct_init_result));
DBUG_RETURN(pre_direct_init_result);
}
DBUG_RETURN(bulk_access_link_exec_tgt->spider->direct_update_rows_init());
DBUG_RETURN(bulk_access_link_exec_tgt->spider->
direct_update_rows_init(update_fields));
}
#endif
direct_update_init(
......@@ -10298,31 +10329,41 @@ int ha_spider::direct_update_rows_init()
#ifdef HA_CAN_BULK_ACCESS
#ifdef HANDLER_HAS_DIRECT_UPDATE_ROWS_WITH_HS
int ha_spider::pre_direct_update_rows_init(
uint mode,
KEY_MULTI_RANGE *ranges,
uint range_count,
bool sorted,
const uchar *new_data
) {
int ha_spider::pre_direct_update_rows_init(List<Item> *update_fields,
uint mode,
KEY_MULTI_RANGE *ranges,
uint range_count, bool sorted,
const uchar *new_data)
{
int error_num;
DBUG_ENTER("ha_spider::pre_direct_update_rows_init");
DBUG_PRINT("info",("spider this=%p", this));
if (bulk_access_started)
{
error_num = bulk_access_link_current->spider->
pre_direct_update_rows_init(
mode, ranges, range_count, sorted, new_data);
pre_direct_update_rows_init(update_fields, mode, ranges, range_count,
sorted, new_data);
bulk_access_link_current->spider->bulk_access_pre_called = TRUE;
bulk_access_link_current->called = TRUE;
DBUG_RETURN(error_num);
}
pre_direct_init_result = direct_update_rows_init(
mode, ranges, range_count, sorted, new_data);
pre_direct_init_result = direct_update_rows_init(update_fields, mode,
ranges, range_count,
sorted, new_data);
DBUG_RETURN(pre_direct_init_result);
}
#else
int ha_spider::pre_direct_update_rows_init()
/**
Do initialization for performing a parallel direct update
(bulk access pre-call); this is the non-handlersocket build of Spider.
@param update_fields Pointer to the list of fields to update.
@return >0 Error.
0 Success.
*/
int ha_spider::pre_direct_update_rows_init(List<Item> *update_fields)
{
int error_num;
DBUG_ENTER("ha_spider::pre_direct_update_rows_init");
......@@ -10330,12 +10371,12 @@ int ha_spider::pre_direct_update_rows_init()
if (bulk_access_started)
{
error_num = bulk_access_link_current->spider->
pre_direct_update_rows_init();
pre_direct_update_rows_init(update_fields);
bulk_access_link_current->spider->bulk_access_pre_called = TRUE;
bulk_access_link_current->called = TRUE;
DBUG_RETURN(error_num);
}
pre_direct_init_result = direct_update_rows_init();
pre_direct_init_result = direct_update_rows_init(update_fields);
DBUG_RETURN(pre_direct_init_result);
}
#endif
......@@ -15733,8 +15774,9 @@ int ha_spider::print_item_type(
dbton_hdl = dbton_handler[dbton_id];
if (
dbton_hdl->first_link_idx >= 0 &&
(error_num = spider_db_print_item_type(item, this, str,
alias, alias_length, dbton_id, FALSE, NULL))
(error_num = spider_db_print_item_type(item, NULL, this, str,
alias, alias_length, dbton_id,
FALSE, NULL))
) {
DBUG_RETURN(error_num);
}
......
......@@ -587,35 +587,29 @@ class ha_spider: public handler
);
#ifdef HANDLER_HAS_DIRECT_UPDATE_ROWS
#ifdef HANDLER_HAS_DIRECT_UPDATE_ROWS_WITH_HS
inline int direct_update_rows_init()
inline int direct_update_rows_init(List<Item> *update_fields)
{
return direct_update_rows_init(2, NULL, 0, FALSE, NULL);
return direct_update_rows_init(update_fields, 2, NULL, 0, FALSE, NULL);
}
int direct_update_rows_init(
uint mode,
KEY_MULTI_RANGE *ranges,
uint range_count,
bool sorted,
const uchar *new_data
);
int direct_update_rows_init(List<Item> *update_fields, uint mode,
KEY_MULTI_RANGE *ranges, uint range_count,
bool sorted, const uchar *new_data);
#else
int direct_update_rows_init();
int direct_update_rows_init(List<Item> *update_fields);
#endif
#ifdef HA_CAN_BULK_ACCESS
#ifdef HANDLER_HAS_DIRECT_UPDATE_ROWS_WITH_HS
inline int pre_direct_update_rows_init()
inline int pre_direct_update_rows_init(List<Item> *update_fields)
{
return pre_direct_update_rows_init(2, NULL, 0, FALSE, NULL);
return pre_direct_update_rows_init(update_fields,
2, NULL, 0, FALSE, NULL);
}
int pre_direct_update_rows_init(
uint mode,
KEY_MULTI_RANGE *ranges,
uint range_count,
bool sorted,
uchar *new_data
);
int pre_direct_update_rows_init(List<Item> *update_fields,
uint mode, KEY_MULTI_RANGE *ranges,
uint range_count, bool sorted,
uchar *new_data);
#else
int pre_direct_update_rows_init();
int pre_direct_update_rows_init(List<Item> *update_fields);
#endif
#endif
#ifdef HANDLER_HAS_DIRECT_UPDATE_ROWS_WITH_HS
......
This diff is collapsed.
This diff is collapsed.
# Restore the option variables that the companion init script backed up.
--let $MASTER_1_COMMENT_2_1= $MASTER_1_COMMENT_2_1_BACKUP
--let $CHILD2_1_DROP_TABLES= $CHILD2_1_DROP_TABLES_BACKUP
--let $CHILD2_1_CREATE_TABLES= $CHILD2_1_CREATE_TABLES_BACKUP
--let $CHILD2_1_SELECT_TABLES= $CHILD2_1_SELECT_TABLES_BACKUP
--let $OUTPUT_CHILD_GROUP2= $OUTPUT_CHILD_GROUP2_BACKUP
--let $USE_GENERAL_LOG= $USE_GENERAL_LOG_BACKUP
# Tear down the Spider test topology without cluttering the result file.
--disable_warnings
--disable_query_log
--disable_result_log
--source ../t/test_deinit.inc
--enable_result_log
--enable_query_log
--enable_warnings
# Bring the Spider test topology up without cluttering the result file.
# NOTE(review): this appears to be the start of a separate init include
# file (the deinit include ends just above) -- confirm file boundaries.
--disable_warnings
--disable_query_log
--disable_result_log
--source ../t/test_init.inc
--enable_result_log
--enable_query_log
--enable_warnings
# Point the Spider table at remote table tbl_a on server s_2_1, saving
# the previous COMMENT so deinit can restore it.
--let $MASTER_1_COMMENT_2_1_BACKUP= $MASTER_1_COMMENT_2_1
let $MASTER_1_COMMENT_2_1=
COMMENT='database "ts_test_remote", table "tbl_a", srv "s_2_1"';
let $MASTER_1_AUTO_INCREMENT_2_1=
AUTO_INCREMENT=17 DEFAULT CHARSET=utf8mb4;
let $MASTER_1_AUTO_INCREMENT1=
AUTO_INCREMENT=17 DEFAULT CHARSET=utf8mb4;
let $MASTER_1_AUTO_INCREMENT2=
AUTO_INCREMENT=17 DEFAULT CHARSET=utf8mb4;
let $CHILD2_1_AUTO_INCREMENT=
AUTO_INCREMENT=17 DEFAULT CHARSET=utf8mb4;
--let $CHILD2_1_DROP_TABLES_BACKUP= $CHILD2_1_DROP_TABLES
let $CHILD2_1_DROP_TABLES=
DROP TABLE IF EXISTS tbl_a;
# Data-node table: auto-inc PK, a DATETIME, and a TIMESTAMP column with a
# unique index -- exercises Spider's unique-timestamp handling (MDEV-16246).
--let $CHILD2_1_CREATE_TABLES_BACKUP= $CHILD2_1_CREATE_TABLES
let $CHILD2_1_CREATE_TABLES=
CREATE TABLE tbl_a (
col_a INT UNSIGNED NOT NULL AUTO_INCREMENT,
col_dt DATETIME,
col_ts TIMESTAMP NOT NULL
DEFAULT current_timestamp() ON UPDATE current_timestamp(),
PRIMARY KEY(col_a),
UNIQUE INDEX i_ts (col_ts)
) $CHILD2_1_ENGINE $CHILD2_1_AUTO_INCREMENT;
# unix_timestamp(col_ts) makes the stored UTC instant visible regardless
# of the session time zone used when selecting.
--let $CHILD2_1_SELECT_TABLES_BACKUP= $CHILD2_1_SELECT_TABLES
let $CHILD2_1_SELECT_TABLES=
SELECT col_a, col_dt, col_ts, unix_timestamp(col_ts) FROM tbl_a ORDER BY col_a;
let $CHILD2_1_SELECT_ARGUMENT1=
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
let $CHILD2_1_DROP_INDEX=
DROP INDEX i_ts ON tbl_a;
let $CHILD2_1_SHOW_CREATE_TABLE=
SHOW CREATE TABLE tbl_a;
# Log data-node SQL via the general log so the test can assert on it.
--let $OUTPUT_CHILD_GROUP2_BACKUP= $OUTPUT_CHILD_GROUP2
--let $OUTPUT_CHILD_GROUP2= 1
--let $USE_GENERAL_LOG_BACKUP= $USE_GENERAL_LOG
--let $USE_GENERAL_LOG= 1
......@@ -88,6 +88,9 @@ extern PSI_thread_key spd_key_thd_bg_mon;
#endif
#endif
/* UTC time zone for timestamp columns */
extern Time_zone *UTC;
HASH spider_open_connections;
uint spider_open_connections_id;
HASH spider_ipport_conns;
......@@ -454,6 +457,13 @@ SPIDER_CONN *spider_create_conn(
char *tmp_ssl_cipher, *tmp_ssl_key, *tmp_default_file, *tmp_default_group;
DBUG_ENTER("spider_create_conn");
if (unlikely(!UTC))
{
/* UTC time zone for timestamp columns */
String tz_00_name(STRING_WITH_LEN("+00:00"), &my_charset_bin);
UTC = my_tz_find(current_thd, &tz_00_name);
}
#if defined(HS_HAS_SQLCOM) && defined(HAVE_HANDLERSOCKET)
if (conn_kind == SPIDER_CONN_KIND_MYSQL)
{
......@@ -1429,6 +1439,14 @@ void spider_conn_queue_time_zone(
DBUG_VOID_RETURN;
}
/*
  Queue a statement that switches the data-node connection's session
  time zone to UTC.

  Timestamp values are exchanged with the data nodes in UTC so that the
  conversion is unambiguous (UTC has no daylight saving time); this is
  part of the MDEV-16246 fix.  Delegates to spider_conn_queue_time_zone()
  with the global UTC Time_zone object (initialized lazily in
  spider_create_conn()).

  @param conn  Connection whose queued statements should run under UTC.
*/
void spider_conn_queue_UTC_time_zone(SPIDER_CONN *conn)
{
  /* Fix: the DBUG_ENTER tag previously named spider_conn_queue_time_zone,
     which made debug traces attribute this frame to the wrong function. */
  DBUG_ENTER("spider_conn_queue_UTC_time_zone");
  DBUG_PRINT("info", ("spider conn=%p", conn));
  spider_conn_queue_time_zone(conn, UTC);
  DBUG_VOID_RETURN;
}
void spider_conn_queue_start_transaction(
SPIDER_CONN *conn
) {
......
......@@ -13,6 +13,8 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include "tztime.h"
#define SPIDER_LOCK_MODE_NO_LOCK 0
#define SPIDER_LOCK_MODE_SHARED 1
#define SPIDER_LOCK_MODE_EXCLUSIVE 2
......@@ -137,6 +139,10 @@ void spider_conn_queue_time_zone(
Time_zone *time_zone
);
void spider_conn_queue_UTC_time_zone(
SPIDER_CONN *conn
);
void spider_conn_queue_start_transaction(
SPIDER_CONN *conn
);
......
This diff is collapsed.
......@@ -838,8 +838,18 @@ int spider_db_flush_logs(
ha_spider *spider
);
Field *spider_db_find_field_in_item_list(
Item **item_list,
uint item_count,
uint start_item,
spider_string *str,
const char *func_name,
int func_name_length
);
int spider_db_print_item_type(
Item *item,
Field *field,
ha_spider *spider,
spider_string *str,
const char *alias,
......@@ -930,6 +940,7 @@ int spider_db_open_item_row(
int spider_db_open_item_string(
Item *item,
Field *field,
ha_spider *spider,
spider_string *str,
const char *alias,
......@@ -941,6 +952,7 @@ int spider_db_open_item_string(
int spider_db_open_item_int(
Item *item,
Field *field,
ha_spider *spider,
spider_string *str,
const char *alias,
......@@ -952,6 +964,7 @@ int spider_db_open_item_int(
int spider_db_open_item_cache(
Item_cache *item_cache,
Field *field,
ha_spider *spider,
spider_string *str,
const char *alias,
......@@ -963,6 +976,7 @@ int spider_db_open_item_cache(
int spider_db_open_item_insert_value(
Item_insert_value *item_insert_value,
Field *field,
ha_spider *spider,
spider_string *str,
const char *alias,
......
......@@ -17,6 +17,7 @@
#if defined(HS_HAS_SQLCOM) && defined(HAVE_HANDLERSOCKET)
#include "hstcpcli.hpp"
#endif
#include "tztime.h"
#define SPIDER_DBTON_SIZE 15
......
This diff is collapsed.
......@@ -13,6 +13,8 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include "tztime.h"
class spider_db_mysql_util: public spider_db_util
{
public:
......
......@@ -13,6 +13,8 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include "tztime.h"
class spider_db_oracle;
class spider_db_oracle_result;
......
......@@ -1727,8 +1727,8 @@ group_by_handler *spider_create_group_by_handler(
while ((item = it++))
{
DBUG_PRINT("info",("spider select item=%p", item));
if (spider_db_print_item_type(item, spider, NULL, NULL, 0,
roop_count, TRUE, fields_arg))
if (spider_db_print_item_type(item, NULL, spider, NULL, NULL, 0,
roop_count, TRUE, fields_arg))
{
DBUG_PRINT("info",("spider dbton_id=%d can't create select", roop_count));
spider_clear_bit(dbton_bitmap, roop_count);
......@@ -1741,8 +1741,9 @@ group_by_handler *spider_create_group_by_handler(
DBUG_PRINT("info",("spider query->where=%p", query->where));
if (query->where)
{
if (spider_db_print_item_type(query->where, spider, NULL, NULL, 0,
roop_count, TRUE, fields_arg))
if (spider_db_print_item_type(query->where, NULL, spider, NULL,
NULL, 0, roop_count,
TRUE, fields_arg))
{
DBUG_PRINT("info",("spider dbton_id=%d can't create where", roop_count));
spider_clear_bit(dbton_bitmap, roop_count);
......@@ -1757,8 +1758,9 @@ group_by_handler *spider_create_group_by_handler(
{
for (order = query->group_by; order; order = order->next)
{
if (spider_db_print_item_type((*order->item), spider, NULL, NULL, 0,
roop_count, TRUE, fields_arg))
if (spider_db_print_item_type((*order->item), NULL, spider, NULL,
NULL, 0, roop_count,
TRUE, fields_arg))
{
DBUG_PRINT("info",("spider dbton_id=%d can't create group by", roop_count));
spider_clear_bit(dbton_bitmap, roop_count);
......@@ -1775,8 +1777,9 @@ group_by_handler *spider_create_group_by_handler(
{
for (order = query->order_by; order; order = order->next)
{
if (spider_db_print_item_type((*order->item), spider, NULL, NULL, 0,
roop_count, TRUE, fields_arg))
if (spider_db_print_item_type((*order->item), NULL, spider, NULL,
NULL, 0, roop_count,
TRUE, fields_arg))
{
DBUG_PRINT("info",("spider dbton_id=%d can't create order by", roop_count));
spider_clear_bit(dbton_bitmap, roop_count);
......@@ -1791,8 +1794,9 @@ group_by_handler *spider_create_group_by_handler(
DBUG_PRINT("info",("spider query->having=%p", query->having));
if (query->having)
{
if (spider_db_print_item_type(query->having, spider, NULL, NULL, 0,
roop_count, TRUE, fields_arg))
if (spider_db_print_item_type(query->having, NULL, spider, NULL,
NULL, 0, roop_count,
TRUE, fields_arg))
{
DBUG_PRINT("info",("spider dbton_id=%d can't create having", roop_count));
spider_clear_bit(dbton_bitmap, roop_count);
......
......@@ -13,6 +13,8 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include "tztime.h"
#define SPIDER_DETAIL_VERSION "3.3.13"
#define SPIDER_HEX_VERSION 0x0303
......
......@@ -928,26 +928,6 @@ bool spider_param_sync_autocommit(
DBUG_RETURN(THDVAR(thd, sync_autocommit));
}
/*
FALSE: no sync
TRUE: sync
*/
static MYSQL_THDVAR_BOOL(
sync_time_zone, /* name */
PLUGIN_VAR_OPCMDARG, /* opt */
"Sync time_zone", /* comment */
NULL, /* check */
NULL, /* update */
FALSE /* def */
);
/*
  Read the session value of the spider_sync_time_zone system variable.
  NOTE(review): this commit removes the variable and this accessor (see
  commit message, L-level diff of spd_param.cc); these lines are the
  deleted side of the diff.
  @param thd  Session whose variable value is read.
  @return TRUE if time-zone syncing to data nodes was requested.
*/
bool spider_param_sync_time_zone(
THD *thd
) {
DBUG_ENTER("spider_param_sync_time_zone");
DBUG_RETURN(THDVAR(thd, sync_time_zone));
}
/*
FALSE: not use
TRUE: use
......@@ -3332,7 +3312,6 @@ static struct st_mysql_sys_var* spider_system_variables[] = {
MYSQL_SYSVAR(block_size),
MYSQL_SYSVAR(selupd_lock_mode),
MYSQL_SYSVAR(sync_autocommit),
MYSQL_SYSVAR(sync_time_zone),
MYSQL_SYSVAR(use_default_database),
MYSQL_SYSVAR(internal_sql_log_off),
MYSQL_SYSVAR(bulk_size),
......
......@@ -107,9 +107,6 @@ int spider_param_selupd_lock_mode(
bool spider_param_sync_autocommit(
THD *thd
);
bool spider_param_sync_time_zone(
THD *thd
);
bool spider_param_use_default_database(
THD *thd
);
......
......@@ -26,6 +26,7 @@
#include "sql_class.h"
#include "sql_partition.h"
#include "records.h"
#include "tztime.h"
#endif
#include "spd_err.h"
#include "spd_param.h"
......@@ -1840,7 +1841,6 @@ int spider_internal_start_trx(
SPIDER_TRX *trx = spider->trx;
THD *thd = trx->thd;
bool sync_autocommit = spider_param_sync_autocommit(thd);
bool sync_time_zone = spider_param_sync_time_zone(thd);
double ping_interval_at_trx_start =
spider_param_ping_interval_at_trx_start(thd);
bool xa_lock = FALSE;
......@@ -1867,9 +1867,6 @@ int spider_internal_start_trx(
if (
(error_num = spider_check_and_set_sql_log_off(thd, conn,
&spider->need_mons[link_idx])) ||
(sync_time_zone &&
(error_num = spider_check_and_set_time_zone(thd, conn,
&spider->need_mons[link_idx]))) ||
(sync_autocommit &&
(error_num = spider_check_and_set_autocommit(thd, conn,
&spider->need_mons[link_idx])))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment