Fixes for compiler warnings.

parent 83a5eac0
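
The pattern used throughout this patch, as a minimal stand-alone sketch: the IF_DBUG()
macro (defined in the first hunk below) keeps its argument in debug builds and drops it
entirely when DBUG_OFF is set, so variables that exist only to feed DBUG_ASSERT or
DBUG_PRINT no longer trigger unused-variable warnings in release builds. The helper
do_work() and the surrounding main() are hypothetical, for illustration only; the real
macros live in the dbug header changed below.

    #include <assert.h>

    /* Same shape as the header change below: debug builds keep the
       expression, release (DBUG_OFF) builds drop it. */
    #ifndef DBUG_OFF
    #define IF_DBUG(A) A
    #define DBUG_ASSERT(A) assert(A)
    #else
    #define IF_DBUG(A)
    #define DBUG_ASSERT(A)
    #endif

    int do_work(void) { return 0; }   /* hypothetical callee */

    int main(void)
    {
      /* Debug build expands to:    int res= do_work(); assert(res == 0);
         Release build expands to:  do_work();  -- no unused 'res' left behind */
      IF_DBUG(int res=) do_work();
      DBUG_ASSERT(res == 0);
      return 0;
    }
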
......@@ -80,6 +80,7 @@ extern FILE *_db_fp_(void);
#define DBUG_ASSERT(A) assert(A)
#define DBUG_EXPLAIN(buf,len) _db_explain_(0, (buf),(len))
#define DBUG_EXPLAIN_INITIAL(buf,len) _db_explain_init_((buf),(len))
#define IF_DBUG(A) A
#else /* No debugger */
#define DBUG_ENTER(a1)
......@@ -106,6 +107,7 @@ extern FILE *_db_fp_(void);
#define DBUG_UNLOCK_FILE
#define DBUG_EXPLAIN(buf,len)
#define DBUG_EXPLAIN_INITIAL(buf,len)
#define IF_DBUG(A)
#endif
#ifdef __cplusplus
}
......
......@@ -6297,10 +6297,12 @@ int Field_string::cmp(const char *a_ptr, const char *b_ptr)
void Field_string::sort_string(char *to,uint length)
{
#ifndef DBUG_OFF
uint tmp= my_strnxfrm(field_charset,
(uchar*) to, length,
(uchar*) ptr, field_length);
DBUG_ASSERT(tmp == length);
#endif
}
......
......@@ -2764,10 +2764,12 @@ int ha_ndbcluster::write_row(byte *record)
{
Ndb *ndb= get_ndb();
Uint64 next_val= (Uint64) table->next_number_field->val_int() + 1;
#ifndef DBUG_OFF
char buff[22];
DBUG_PRINT("info",
("Trying to set next auto increment value to %s",
llstr(next_val, buff)));
#endif
Ndb_tuple_id_range_guard g(m_share);
if (ndb->setAutoIncrementValue(m_table, g.range, next_val, TRUE)
== -1)
......@@ -3999,7 +4001,7 @@ int ha_ndbcluster::end_bulk_insert()
}
else
{
int res= trans->restart();
IF_DBUG(int res=) trans->restart();
DBUG_ASSERT(res == 0);
}
}
......@@ -4717,7 +4719,7 @@ static int create_ndb_column(NDBCOL &col,
// Set autoincrement
if (field->flags & AUTO_INCREMENT_FLAG)
{
char buff[22];
IF_DBUG(char buff[22]);
col.setAutoIncrement(TRUE);
ulonglong value= info->auto_increment_value ?
info->auto_increment_value : (ulonglong) 1;
......@@ -5388,7 +5390,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
{
DBUG_PRINT("NDB_SHARE", ("%s temporary use_count: %u",
share->key, share->use_count));
int r= rename_share(share, to);
IF_DBUG(int r=) rename_share(share, to);
DBUG_ASSERT(r == 0);
}
#endif
......@@ -5409,7 +5411,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
#ifdef HAVE_NDB_BINLOG
if (share)
{
int r= rename_share(share, from);
IF_DBUG(int r=) rename_share(share, from);
DBUG_ASSERT(r == 0);
/* ndb_share reference temporary free */
DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u",
......@@ -7268,7 +7270,7 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
if (share->commit_count != 0)
{
*commit_count= share->commit_count;
char buff[22];
IF_DBUG(char buff[22]);
DBUG_PRINT("info", ("Getting commit_count: %s from share",
llstr(share->commit_count, buff)));
pthread_mutex_unlock(&share->mutex);
......@@ -7304,7 +7306,7 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
pthread_mutex_lock(&share->mutex);
if (share->commit_count_lock == lock)
{
char buff[22];
IF_DBUG(char buff[22]);
DBUG_PRINT("info", ("Setting commit_count to %s",
llstr(stat.commit_count, buff)));
share->commit_count= stat.commit_count;
......@@ -7363,7 +7365,8 @@ ndbcluster_cache_retrieval_allowed(THD *thd,
bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
char *dbname= full_name;
char *tabname= dbname+strlen(dbname)+1;
char buff[22], buff2[22];
IF_DBUG(char buff[22]);
IF_DBUG(char buff2[22]);
DBUG_ENTER("ndbcluster_cache_retrieval_allowed");
DBUG_PRINT("enter", ("dbname: %s, tabname: %s, is_autocommit: %d",
dbname, tabname, is_autocommit));
......@@ -7430,7 +7433,7 @@ ha_ndbcluster::register_query_cache_table(THD *thd,
ulonglong *engine_data)
{
Uint64 commit_count;
char buff[22];
IF_DBUG(char buff[22]);
bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN));
DBUG_ENTER("ha_ndbcluster::register_query_cache_table");
DBUG_PRINT("enter",("dbname: %s, tabname: %s, is_autocommit: %d",
......@@ -7875,7 +7878,10 @@ ndb_get_table_statistics(ha_ndbcluster* file, bool report_error, Ndb* ndb, const
int retries= 10;
int reterr= 0;
int retry_sleep= 30 * 1000; /* 30 milliseconds */
char buff[22], buff2[22], buff3[22], buff4[22];
IF_DBUG(char buff[22]);
IF_DBUG(char buff2[22]);
IF_DBUG(char buff3[22]);
IF_DBUG(char buff4[22]);
DBUG_ENTER("ndb_get_table_statistics");
DBUG_PRINT("enter", ("table: %s", ndbtab->getName()));
......@@ -8693,7 +8699,8 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
ndb_get_table_statistics(NULL, FALSE, ndb,
ndbtab_g.get_table(), &stat) == 0)
{
char buff[22], buff2[22];
IF_DBUG(char buff[22]);
IF_DBUG(char buff2[22]);
DBUG_PRINT("info",
("Table: %s commit_count: %s rows: %s",
share->key,
......@@ -9545,7 +9552,7 @@ void ndb_serialize_cond(const Item *item, void *arg)
DBUG_PRINT("info", ("INT_ITEM"));
if (context->expecting(Item::INT_ITEM))
{
Item_int *int_item= (Item_int *) item;
IF_DBUG(Item_int *int_item= (Item_int *) item);
DBUG_PRINT("info", ("value %ld", (long) int_item->value));
NDB_ITEM_QUALIFICATION q;
q.value_type= Item::INT_ITEM;
......@@ -9572,7 +9579,7 @@ void ndb_serialize_cond(const Item *item, void *arg)
DBUG_PRINT("info", ("REAL_ITEM"));
if (context->expecting(Item::REAL_ITEM))
{
Item_float *float_item= (Item_float *) item;
IF_DBUG(Item_float *float_item= (Item_float *) item);
DBUG_PRINT("info", ("value %f", float_item->value));
NDB_ITEM_QUALIFICATION q;
q.value_type= Item::REAL_ITEM;
......@@ -9620,7 +9627,7 @@ void ndb_serialize_cond(const Item *item, void *arg)
DBUG_PRINT("info", ("DECIMAL_ITEM"));
if (context->expecting(Item::DECIMAL_ITEM))
{
Item_decimal *decimal_item= (Item_decimal *) item;
IF_DBUG(Item_decimal *decimal_item= (Item_decimal *) item);
DBUG_PRINT("info", ("value %f", decimal_item->val_real()));
NDB_ITEM_QUALIFICATION q;
q.value_type= Item::DECIMAL_ITEM;
......
......@@ -1829,15 +1829,15 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
// fall through
case SOT_CREATE_TABLE:
pthread_mutex_lock(&LOCK_open);
if (ndbcluster_check_if_local_table(schema->db, schema->name))
{
DBUG_PRINT("info", ("NDB binlog: Skipping locally defined table '%s.%s'",
schema->db, schema->name));
if (ndbcluster_check_if_local_table(schema->db, schema->name))
{
DBUG_PRINT("info", ("NDB binlog: Skipping locally defined table '%s.%s'",
schema->db, schema->name));
sql_print_error("NDB binlog: Skipping locally defined table '%s.%s' from "
"binlog schema event '%s' from node %d. ",
schema->db, schema->name, schema->query,
schema->node_id);
}
}
else if (ndb_create_table_from_engine(thd, schema->db, schema->name))
{
sql_print_error("NDB binlog: Could not discover table '%s.%s' from "
......@@ -1854,27 +1854,27 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
log_query= 1;
break;
case SOT_DROP_DB:
/* Drop the database locally if it only contains ndb tables */
if (! ndbcluster_check_if_local_tables_in_db(thd, schema->db))
{
run_query(thd, schema->query,
schema->query + schema->query_length,
TRUE, /* print error */
TRUE); /* don't binlog the query */
/* binlog dropping database after any table operations */
post_epoch_log_list->push_back(schema, mem_root);
/* acknowledge this query _after_ epoch completion */
post_epoch_unlock= 1;
}
else
{
/* Database contained local tables, leave it */
sql_print_error("NDB binlog: Skipping drop database '%s' since it contained local tables "
/* Drop the database locally if it only contains ndb tables */
if (! ndbcluster_check_if_local_tables_in_db(thd, schema->db))
{
run_query(thd, schema->query,
schema->query + schema->query_length,
TRUE, /* print error */
TRUE); /* don't binlog the query */
/* binlog dropping database after any table operations */
post_epoch_log_list->push_back(schema, mem_root);
/* acknowledge this query _after_ epoch completion */
post_epoch_unlock= 1;
}
else
{
/* Database contained local tables, leave it */
sql_print_error("NDB binlog: Skipping drop database '%s' since it contained local tables "
"binlog schema event '%s' from node %d. ",
schema->db, schema->query,
schema->node_id);
log_query= 1;
}
log_query= 1;
}
break;
case SOT_CREATE_DB:
/* fall through */
......@@ -2121,18 +2121,18 @@ ndb_binlog_thread_handle_schema_event_post_epoch(THD *thd,
share= 0;
}
pthread_mutex_lock(&LOCK_open);
if (ndbcluster_check_if_local_table(schema->db, schema->name))
{
DBUG_PRINT("info", ("NDB binlog: Skipping locally defined table '%s.%s'",
schema->db, schema->name));
if (ndbcluster_check_if_local_table(schema->db, schema->name))
{
DBUG_PRINT("info", ("NDB binlog: Skipping locally defined table '%s.%s'",
schema->db, schema->name));
sql_print_error("NDB binlog: Skipping locally defined table '%s.%s' from "
"binlog schema event '%s' from node %d. ",
schema->db, schema->name, schema->query,
schema->node_id);
}
}
else if (ndb_create_table_from_engine(thd, schema->db, schema->name))
{
sql_print_error("NDB binlog: Could not discover table '%s.%s' from "
{
sql_print_error("NDB binlog: Could not discover table '%s.%s' from "
"binlog schema event '%s' from node %d. my_errno: %d",
schema->db, schema->name, schema->query,
schema->node_id, my_errno);
......@@ -2260,7 +2260,7 @@ int ndb_add_ndb_binlog_index(THD *thd, void *_row)
{
TABLE_LIST *p_binlog_tables= &binlog_tables;
close_tables_for_reopen(thd, &p_binlog_tables);
ndb_binlog_index= 0;
ndb_binlog_index= 0;
continue;
}
sql_print_error("NDB Binlog: Unable to lock table ndb_binlog_index");
......@@ -3225,15 +3225,17 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
if (share->flags & NSF_BLOB_FLAG)
{
my_ptrdiff_t ptrdiff= 0;
int ret= get_ndb_blobs_value(table, share->ndb_value[0],
blobs_buffer[0], blobs_buffer_size[0],
ptrdiff);
IF_DBUG(int ret =) get_ndb_blobs_value(table, share->ndb_value[0],
blobs_buffer[0],
blobs_buffer_size[0],
ptrdiff);
DBUG_ASSERT(ret == 0);
}
ndb_unpack_record(table, share->ndb_value[0], &b, table->record[0]);
int ret= trans.write_row(::server_id,
injector::transaction::table(table, TRUE),
&b, n_fields, table->record[0]);
IF_DBUG(int ret=) trans.write_row(::server_id,
injector::transaction::table(table,
TRUE),
&b, n_fields, table->record[0]);
DBUG_ASSERT(ret == 0);
}
break;
......@@ -3251,27 +3253,29 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
n= 0; /*
use the primary key only as it save time and space and
it is the only thing needed to log the delete
*/
*/
else
n= 1; /*
we use the before values since we don't have a primary key
since the mysql server does not handle the hidden primary
key
*/
*/
if (share->flags & NSF_BLOB_FLAG)
{
my_ptrdiff_t ptrdiff= table->record[n] - table->record[0];
int ret= get_ndb_blobs_value(table, share->ndb_value[n],
blobs_buffer[n], blobs_buffer_size[n],
ptrdiff);
IF_DBUG(int ret =) get_ndb_blobs_value(table, share->ndb_value[n],
blobs_buffer[n],
blobs_buffer_size[n],
ptrdiff);
DBUG_ASSERT(ret == 0);
}
ndb_unpack_record(table, share->ndb_value[n], &b, table->record[n]);
DBUG_EXECUTE("info", print_records(table, table->record[n]););
int ret= trans.delete_row(::server_id,
injector::transaction::table(table, TRUE),
&b, n_fields, table->record[n]);
IF_DBUG(int ret =) trans.delete_row(::server_id,
injector::transaction::table(table,
TRUE),
&b, n_fields, table->record[n]);
DBUG_ASSERT(ret == 0);
}
break;
......@@ -3283,9 +3287,10 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
if (share->flags & NSF_BLOB_FLAG)
{
my_ptrdiff_t ptrdiff= 0;
int ret= get_ndb_blobs_value(table, share->ndb_value[0],
blobs_buffer[0], blobs_buffer_size[0],
ptrdiff);
IF_DBUG(int ret =) get_ndb_blobs_value(table, share->ndb_value[0],
blobs_buffer[0],
blobs_buffer_size[0],
ptrdiff);
DBUG_ASSERT(ret == 0);
}
ndb_unpack_record(table, share->ndb_value[0],
......@@ -3296,7 +3301,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
/*
since table has a primary key, we can do a write
using only after values
*/
*/
trans.write_row(::server_id, injector::transaction::table(table, TRUE),
&b, n_fields, table->record[0]);// after values
}
......@@ -3305,22 +3310,24 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
/*
mysql server cannot handle the ndb hidden key and
therefore needs the before image as well
*/
*/
if (share->flags & NSF_BLOB_FLAG)
{
my_ptrdiff_t ptrdiff= table->record[1] - table->record[0];
int ret= get_ndb_blobs_value(table, share->ndb_value[1],
blobs_buffer[1], blobs_buffer_size[1],
ptrdiff);
IF_DBUG(int ret =) get_ndb_blobs_value(table, share->ndb_value[1],
blobs_buffer[1],
blobs_buffer_size[1],
ptrdiff);
DBUG_ASSERT(ret == 0);
}
ndb_unpack_record(table, share->ndb_value[1], &b, table->record[1]);
DBUG_EXECUTE("info", print_records(table, table->record[1]););
int ret= trans.update_row(::server_id,
injector::transaction::table(table, TRUE),
&b, n_fields,
table->record[1], // before values
table->record[0]);// after values
IF_DBUG(int ret =) trans.update_row(::server_id,
injector::transaction::table(table,
TRUE),
&b, n_fields,
table->record[1], // before values
table->record[0]);// after values
DBUG_ASSERT(ret == 0);
}
}
......@@ -3850,7 +3857,7 @@ restart:
continue;
}
TABLE *table= share->table;
const LEX_STRING &name= table->s->table_name;
IF_DBUG(const LEX_STRING &name= table->s->table_name);
if ((event_types & (NdbDictionary::Event::TE_INSERT |
NdbDictionary::Event::TE_UPDATE |
NdbDictionary::Event::TE_DELETE)) == 0)
......@@ -3867,7 +3874,7 @@ restart:
}
DBUG_PRINT("info", ("use_table: %.*s", name.length, name.str));
injector::transaction::table tbl(table, TRUE);
int ret= trans.use_table(::server_id, tbl);
IF_DBUG(int ret=) trans.use_table(::server_id, tbl);
DBUG_ASSERT(ret == 0);
}
}
......@@ -3877,10 +3884,10 @@ restart:
{
TABLE *table= ndb_apply_status_share->table;
const LEX_STRING& name=table->s->table_name;
IF_DBUG(const LEX_STRING& name= table->s->table_name);
DBUG_PRINT("info", ("use_table: %.*s", name.length, name.str));
injector::transaction::table tbl(table, TRUE);
int ret= trans.use_table(::server_id, tbl);
IF_DBUG(int ret=) trans.use_table(::server_id, tbl);
DBUG_ASSERT(ret == 0);
// Set all fields non-null.
......@@ -3945,7 +3952,7 @@ restart:
else
{
// set injector_ndb database/schema from table internal name
int ret=
IF_DBUG(int ret=)
i_ndb->setDatabaseAndSchemaName(pOp->getEvent()->getTable());
DBUG_ASSERT(ret == 0);
ndb_binlog_thread_handle_non_data_event(thd, i_ndb, pOp, row);
......@@ -3979,7 +3986,7 @@ restart:
/*
note! pOp is not referring to an event in the next epoch
or is == 0
*/
*/
#ifdef RUN_NDB_BINLOG_TIMER
write_timer.stop();
#endif
......
......@@ -284,12 +284,14 @@ public:
*/
int check_state(enum_state const target_state)
{
#ifndef DBUG_OFF
static char const *state_name[] = {
"START_STATE", "TABLE_STATE", "ROW_STATE", "STATE_COUNT"
};
DBUG_ASSERT(0 <= target_state && target_state <= STATE_COUNT);
DBUG_PRINT("info", ("In state %s", state_name[m_state]));
#endif
if (m_state <= target_state && target_state <= m_state + 1 &&
m_state < STATE_COUNT)
......
......@@ -163,7 +163,7 @@ void mysql_client_binlog_statement(THD* thd)
(ulong) uint4korr(bufptr+EVENT_LEN_OFFSET)));
#endif
ev->thd= thd;
if (int err= ev->exec_event(thd->rli_fake))
if (IF_DBUG(int err= ) ev->exec_event(thd->rli_fake))
{
DBUG_PRINT("error", ("exec_event() returned: %d", err));
/*
......
......@@ -23,7 +23,9 @@
int max_binlog_dump_events = 0; // unlimited
my_bool opt_sporadic_binlog_dump_fail = 0;
#ifndef DBUG_OFF
static int binlog_dump_count = 0;
#endif
/*
fake_rotate_event() builds a fake (=which does not exist physically in any
......
......@@ -4818,7 +4818,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
else
unlock_dst_table= TRUE;
int result= store_create_info(thd, table, &query, create_info);
IF_DBUG(int result=)store_create_info(thd, table, &query, create_info);
DBUG_ASSERT(result == 0); // store_create_info() always return 0
write_bin_log(thd, TRUE, query.ptr(), query.length());
......
......@@ -460,6 +460,8 @@ void ha_myisammrg::update_create_info(HA_CREATE_INFO *create_info)
{
TABLE_LIST *ptr;
LEX_STRING db, name;
db.length= 0;
db.str= 0;
if (!(ptr = (TABLE_LIST *) thd->calloc(sizeof(TABLE_LIST))))
goto err;
......@@ -570,6 +572,8 @@ void ha_myisammrg::append_create_info(String *packet)
open_table++)
{
LEX_STRING db, name;
db.length= 0;
db.str= 0;
split_file_name(open_table->table->filename, &db, &name);
if (open_table != first)
packet->append(',');
......
......@@ -322,6 +322,7 @@ static int my_strncasecmp_ucs2(CHARSET_INFO *cs,
const char *te=t+len;
MY_UNICASE_INFO **uni_plane= cs->caseinfo;
LINT_INIT(s_wc);
LINT_INIT(t_wc);
while ( s < se && t < te )
{
......@@ -1385,6 +1386,7 @@ int my_strnncoll_ucs2_bin(CHARSET_INFO *cs,
const uchar *se=s+slen;
const uchar *te=t+tlen;
LINT_INIT(s_wc);
LINT_INIT(t_wc);
while ( s < se && t < te )
{
......
......@@ -2313,6 +2313,7 @@ static int my_strnncoll_utf8(CHARSET_INFO *cs,
const uchar *te=t+tlen;
MY_UNICASE_INFO **uni_plane= cs->caseinfo;
LINT_INIT(s_wc);
LINT_INIT(t_wc);
while ( s < se && t < te )
{
......@@ -2383,6 +2384,7 @@ static int my_strnncollsp_utf8(CHARSET_INFO *cs,
const uchar *se= s+slen, *te= t+tlen;
MY_UNICASE_INFO **uni_plane= cs->caseinfo;
LINT_INIT(s_wc);
LINT_INIT(t_wc);
#ifndef VARCHAR_WITH_DIFF_ENDSPACE_ARE_DIFFERENT_FOR_UNIQUE
diff_if_only_endspace_difference= 0;
......
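
The LINT_INIT() additions in the two hunks above follow the server's usual way of
marking variables that a conservative compiler reports as "may be used uninitialized"
even though every read is guarded. A self-contained sketch of the idea; the macro
definition here is only a stand-in (the real one lives in my_global.h and may differ),
and first_nonzero() is a hypothetical example function.

    /* Stand-in for MySQL's LINT_INIT(); the real definition may differ. */
    #ifdef FORCE_INIT_OF_VARS            /* assumption: some builds force the init */
    #define LINT_INIT(var) var= 0
    #else
    #define LINT_INIT(var)
    #endif

    int first_nonzero(const int *p, const int *end)
    {
      int found;                         /* only read when the loop breaks early */
      LINT_INIT(found);                  /* dummy init in builds where the macro
                                            expands to an assignment */
      while (p < end)
      {
        if (*p)
        {
          found= *p;
          break;
        }
        p++;
      }
      return p < end ? found : -1;       /* found is read only if it was assigned */
    }
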
......@@ -33,17 +33,23 @@ db_vrfy.c : .*comparison is always false due to limited range of data type.*
# Ignore all conversion warnings on windows 64
# (Is safe as we are not yet supporting strings >= 2G)
#
.* : conversion from 'size_t' to .*int'.*
.* : conversion from '.*size_t' to .*int'.*
.* : conversion from '__int64' to .*int'.*
.* : conversion from '__int64' to uint8'.*
.* : conversion from '__int64' to uint32'.*
.* : conversion from '.*size_t' to 'TaoCrypt::word32'.*
.* : conversion from '.*size_t' to 'u.*long'.*
#
# The following should be fixed by the ndb team
#
.*/ndb/.* : .*used uninitialized in this function.*
.*/ndb/.* : .*unused variable.*
.*/ndb/.* : .*defined but not used.*
#
# Unexplanable (?) stuff
#
listener.cc : .*conversion from 'SOCKET' to 'int'.*
net_serv.c : .*conversion from 'SOCKET' to 'int'.*
net_serv.cc : .*conversion from 'SOCKET' to 'int'.*
mi_packrec.c : .*result of 32-bit shift implicitly converted to 64 bits.* : 567