Commit 719c88e3 authored by monty@mishka.local

true,false -> TRUE, FALSE

Simple fixes/optimizations of things discovered during review of newly pushed code
parent 05433202
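
Note on the change itself: in the MySQL sources TRUE and FALSE are plain integer macros, so "true,false -> TRUE, FALSE" is a pure coding-style change with no effect on behaviour. A minimal sketch of the definitions (paraphrased from my_global.h; the exact guards and comments may differ):

#ifndef TRUE
#define TRUE  (1)  /* Logical true */
#define FALSE (0)  /* Logical false */
#endif

Because a C++ bool converts to 1/0, every "x= true" in the hunks below is equivalent to "x= TRUE"; the diff only aligns the code with the style used elsewhere in the tree.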
@@ -726,7 +726,7 @@ extern void my_free_lock(byte *ptr,myf flags);
 #endif
 #define alloc_root_inited(A) ((A)->min_malloc != 0)
 #define ALLOC_ROOT_MIN_BLOCK_SIZE (MALLOC_OVERHEAD + sizeof(USED_MEM) + 8)
-#define clear_alloc_root(A) do { (A)->free= (A)->used= (A)->pre_alloc= 0; } while(0)
+#define clear_alloc_root(A) do { (A)->free= (A)->used= (A)->pre_alloc= 0; (A)->min_malloc=0;} while(0)
 extern void init_alloc_root(MEM_ROOT *mem_root, uint block_size,
                             uint pre_alloc_size);
 extern gptr alloc_root(MEM_ROOT *mem_root,unsigned int Size);
...
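
The clear_alloc_root() change above is worth a note: alloc_root_inited() (visible in the same hunk) tests min_malloc, so a macro that clears everything except min_malloc leaves a cleared root still looking "initialised". A hedged sketch of the observable difference, using only the declarations shown in the hunk (assumes a MySQL build environment; not a test from the tree):

#include <my_global.h>
#include <my_sys.h>
#include <assert.h>

static void demo_clear_alloc_root()
{
  MEM_ROOT root;
  init_alloc_root(&root, 1024, 0);    /* pre_alloc_size 0: nothing allocated,
                                         but min_malloc becomes non-zero */
  clear_alloc_root(&root);            /* new macro also zeroes min_malloc */
  assert(!alloc_root_inited(&root));  /* holds only with the fixed macro */
}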
@@ -72,19 +72,48 @@ _hash_init(HASH *hash,CHARSET_INFO *charset,
 }

-void hash_free(HASH *hash)
+/*
+  Call hash->free on all elements in hash.
+
+  SYNOPSIS
+    hash_free_elements()
+    hash   hash table
+
+  NOTES:
+    Sets records to 0
+*/
+
+static void inline hash_free_elements(HASH *hash)
 {
-  DBUG_ENTER("hash_free");
   if (hash->free)
   {
-    uint i,records;
     HASH_LINK *data=dynamic_element(&hash->array,0,HASH_LINK*);
-    for (i=0,records=hash->records ; i < records ; i++)
-      (*hash->free)(data[i].data);
-    hash->free=0;
+    HASH_LINK *end= data + hash->records;
+    while (data < end)
+      (*hash->free)((data++)->data);
   }
-  delete_dynamic(&hash->array);
   hash->records=0;
+}
+
+
+/*
+  Free memory used by hash.
+
+  SYNOPSIS
+    hash_free()
+    hash   the hash to delete elements of
+
+  NOTES: Hash can't be reused without calling hash_init again.
+*/
+
+void hash_free(HASH *hash)
+{
+  DBUG_ENTER("hash_free");
+  DBUG_PRINT("enter",("hash: 0x%lxd",hash));
+
+  hash_free_elements(hash);
+  hash->free= 0;
+  delete_dynamic(&hash->array);
   DBUG_VOID_RETURN;
 }
@@ -100,15 +129,11 @@ void hash_free(HASH *hash)
 void hash_reset(HASH *hash)
 {
   DBUG_ENTER("hash_reset");
-  if (hash->free)
-  {
-    HASH_LINK *link= dynamic_element(&hash->array, 0, HASH_LINK*);
-    HASH_LINK *end= link + hash->records;
-    for (; link < end; ++link)
-      (*hash->free)(link->data);
-  }
+  DBUG_PRINT("enter",("hash: 0x%lxd",hash));
+
+  hash_free_elements(hash);
   reset_dynamic(&hash->array);
-  hash->records= 0;
+  /* Set row pointers so that the hash can be reused at once */
   hash->blength= 1;
   hash->current_record= NO_RECORD;
   DBUG_VOID_RETURN;
...
@@ -38,7 +38,7 @@
 #include <m_string.h>

-inline void bitmap_lock(MY_BITMAP *map)
+static inline void bitmap_lock(MY_BITMAP *map)
 {
 #ifdef THREAD
   if (map->mutex)
@@ -47,7 +47,7 @@ inline void bitmap_lock(MY_BITMAP *map)
 }

-inline void bitmap_unlock(MY_BITMAP *map)
+static inline void bitmap_unlock(MY_BITMAP *map)
 {
 #ifdef THREAD
   if (map->mutex)
...
@@ -103,14 +103,15 @@
   rows - This is an unsigned long long which is the number of rows in the data
          file.
   check point - Reserved for future use
-  dirty - Status of the file, whether or not its values are the latest. This flag
-          is what causes a repair to occur
+  dirty - Status of the file, whether or not its values are the latest. This
+          flag is what causes a repair to occur
   The data file:
   check - Just an int of 254 to make sure that the the file we are opening was
           never corrupted.
   version - The current version of the file format.
   data - The data is stored in a "row +blobs" format.
+*/

 /* Variables for archive share methods */
 pthread_mutex_t archive_mutex;
...
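
The comment reproduced above describes two on-disk files: a meta file (row count, a reserved check point, and a dirty flag that triggers repair) and a data file that starts with a check value of 254 followed by a format version. As a hedged illustration only -- the field widths, ordering and function name here are assumptions, not the engine's real layout -- a reader for such a data-file header could look like this:

#include <stdio.h>

/* Illustrative sketch; the real engine defines its own layout and constants */
static int check_archive_data_header(FILE *f)
{
  int check, version;
  if (fread(&check, sizeof(check), 1, f) != 1 ||
      fread(&version, sizeof(version), 1, f) != 1)
    return 1;                           /* truncated header */
  if (check != 254)
    return 1;                           /* treat the file as corrupted */
  printf("archive data file, format version %d\n", version);
  return 0;                             /* rows follow in "row + blobs" form */
}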
@@ -65,7 +65,7 @@ typedef NdbDictionary::Table NDBTAB;
 typedef NdbDictionary::Index  NDBINDEX;
 typedef NdbDictionary::Dictionary  NDBDICT;

-bool ndbcluster_inited= false;
+bool ndbcluster_inited= FALSE;

 static Ndb* g_ndb= NULL;
 static Ndb_cluster_connection* g_ndb_cluster_connection= NULL;
@@ -146,8 +146,10 @@ inline
 int execute_no_commit(ha_ndbcluster *h, NdbConnection *trans)
 {
   int m_batch_execute= 0;
-  if (false && m_batch_execute)
+#ifdef NOT_USED
+  if (m_batch_execute)
     return 0;
+#endif
   return trans->execute(NoCommit,AbortOnError,1);
 }
@@ -155,8 +157,10 @@ inline
 int execute_commit(ha_ndbcluster *h, NdbConnection *trans)
 {
   int m_batch_execute= 0;
-  if (false && m_batch_execute)
+#ifdef NOT_USED
+  if (m_batch_execute)
     return 0;
+#endif
   return trans->execute(Commit,AbortOnError,1);
 }
@@ -164,8 +168,10 @@ inline
 int execute_no_commit_ie(ha_ndbcluster *h, NdbConnection *trans)
 {
   int m_batch_execute= 0;
-  if (false && m_batch_execute)
+#ifdef NOT_USED
+  if (m_batch_execute)
     return 0;
+#endif
   return trans->execute(NoCommit,IgnoreError,1);
 }
@@ -326,7 +332,7 @@ bool ha_ndbcluster::get_error_message(int error,
   Ndb *ndb= ((Thd_ndb*)current_thd->transaction.thd_ndb)->ndb;
   if (!ndb)
-    DBUG_RETURN(false);
+    DBUG_RETURN(FALSE);

   const NdbError err= ndb->getNdbError(error);
   bool temporary= err.status==NdbError::TemporaryError;
@@ -367,12 +373,12 @@ static inline bool ndb_supported_type(enum_field_types type)
   case MYSQL_TYPE_LONG_BLOB:
   case MYSQL_TYPE_ENUM:
   case MYSQL_TYPE_SET:
-    return true;
+    return TRUE;
   case MYSQL_TYPE_NULL:
   case MYSQL_TYPE_GEOMETRY:
     break;
   }
-  return false;
+  return FALSE;
 }
@@ -466,7 +472,7 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field,
       DBUG_DUMP("value", (char*)blob_ptr, min(blob_len, 26));

       if (set_blob_value)
-        *set_blob_value= true;
+        *set_blob_value= TRUE;
       // No callback needed to write value
       DBUG_RETURN(ndb_blob->setValue(blob_ptr, blob_len) != 0);
     }
@@ -609,24 +615,24 @@ int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field,
 bool ha_ndbcluster::uses_blob_value(bool all_fields)
 {
   if (table->blob_fields == 0)
-    return false;
+    return FALSE;
   if (all_fields)
-    return true;
+    return TRUE;
   {
     uint no_fields= table->fields;
     int i;
-    THD *thd= current_thd;
+    THD *thd= table->in_use;
     // They always put blobs at the end..
     for (i= no_fields - 1; i >= 0; i--)
     {
       Field *field= table->field[i];
       if (thd->query_id == field->query_id)
       {
-        return true;
+        return TRUE;
       }
     }
   }
-  return false;
+  return FALSE;
 }
@@ -645,7 +651,7 @@ int ha_ndbcluster::get_metadata(const char *path)
   NDBDICT *dict= m_ndb->getDictionary();
   const NDBTAB *tab;
   int error;
-  bool invalidating_ndb_table= false;
+  bool invalidating_ndb_table= FALSE;

   DBUG_ENTER("get_metadata");
   DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, path));
@@ -676,7 +682,7 @@ int ha_ndbcluster::get_metadata(const char *path)
       {
         DBUG_PRINT("info", ("Invalidating table"));
         dict->invalidateTable(m_tabname);
-        invalidating_ndb_table= true;
+        invalidating_ndb_table= TRUE;
       }
       else
       {
@@ -687,12 +693,12 @@ int ha_ndbcluster::get_metadata(const char *path)
         DBUG_DUMP("pack_data", (char*)pack_data, pack_length);
         DBUG_DUMP("frm", (char*)tab->getFrmData(), tab->getFrmLength());
         error= 3;
-        invalidating_ndb_table= false;
+        invalidating_ndb_table= FALSE;
       }
     }
     else
     {
-      invalidating_ndb_table= false;
+      invalidating_ndb_table= FALSE;
     }
     my_free((char*)data, MYF(0));
     my_free((char*)pack_data, MYF(0));
@@ -755,7 +761,7 @@ int ha_ndbcluster::build_index_list(TABLE *tab, enum ILBP phase)
       error= create_ordered_index(index_name, key_info);
       break;
     default:
-      DBUG_ASSERT(false);
+      DBUG_ASSERT(FALSE);
       break;
     }
     if (error)
@@ -1172,7 +1178,7 @@ inline int ha_ndbcluster::next_result(byte *buf)
       if (execute_no_commit(this,trans) != 0)
        DBUG_RETURN(ndb_err(trans));
       ops_pending= 0;
-      blobs_pending= false;
+      blobs_pending= FALSE;
     }
     check= cursor->nextResult(contact_ndb);
     if (check == 0)
@@ -1585,7 +1591,7 @@ int ha_ndbcluster::write_row(byte *record)
     if (has_auto_increment)
     {
-      skip_auto_increment= false;
+      skip_auto_increment= FALSE;
       update_auto_increment();
       skip_auto_increment= !auto_increment_column_changed;
     }
@@ -1595,14 +1601,14 @@ int ha_ndbcluster::write_row(byte *record)
   }

   // Set non-key attribute(s)
-  bool set_blob_value= false;
+  bool set_blob_value= FALSE;
   for (i= 0; i < table->fields; i++)
   {
     Field *field= table->field[i];
     if (!(field->flags & PRI_KEY_FLAG) &&
        set_ndb_value(op, field, i, &set_blob_value))
     {
-      skip_auto_increment= true;
+      skip_auto_increment= TRUE;
       ERR_RETURN(op->getNdbError());
     }
   }
@@ -1616,7 +1622,7 @@ int ha_ndbcluster::write_row(byte *record)
     */
     rows_inserted++;
     no_uncommitted_rows_update(1);
-    bulk_insert_not_flushed= true;
+    bulk_insert_not_flushed= TRUE;
     if ((rows_to_insert == 1) ||
        ((rows_inserted % bulk_insert_rows) == 0) ||
        set_blob_value)
@@ -1627,12 +1633,12 @@ int ha_ndbcluster::write_row(byte *record)
                          "rows_inserted:%d, bulk_insert_rows: %d",
                          (int)rows_inserted, (int)bulk_insert_rows));
-      bulk_insert_not_flushed= false;
+      bulk_insert_not_flushed= FALSE;
       if (thd->transaction.on)
       {
        if (execute_no_commit(this,trans) != 0)
        {
-         skip_auto_increment= true;
+         skip_auto_increment= TRUE;
          no_uncommitted_rows_execute_failure();
          DBUG_RETURN(ndb_err(trans));
        }
@@ -1641,7 +1647,7 @@ int ha_ndbcluster::write_row(byte *record)
      {
        if (execute_commit(this,trans) != 0)
        {
-         skip_auto_increment= true;
+         skip_auto_increment= TRUE;
          no_uncommitted_rows_execute_failure();
          DBUG_RETURN(ndb_err(trans));
        }
@@ -1655,11 +1661,11 @@ int ha_ndbcluster::write_row(byte *record)
     DBUG_PRINT("info",
               ("Trying to set next auto increment value to %lu",
                (ulong) next_val));
-    if (m_ndb->setAutoIncrementValue((const NDBTAB *) m_table, next_val, true))
+    if (m_ndb->setAutoIncrementValue((const NDBTAB *) m_table, next_val, TRUE))
       DBUG_PRINT("info",
                 ("Setting next auto increment value to %u", next_val));
   }
-  skip_auto_increment= true;
+  skip_auto_increment= TRUE;

   DBUG_RETURN(0);
 }
@@ -1763,8 +1769,8 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
     if (!(op= cursor->updateTuple()))
       ERR_RETURN(trans->getNdbError());
     ops_pending++;
-    if (uses_blob_value(false))
-      blobs_pending= true;
+    if (uses_blob_value(FALSE))
+      blobs_pending= TRUE;
   }
   else
   {
@@ -1920,7 +1926,7 @@ void ha_ndbcluster::unpack_record(byte* buf)
       else
       {
         NdbBlob* ndb_blob= (*value).blob;
-        bool isNull= true;
+        bool isNull= TRUE;
         int ret= ndb_blob->getNull(isNull);
         DBUG_ASSERT(ret == 0);
         if (isNull)
@@ -1988,7 +1994,7 @@ void ha_ndbcluster::print_results()
     else
     {
       ndb_blob= value.blob;
-      bool isNull= true;
+      bool isNull= TRUE;
       ndb_blob->getNull(isNull);
       if (isNull) {
         fprintf(DBUG_FILE, "NULL\n");
@@ -2165,7 +2171,7 @@ int ha_ndbcluster::index_read(byte *buf,
     break;
   default:
   case UNDEFINED_INDEX:
-    DBUG_ASSERT(false);
+    DBUG_ASSERT(FALSE);
     return 1;
     break;
   }
@@ -2177,7 +2183,7 @@ int ha_ndbcluster::index_read(byte *buf,
   start_key.key = key;
   start_key.length = key_len;
   start_key.flag = find_flag;
-  error= ordered_index_scan(&start_key, 0, true, buf);
+  error= ordered_index_scan(&start_key, 0, TRUE, buf);
   DBUG_RETURN(error == HA_ERR_END_OF_FILE ? HA_ERR_KEY_NOT_FOUND : error);
 }
@@ -2219,7 +2225,7 @@ int ha_ndbcluster::index_first(byte *buf)
   // Start the ordered index scan and fetch the first row

   // Only HA_READ_ORDER indexes get called by index_first
-  DBUG_RETURN(ordered_index_scan(0, 0, true, buf));
+  DBUG_RETURN(ordered_index_scan(0, 0, TRUE, buf));
 }
@@ -2228,9 +2234,9 @@ int ha_ndbcluster::index_last(byte *buf)
   DBUG_ENTER("index_last");
   statistic_increment(ha_read_last_count,&LOCK_status);
   int res;
-  if((res= ordered_index_scan(0, 0, true, buf)) == 0){
+  if((res= ordered_index_scan(0, 0, TRUE, buf)) == 0){
     NdbResultSet *cursor= m_active_cursor;
-    while((res= cursor->nextResult(true)) == 0);
+    while((res= cursor->nextResult(TRUE)) == 0);
     if(res == 1){
       unpack_record(buf);
       table->status= 0;
@@ -2584,8 +2590,8 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
   case HA_EXTRA_NO_IGNORE_DUP_KEY:
     DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY"));
     DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
-    m_use_write= false;
-    m_ignore_dup_key_not_supported= false;
+    m_use_write= FALSE;
+    m_ignore_dup_key_not_supported= FALSE;
     break;
   case HA_EXTRA_RETRIEVE_ALL_COLS:    /* Retrieve all columns, not just those
                                          where field->query_id is the same as
@@ -2671,7 +2677,7 @@ int ha_ndbcluster::end_bulk_insert()
     DBUG_PRINT("info", ("Sending inserts to NDB, "\
                         "rows_inserted:%d, bulk_insert_rows: %d",
                         rows_inserted, bulk_insert_rows));
-    bulk_insert_not_flushed= false;
+    bulk_insert_not_flushed= FALSE;
     if (execute_no_commit(this,trans) != 0) {
       no_uncommitted_rows_execute_failure();
       my_errno= error= ndb_err(trans);
@@ -3210,7 +3216,7 @@ static int create_ndb_column(NDBCOL &col,
     col.setAutoIncrementInitialValue(value);
   }
   else
-    col.setAutoIncrement(false);
+    col.setAutoIncrement(FALSE);
   return 0;
 }
@@ -3280,7 +3286,7 @@ int ha_ndbcluster::create(const char *name,
     col.setName("$PK");
     col.setType(NdbDictionary::Column::Bigunsigned);
     col.setLength(1);
-    col.setNullable(false);
+    col.setNullable(FALSE);
     col.setPrimaryKey(TRUE);
     col.setAutoIncrement(TRUE);
     tab.addColumn(col);
@@ -3315,7 +3321,7 @@ int ha_ndbcluster::create_ordered_index(const char *name,
                                         KEY *key_info)
 {
   DBUG_ENTER("create_ordered_index");
-  DBUG_RETURN(create_index(name, key_info, false));
+  DBUG_RETURN(create_index(name, key_info, FALSE));
 }

 int ha_ndbcluster::create_unique_index(const char *name,
@@ -3323,7 +3329,7 @@ int ha_ndbcluster::create_unique_index(const char *name,
 {
   DBUG_ENTER("create_unique_index");
-  DBUG_RETURN(create_index(name, key_info, true));
+  DBUG_RETURN(create_index(name, key_info, TRUE));
 }
@@ -3349,7 +3355,7 @@ int ha_ndbcluster::create_index(const char *name,
   {
     ndb_index.setType(NdbDictionary::Index::OrderedIndex);
     // TODO Only temporary ordered indexes supported
-    ndb_index.setLogging(false);
+    ndb_index.setLogging(FALSE);
   }
   ndb_index.setTable(m_tabname);
@@ -3512,15 +3518,15 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
                 HA_AUTO_PART_KEY |
                 HA_NO_PREFIX_CHAR_KEYS),
   m_share(0),
-  m_use_write(false),
-  m_ignore_dup_key_not_supported(false),
+  m_use_write(FALSE),
+  m_ignore_dup_key_not_supported(FALSE),
   retrieve_all_fields(FALSE),
   rows_to_insert(1),
   rows_inserted(0),
   bulk_insert_rows(1024),
-  bulk_insert_not_flushed(false),
+  bulk_insert_not_flushed(FALSE),
   ops_pending(0),
-  skip_auto_increment(true),
+  skip_auto_increment(TRUE),
   blobs_pending(0),
   blobs_buffer(0),
   blobs_buffer_size(0),
@@ -3931,9 +3937,9 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
       table_list.db= (char*) db;
       table_list.real_name=(char*)file_name;
       (void)mysql_rm_table_part2(thd, &table_list,
-                                 /* if_exists */ true,
-                                 /* drop_temporary */ false,
-                                 /* dont_log_query*/ true);
+                                 /* if_exists */ TRUE,
+                                 /* drop_temporary */ FALSE,
+                                 /* dont_log_query*/ TRUE);
     }
   }
@@ -3942,7 +3948,7 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path,
   while ((file_name=it2++))
   {
     DBUG_PRINT("info", ("Table %s need discovery", name));
-    if (ha_create_table_from_engine(thd, db, file_name, true) == 0)
+    if (ha_create_table_from_engine(thd, db, file_name, TRUE) == 0)
       files->push_back(thd->strdup(file_name));
   }
@@ -4009,7 +4015,7 @@ bool ndbcluster_init()
   if (ndb_discover_tables() != 0)
     DBUG_RETURN(TRUE);
 #endif
-  DBUG_RETURN(false);
+  DBUG_RETURN(FALSE);
 }
@@ -4367,7 +4373,7 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
     Uint64 sum_rows= 0;
     Uint64 sum_commits= 0;
-    while((check= rs->nextResult(true)) == 0)
+    while((check= rs->nextResult(TRUE)) == 0)
     {
       sum_rows+= rows;
       sum_commits+= commits;
...
@@ -139,12 +139,12 @@ class ha_ndbcluster: public handler
   bool low_byte_first() const
   {
 #ifdef WORDS_BIGENDIAN
-    return false;
+    return FALSE;
 #else
-    return true;
+    return TRUE;
 #endif
   }
-  bool has_transactions()  { return true; }
+  bool has_transactions()  { return TRUE; }
   const char* index_type(uint key_number) {
     switch (get_index_type(key_number)) {
...
@@ -1105,6 +1105,11 @@ void handler::print_error(int error, myf errflag)
     break;
   case HA_ERR_NO_SUCH_TABLE:
   {
+    /*
+      We have to use path to find database name instead of using
+      table->table_cache_key because if the table didn't exist, then
+      table_cache_key was not set up
+    */
     char *db;
     char buff[FN_REFLEN];
     uint length=dirname_part(buff,table->path);
@@ -1276,22 +1281,25 @@ int ha_create_table_from_engine(THD* thd,
                                const char *name,
                                bool create_if_found)
 {
-  int error= 0;
-  const void* frmblob = NULL;
-  uint frmlen = 0;
+  int error;
+  const void *frmblob;
+  uint frmlen;
   char path[FN_REFLEN];
   HA_CREATE_INFO create_info;
   TABLE table;
   DBUG_ENTER("ha_create_table_from_engine");
-  DBUG_PRINT("enter", ("db: %s, name: %s", db, name));
-  DBUG_PRINT("enter", ("create_if_found: %d", create_if_found));
+  DBUG_PRINT("enter", ("name '%s'.'%s'  create_if_found: %d",
+                       db, name, create_if_found));

   bzero((char*) &create_info,sizeof(create_info));

   if ((error= ha_discover(thd, db, name, &frmblob, &frmlen)))
     DBUG_RETURN(error);
+  /*
+    Table exists in handler
+    frmblob and frmlen are set
+  */

-  // Table exists in handler
   if (create_if_found)
   {
     (void)strxnmov(path,FN_REFLEN,mysql_data_home,"/",db,"/",name,NullS);
@@ -1309,9 +1317,7 @@ int ha_create_table_from_engine(THD* thd,
       !(table.file->table_flags() & HA_FILE_BASED))
   {
     /* Ensure that handler gets name in lower case */
-    strmov(path, name);
     my_casedn_str(files_charset_info, path);
-    name= path;
   }

   error=table.file->create(path,&table,&create_info);
@@ -1319,8 +1325,7 @@ int ha_create_table_from_engine(THD* thd,
   }

 err_end:
-  if (frmblob)
-    my_free((char*) frmblob,MYF(0));
+  my_free((char*) frmblob, MYF(MY_ALLOW_ZERO_PTR));
   DBUG_RETURN(error);
 }
@@ -1429,10 +1434,14 @@ int ha_change_key_cache(KEY_CACHE *old_key_cache,
 /*
   Try to discover one table from handler(s)
+
+  RETURN
+    0  ok. In this case *frmblob and *frmlen are set
+    1  error.  frmblob and frmlen may not be set
 */

-int ha_discover(THD* thd, const char* db, const char* name,
-                const void** frmblob, uint* frmlen)
+int ha_discover(THD *thd, const char *db, const char *name,
+                const void **frmblob, uint *frmlen)
 {
   int error= 1; // Table does not exist in any handler
   DBUG_ENTER("ha_discover");
@@ -1470,6 +1479,8 @@ ha_find_files(THD *thd,const char *db,const char *path,
 }

+#ifdef NOT_YET_USED
 /*
   Ask handler if the table exists in engine
@@ -1491,6 +1502,7 @@ int ha_table_exists(THD* thd, const char* db, const char* name)
   DBUG_RETURN(error);
 }
+#endif

 /*
...
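
The RETURN section added to ha_discover() above spells out the contract this diff relies on: on success *frmblob and *frmlen are set and the caller owns the buffer, on failure they may be left untouched, which is why ha_create_table_from_engine() can now free frmblob unconditionally at err_end. A hedged caller sketch (discover_and_report is not a real server function; it only illustrates the documented contract):

static int discover_and_report(THD *thd, const char *db, const char *name)
{
  const void *frmblob= 0;                      /* must be initialised: on   */
  uint frmlen= 0;                              /* error it may stay unset   */
  int error= ha_discover(thd, db, name, &frmblob, &frmlen);
  if (!error)
    DBUG_PRINT("info", ("discovered %s.%s, frm is %u bytes", db, name, frmlen));
  my_free((char*) frmblob, MYF(MY_ALLOW_ZERO_PTR));   /* no-op for NULL */
  return error;
}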
@@ -855,7 +855,7 @@ public:
   char escape;

   Item_func_like(Item *a,Item *b, Item *escape_arg)
-    :Item_bool_func2(a,b), canDoTurboBM(false), pattern(0), pattern_len(0),
+    :Item_bool_func2(a,b), canDoTurboBM(FALSE), pattern(0), pattern_len(0),
      bmGs(0), bmBc(0), escape_item(escape_arg) {}
   longlong val_int();
   enum Functype functype() const { return LIKE_FUNC; }
...
@@ -2709,41 +2709,40 @@ longlong Item_func_crc32::val_int()
 String *Item_func_compress::val_str(String *str)
 {
+  int err= Z_OK, code;
+  ulong new_size;
+  String *res;
+  Byte *body;
+  char *tmp, *last_char;
   DBUG_ASSERT(fixed == 1);
-  String *res= args[0]->val_str(str);
-  if (!res)
+
+  if (!(res= args[0]->val_str(str)))
   {
     null_value= 1;
     return 0;
   }
   if (res->is_empty()) return res;

-  int err= Z_OK;
-  int code;
-
   /*
-    citation from zlib.h (comment for compress function):
+    Citation from zlib.h (comment for compress function):

     Compresses the source buffer into the destination buffer.  sourceLen is
     the byte length of the source buffer. Upon entry, destLen is the total
     size of the destination buffer, which must be at least 0.1% larger than
     sourceLen plus 12 bytes.
-
-    Proportion 120/100 founded by Sinisa with help of procedure
-    compress(compress(compress(...)))
-    I.e. zlib give number 'at least'..
+    We assume here that the buffer can't grow more than .25 %.
   */
-  ulong new_size= res->length() + res->length() / 5 + 12;
+  new_size= res->length() + res->length() / 5 + 12;

-  // Will check new_size overflow: new_size <= res->length()
-  if (((uint32) new_size <= res->length()) ||
+  // Check new_size overflow: new_size <= res->length()
+  if (((uint32) (new_size+5) <= res->length()) ||
       buffer.realloc((uint32) new_size + 4 + 1))
   {
     null_value= 1;
     return 0;
   }

-  Byte *body= ((Byte*)buffer.ptr()) + 4;
+  body= ((Byte*)buffer.ptr()) + 4;

   // As far as we have checked res->is_empty() we can use ptr()
   if ((err= compress(body, &new_size,
@@ -2755,11 +2754,11 @@ String *Item_func_compress::val_str(String *str)
     return 0;
   }

-  char *tmp= (char*)buffer.ptr(); // int4store is a macro; avoid side effects
+  tmp= (char*)buffer.ptr(); // int4store is a macro; avoid side effects
   int4store(tmp, res->length() & 0x3FFFFFFF);

-  /* This is for the stupid char fields which trim ' ': */
-  char *last_char= ((char*)body)+new_size-1;
+  /* This is to ensure that things work for CHAR fields, which trim ' ': */
+  last_char= ((char*)body)+new_size-1;
   if (*last_char == ' ')
   {
     *++last_char= '.';
...
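
A standalone arithmetic check of the sizing rule kept in the rewritten comment (a sketch under the stated assumption that zlib's bound is sourceLen + 0.1% + 12, as quoted above; it is not server code): the reserved new_size of length + length/5 + 12 always covers that minimum, and the extra 4 + 1 bytes passed to realloc() leave room for the uncompressed length stored by int4store() plus the trailing-space fixup.

#include <cstdio>

int main()
{
  for (unsigned long len= 1; len < (1UL << 30); len*= 2)
  {
    unsigned long reserved= len + len / 5 + 12;     /* what val_str() reserves */
    unsigned long zlib_min= len + len / 1000 + 12;  /* zlib's "0.1% + 12" rule */
    if (reserved < zlib_min)
      std::printf("bound violated at %lu\n", len);
  }
  std::printf("length + length/5 + 12 always covers zlib's minimum\n");
  return 0;
}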
@@ -571,7 +571,7 @@ public:
   {
     fname= afname;
     fname_len= alen;
-    local_fname= true;
+    local_fname= TRUE;
   }
   /* fname doesn't point to memory inside Log_event::temp_buf */
   int check_fname_outside_temp_buf()
...
@@ -1368,7 +1368,7 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db,
     */
     if (discover_retry_count++ != 0)
       goto err;
-    if (ha_create_table_from_engine(thd, db, name, true) != 0)
+    if (ha_create_table_from_engine(thd, db, name, TRUE) != 0)
       goto err;

     thd->clear_error();                         // Clear error message
@@ -2846,8 +2846,15 @@ void flush_tables()
 /*
-** Mark all entries with the table as deleted to force an reopen of the table
-** Returns true if the table is in use by another thread
+  Mark all entries with the table as deleted to force a reopen of the table
+
+  The table will be closed (not stored in cache) by the current thread when
+  close_thread_tables() is called.
+
+  RETURN
+    0  This thread now has exclusive access to this table and no other thread
+       can access the table until close_thread_tables() is called.
+    1  Table is in use by another thread
 */

 bool remove_table_from_cache(THD *thd, const char *db, const char *table_name,
...
@@ -746,7 +746,7 @@ int mysqld_help(THD *thd, const char *mask)
                              select,&subcategories_list);
     delete select;
     String *cat= categories_list.head();
-    if (send_header_2(protocol, true) ||
+    if (send_header_2(protocol, TRUE) ||
         send_variant_2_list(mem_root,protocol,&topics_list,       "N",cat) ||
         send_variant_2_list(mem_root,protocol,&subcategories_list,"Y",cat))
       goto end;
...
@@ -454,7 +454,6 @@ inline static uint int_token(const char *str,uint length)
 int yylex(void *arg, void *yythd)
 {
   reg1  uchar c;
-  bool space_ignored;
   int   tokval, result_state;
   uint length;
   enum my_lex_states state;
@@ -537,6 +536,7 @@ int yylex(void *arg, void *yythd)
       /* Fall through */
     case MY_LEX_IDENT_OR_BIN: // TODO: Add binary string handling
     case MY_LEX_IDENT:
+      uchar *start;
 #if defined(USE_MB) && defined(USE_MB_IDENT)
       if (use_mb(cs))
       {
@@ -573,12 +573,16 @@ int yylex(void *arg, void *yythd)
         result_state= result_state & 0x80 ? IDENT_QUOTED : IDENT;
       }
       length= (uint) (lex->ptr - lex->tok_start)-1;
-      space_ignored= FALSE;
+      start= lex->ptr;
       if (lex->ignore_space)
       {
-        for (; state_map[c] == MY_LEX_SKIP ; space_ignored= TRUE, c= yyGet());
+        /*
+          If we find a space then this can't be an identifier. We notice this
+          below by checking start != lex->ptr.
+        */
+        for (; state_map[c] == MY_LEX_SKIP ; c= yyGet());
       }
-      if (! space_ignored && c == '.' && ident_map[yyPeek()])
+      if (start == lex->ptr && c == '.' && ident_map[yyPeek()])
        lex->next_state=MY_LEX_IDENT_SEP;
       else
       {                                 // '(' must follow directly if function
...
@@ -894,7 +894,7 @@ static int check_connection(THD *thd)
     x_free(thd->user);
     if (!(thd->user= my_strdup(user, MYF(0))))
       return (ER_OUT_OF_RESOURCES);
-    return check_user(thd, COM_CONNECT, passwd, passwd_len, db, true);
+    return check_user(thd, COM_CONNECT, passwd, passwd_len, db, TRUE);
   }
@@ -4771,7 +4771,7 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
     acl_reload(thd);
     grant_reload(thd);
     if (mqh_used)
-      reset_mqh(thd,(LEX_USER *) NULL,true);
+      reset_mqh(thd,(LEX_USER *) NULL,TRUE);
   }
 #endif
   if (options & REFRESH_LOG)
...
@@ -1780,7 +1780,7 @@ void mysql_stmt_execute(THD *thd, char *packet, uint packet_length)
 #endif
   DBUG_ASSERT(thd->free_list == NULL);
   thd->protocol= &thd->protocol_prep;           // Switch to binary protocol
-  execute_stmt(thd, stmt, &expanded_query, true);
+  execute_stmt(thd, stmt, &expanded_query, TRUE);
   thd->protocol= &thd->protocol_simple;         // Use normal protocol
   DBUG_VOID_RETURN;
@@ -1832,7 +1832,7 @@ void mysql_sql_stmt_execute(THD *thd, LEX_STRING *stmt_name)
     my_error(ER_WRONG_ARGUMENTS, MYF(0), "EXECUTE");
     send_error(thd);
   }
-  execute_stmt(thd, stmt, &expanded_query, false);
+  execute_stmt(thd, stmt, &expanded_query, FALSE);
   DBUG_VOID_RETURN;
 }
...
@@ -223,7 +223,7 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists,
       (void) unpack_filename(path,path);
     }
     if (drop_temporary ||
-       (access(path,F_OK) && ha_create_table_from_engine(thd,db,alias,true)))
+       (access(path,F_OK) && ha_create_table_from_engine(thd,db,alias,TRUE)))
     {
       if (if_exists)
        push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
...
@@ -843,14 +843,14 @@ prepare_src:
           THD *thd=YYTHD;
           LEX *lex= thd->lex;
           lex->prepared_stmt_code= $1;
-          lex->prepared_stmt_code_is_varref= false;
+          lex->prepared_stmt_code_is_varref= FALSE;
         }
         | '@' ident_or_text
         {
           THD *thd=YYTHD;
           LEX *lex= thd->lex;
           lex->prepared_stmt_code= $2;
-          lex->prepared_stmt_code_is_varref= true;
+          lex->prepared_stmt_code_is_varref= TRUE;
         };
 execute:
...