Commit 88a3b3bc authored by unknown

Fixed prototype of get_error_message to use String to return error message

WL#1747 and WL#1746: allow the user to decide whether an ordered index should be created or not


BitKeeper/deleted/.del-AttrType.hpp~a9b2d6efcf660378:
  Delete: ndb/include/ndbapi/AttrType.hpp
sql/ha_ndbcluster.cc:
  Removed NDB_ERR_CODE_OFFSET; NDB and handler error codes should not clash.
  Encapsulated the functionality that caches information about known indexes into build_index_list.
  Added detection of the index algorithm from key_info in get_index_type_from_table (a standalone sketch of the resulting index-type mapping precedes the ha_ndbcluster.cc hunks below).
  Updated read_range_first and records_in_range to work with the new prototypes.
sql/ha_ndbcluster.h:
  WL#1746 and WL#1747: Added the ability to skip creating an ordered index in addition to the hash index, if the user so wishes.
  Modified get_error_message to return the error message in a String; that way the String class takes care of whether the underlying data has to be freed or not (see the sketch below the commit header).
sql/handler.cc:
  Use a String as the output parameter of get_error_message.
sql/handler.h:
  Changed the function prototype for getting error messages from a handler to use the String datatype.
parent 4d3f8f21
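
As a rough illustration of the new error-message contract described above (a minimal standalone sketch, not MySQL code: Message, toy_handler, toy_ndb_handler and report are made-up stand-ins), the handler fills a caller-supplied string object and returns whether the error is temporary, so the caller never has to know how the message buffer is managed:

// Illustrative stand-ins only -- not the real MySQL String/handler classes.
#include <iostream>
#include <string>

// Toy stand-in for MySQL's String: it owns its buffer, so callers
// never need to ask whether the message must be freed.
using Message = std::string;

struct toy_handler
{
  // Shape of the new contract: fill the caller-supplied buffer and
  // report whether the error is temporary.
  virtual bool get_error_message(int error, Message *buf)
  {
    return false;                       // default: no engine-specific message
  }
  virtual ~toy_handler() {}
};

struct toy_ndb_handler : toy_handler
{
  bool get_error_message(int error, Message *buf)
  {
    // A real engine would look the code up in its own error tables.
    *buf= "simulated NDB error " + std::to_string(error);
    return error == 4010;               // pretend 4010 is a temporary error
  }
};

// Mirrors the caller side: use the message only if one was produced.
static void report(toy_handler &h, int error)
{
  Message msg;
  bool temporary= h.get_error_message(error, &msg);
  if (!msg.empty())
    std::cout << (temporary ? "temporary: " : "error: ") << msg << "\n";
  else
    std::cout << "errno " << error << "\n";
}

int main()
{
  toy_ndb_handler ndb;
  report(ndb, 4010);
  report(ndb, 626);
}

Compared with the old const char* get_error_message(int*, bool*) shape, nothing in this sketch ever hands back a raw pointer whose ownership is ambiguous.
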
/* Copyright (C) 2003 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/**
* @file AttrType.hpp
*/
#ifndef AttrType_H
#define AttrType_H
/**
* Max number of Ndb objects in different threads.
* (Ndb objects should not be shared by different threads.)
*/
const unsigned MAX_NO_THREADS = 4711;
/**
* Max number of attributes in a table.
*/
const unsigned MAXNROFATTRIBUTES = 128;
/**
* Max number of tuple keys for a table in NDB Cluster.
*
* A <em>tuple key</em> of a table is an attribute
* which is either part of the
* <em>primary key</em> or the <em>tuple id</em> of a table.
*/
const unsigned MAXNROFTUPLEKEY = 16;
/**
* Max number of words in a tuple key attribute.
*
* Tuple keys can not have values larger than
* 4092 bytes (i.e. 1023 words).
*/
const unsigned MAXTUPLEKEYLENOFATTERIBUTEINWORD = 1023;
/**
* Max number of ErrorCode in NDB Cluster range 0 - 1999.
*/
const unsigned MAXNDBCLUSTERERROR = 1999;
/**
* Max number of theErrorCode NDB API range 4000 - 4999.
*/
const unsigned MAXNROFERRORCODE = 5000;
/**
* <i>Missing explanation</i>
*/
enum ReturnType {
ReturnSuccess, ///< <i>Missing explanation</i>
ReturnFailure ///< <i>Missing explanation</i>
};
/**
*
*/
enum SendStatusType {
NotInit, ///< <i>Missing explanation</i>
InitState, ///< <i>Missing explanation</i>
sendOperations, ///< <i>Missing explanation</i>
sendCompleted, ///< <i>Missing explanation</i>
sendCOMMITstate, ///< <i>Missing explanation</i>
sendABORT, ///< <i>Missing explanation</i>
sendABORTfail, ///< <i>Missing explanation</i>
sendTC_ROLLBACK, ///< <i>Missing explanation</i>
sendTC_COMMIT, ///< <i>Missing explanation</i>
sendTC_OP ///< <i>Missing explanation</i>
};
/**
* <i>Missing explanation</i>
*/
enum ListState {
NotInList, ///< <i>Missing explanation</i>
InPreparedList, ///< <i>Missing explanation</i>
InSendList, ///< <i>Missing explanation</i>
InCompletedList ///< <i>Missing explanation</i>
};
/**
* Commit status of the transaction
*/
enum CommitStatusType {
NotStarted, ///< Transaction not yet started
Started, ///< <i>Missing explanation</i>
Committed, ///< Transaction has been committed
Aborted, ///< Transaction has been aborted
NeedAbort ///< <i>Missing explanation</i>
};
/**
* Commit type of transaction
*/
enum AbortOption {
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
CommitIfFailFree = 0,
CommitAsMuchAsPossible = 2, ///< Commit transaction with as many
TryCommit = 0, ///< <i>Missing explanation</i>
#endif
AbortOnError = 0, ///< Abort transaction on failed operation
IgnoreError = 2 ///< Transaction continues on failed operation
};
typedef AbortOption CommitType;
/**
* <i>Missing explanation</i>
*/
enum InitType {
NotConstructed, ///< <i>Missing explanation</i>
NotInitialised, ///< <i>Missing explanation</i>
StartingInit, ///< <i>Missing explanation</i>
Initialised, ///< <i>Missing explanation</i>
InitConfigError ///< <i>Missing explanation</i>
};
/**
* Type of attribute
*/
enum AttrType {
Signed, ///< Attributes of this type can be read with:
///< NdbRecAttr::int64_value,
///< NdbRecAttr::int32_value,
///< NdbRecAttr::short_value,
///< NdbRecAttr::char_value
UnSigned, ///< Attributes of this type can be read with:
///< NdbRecAttr::u_64_value,
///< NdbRecAttr::u_32_value,
///< NdbRecAttr::u_short_value,
///< NdbRecAttr::u_char_value
Float, ///< Attributes of this type can be read with:
///< NdbRecAttr::float_value and
///< NdbRecAttr::double_value
String, ///< Attributes of this type can be read with:
///< NdbRecAttr::aRef,
///< NdbRecAttr::getAttributeObject
NoAttrTypeDef ///< Used for debugging only
};
/**
* Execution type of transaction
*/
enum ExecType {
NoExecTypeDef = -1, ///< Erroneous type (Used for debugging only)
Prepare, ///< <i>Missing explanation</i>
NoCommit, ///< Execute the transaction as far as it has
///< been defined, but do not yet commit it
Commit, ///< Execute and try to commit the transaction
Rollback ///< Rollback transaction
};
/**
* Indicates whether the attribute is part of a primary key or not
*/
enum KeyType {
Undefined = -1, ///< Used for debugging only
NoKey, ///< Attribute is not part of primary key
///< or tuple identity
TupleKey, ///< Attribute is part of primary key
TupleId ///< Attribute is part of tuple identity
///< (This type of attribute is created
///< internally, and should not be
///< manually created.)
};
/**
* Indicate whether the attribute should be stored on disk or not
*/
enum StorageMode {
MMBased = 0, ///< Main memory
DiskBased = 1, ///< Disk (Not yet supported.)
NoStorageTypeDef ///< Used for debugging only
};
/**
* Where attribute is stored.
*
* This is used to indicate whether a primary key
* should only be stored in the index storage and not in the data storage
* or if it should be stored in both places.
* The first alternative makes the attribute take less space,
* but makes it impossible to scan using attribute.
*
* @note Use NormalStorageAttribute for most cases.
* (IndexStorageAttribute should only be used on primary key
* attributes and only if you do not want to scan using the attribute.)
*/
enum StorageAttributeType {
NoStorageAttributeTypeDefined = -1, ///< <i>Missing explanation</i>
IndexStorageAttribute, ///< Attribute is only stored in
///< index storage (ACC)
NormalStorageAttribute ///< Attribute values are stored
///< both in the index (ACC) and
///< in the data storage (TUP)
};
/**
* <i>Missing explanation</i>
*/
enum OperationStatus{
Init, ///< <i>Missing explanation</i>
OperationDefined, ///< <i>Missing explanation</i>
TupleKeyDefined, ///< <i>Missing explanation</i>
GetValue, ///< <i>Missing explanation</i>
SetValue, ///< <i>Missing explanation</i>
ExecInterpretedValue, ///< <i>Missing explanation</i>
SetValueInterpreted, ///< <i>Missing explanation</i>
FinalGetValue, ///< <i>Missing explanation</i>
SubroutineExec, ///< <i>Missing explanation</i>
SubroutineEnd, ///< <i>Missing explanation</i>
SetBound, ///< Setting bounds in range scan
WaitResponse, ///< <i>Missing explanation</i>
WaitCommitResponse, ///< <i>Missing explanation</i>
Finished, ///< <i>Missing explanation</i>
ReceiveFinished ///< <i>Missing explanation</i>
};
/**
* Type of operation
*/
enum OperationType {
ReadRequest = 0, ///< Read operation
UpdateRequest = 1, ///< Update Operation
InsertRequest = 2, ///< Insert Operation
DeleteRequest = 3, ///< Delete Operation
WriteRequest = 4, ///< Write Operation
ReadExclusive = 5, ///< Read exclusive
OpenScanRequest, ///< Scan Operation
OpenRangeScanRequest, ///< Range scan operation
NotDefined2, ///< <i>Missing explanation</i>
NotDefined ///< <i>Missing explanation</i>
};
/**
* <i>Missing explanation</i>
*/
enum ConStatusType {
NotConnected, ///< <i>Missing explanation</i>
Connecting, ///< <i>Missing explanation</i>
Connected, ///< <i>Missing explanation</i>
DisConnecting, ///< <i>Missing explanation</i>
ConnectFailure ///< <i>Missing explanation</i>
};
/**
* <i>Missing explanation</i>
*/
enum CompletionStatus {
NotCompleted, ///< <i>Missing explanation</i>
CompletedSuccess, ///< <i>Missing explanation</i>
CompletedFailure, ///< <i>Missing explanation</i>
DefinitionFailure ///< <i>Missing explanation</i>
};
/**
* Type of fragmentation used for a table
*/
enum FragmentType {
Default = 0, ///< (All is default!)
Single = 1, ///< Only one fragment
All = 2, ///< Default value. One fragment per node group
DistributionGroup = 3, ///< Distribution Group used for fragmentation.
///< One fragment per node group
DistributionKey = 4, ///< Distribution Key used for fragmentation.
///< One fragment per node group.
AllLarge = 5, ///< Sixteen fragments per node group.
DGroupLarge = 6, ///< Distribution Group used for fragmentation.
///< Sixteen fragments per node group
DKeyLarge = 7 ///< Distribution Key used for fragmentation.
///< Sixteen fragments per node group
};
/**
* Type of table or index.
*/
enum TableType {
UndefTableType = 0,
SystemTable = 1, ///< Internal. Table cannot be updated by user
UserTable = 2, ///< Normal application table
UniqueHashIndex = 3, ///< Unique un-ordered hash index
HashIndex = 4, ///< Non-unique un-ordered hash index
UniqueOrderedIndex = 5, ///< Unique ordered index
OrderedIndex = 6 ///< Non-unique ordered index
};
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/**
* Different types of tampering with the NDB Cluster.
* <b>For debugging purposes only.</b>
*/
enum TamperType {
LockGlbChp = 1, ///< Lock GCP
UnlockGlbChp, ///< Unlock GCP
CrashNode, ///< Crash an NDB node
ReadRestartGCI, ///< Request the restart GCI id from NDB Cluster
InsertError ///< Execute an error in NDB Cluster
///< (may crash system)
};
#endif
#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
/**
* @deprecated
*/
enum NullAttributeType {
NoNullTypeDefined = -1,
NotNullAttribute,
NullAttribute,
AttributeDefined
};
#endif
#endif
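
Before the ha_ndbcluster.cc hunks: the patch derives the NDB index type from the MySQL key definition, so a key declared USING HASH skips the extra ordered index (WL#1746/WL#1747). A rough standalone sketch of that mapping (ToyKey and index_type are made-up; only the enum values and the decision mirror the patch):

#include <cstdio>

// Values as declared in the new sql/ha_ndbcluster.h further below.
enum NDB_INDEX_TYPE {
  UNDEFINED_INDEX = 0,
  PRIMARY_KEY_INDEX = 1,
  PRIMARY_KEY_ORDERED_INDEX = 2,
  UNIQUE_INDEX = 3,
  UNIQUE_ORDERED_INDEX = 4,
  ORDERED_INDEX = 5
};

// Made-up stand-in for the server-side key description (the real code
// reads table->key_info[inx].algorithm and the HA_NOSAME flag).
struct ToyKey { bool is_primary; bool is_unique; bool uses_hash; };

// Same decision as get_index_type_from_table() in the hunk below:
// hash-only keys get just the hash index, everything else also gets
// an ordered index so range scans and partial key reads keep working.
static NDB_INDEX_TYPE index_type(const ToyKey &key)
{
  if (key.is_primary)
    return key.uses_hash ? PRIMARY_KEY_INDEX : PRIMARY_KEY_ORDERED_INDEX;
  if (key.is_unique)
    return key.uses_hash ? UNIQUE_INDEX : UNIQUE_ORDERED_INDEX;
  return ORDERED_INDEX;
}

int main()
{
  ToyKey pk_hash=  {true,  true,  true };  // PRIMARY KEY ... USING HASH
  ToyKey uk_plain= {false, true,  false};  // plain UNIQUE KEY
  ToyKey idx=      {false, false, false};  // plain KEY
  std::printf("%d %d %d\n", index_type(pk_hash),
              index_type(uk_plain), index_type(idx));
}
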
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -35,21 +35,16 @@
 #define USE_DISCOVER_ON_STARTUP
 //#define USE_NDB_POOL
-#define USE_EXTRA_ORDERED_INDEX
 // Default value for parallelism
 static const int parallelism= 240;
+// Default value for max number of transactions
+// createable against NDB from this handler
+static const int max_transactions = 256;
 #define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8
-/*
-  All error messages returned from ha_ndbcluster that are
-  not mapped to the corresponding handler(HA_ERR_*) error code
-  have NDB_ERR_CODE_OFFSET added to it so that it does not clash with
-  the handler error codes. The error number is then "restored"
-  to the original error number when get_error_message is called.
-*/
-#define NDB_ERR_CODE_OFFSET 30000
 #define ERR_PRINT(err) \
   DBUG_PRINT("error", ("Error: %d message: %s", err.code, err.message))
@@ -68,10 +63,6 @@ typedef NdbDictionary::Dictionary NDBDICT;
 bool ndbcluster_inited= false;
-#ifdef USE_EXTRA_ORDERED_INDEX
-static const char* unique_suffix= "$unique";
-#endif
 static Ndb* g_ndb= NULL;
 // Handler synchronization
@@ -131,7 +122,7 @@ static int ndb_to_mysql_error(const NdbError *err)
   for (i=0 ; err_map[i].ndb_err != err->code ; i++)
   {
     if (err_map[i].my_err == -1)
-      return err->code+NDB_ERR_CODE_OFFSET;
+      return err->code;
   }
   return err_map[i].my_err;
 }
@@ -173,24 +164,20 @@ int ha_ndbcluster::ndb_err(NdbConnection *trans)
   error message of NDB
 */
-const char* ha_ndbcluster::get_error_message(int *org_error,
-                                             bool *temporary)
+bool ha_ndbcluster::get_error_message(int error,
+                                      String *buf)
 {
   DBUG_ENTER("ha_ndbcluster::get_error_message");
-  DBUG_PRINT("enter", ("error: %d", *org_error));
-  int error= *org_error;
-  if (error < NDB_ERR_CODE_OFFSET)
-    DBUG_RETURN(NULL);
-  error-= NDB_ERR_CODE_OFFSET;
-  DBUG_ASSERT(m_ndb);
+  DBUG_PRINT("enter", ("error: %d", error));
+  if (!m_ndb)
+    DBUG_RETURN(false);
+  // What should be done if not m_ndb is available?
   const NdbError err= m_ndb->getNdbError(error);
-  *temporary= (err.status==NdbError::TemporaryError);
-  *org_error= error;
-  DBUG_PRINT("exit", ("error: %d, msg: %s", error, err.message));
-  DBUG_RETURN(err.message);
+  bool temporary= err.status==NdbError::TemporaryError;
+  buf->set(err.message, strlen(err.message), &my_charset_bin);
+  DBUG_PRINT("exit", ("message: %s, temporary: %d", buf->ptr(), temporary));
+  DBUG_RETURN(temporary);
 }
@@ -348,7 +335,7 @@ int ha_ndbcluster::get_metadata(const char *path)
   const NDBTAB *tab;
   const void *data, *pack_data;
   const char **key_name;
-  uint ndb_columns, mysql_columns, length, pack_length, i;
+  uint ndb_columns, mysql_columns, length, pack_length;
   int error;
   DBUG_ENTER("get_metadata");
   DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, path));
@@ -404,24 +391,28 @@ int ha_ndbcluster::get_metadata(const char *path)
   // All checks OK, lets use the table
   m_table= (void*)tab;
-  for (i= 0; i < MAX_KEY; i++)
-  {
-    m_indextype[i]= UNDEFINED_INDEX;
-    m_unique_index_name[i]= NULL;
-  }
+  DBUG_RETURN(build_index_list());
+}
+
+int ha_ndbcluster::build_index_list()
+{
+  char *name;
+  const char *index_name;
+  static const char* unique_suffix= "$unique";
+  uint i, name_len;
+  DBUG_ENTER("build_index_list");
   // Save information about all known indexes
-  for (i= 0; i < table->keys; i++)
+  for (uint i= 0; i < table->keys; i++)
   {
-    m_indextype[i]= get_index_type_from_table(i);
-#ifdef USE_EXTRA_ORDERED_INDEX
-    if (m_indextype[i] == UNIQUE_INDEX)
+    NDB_INDEX_TYPE idx_type= get_index_type_from_table(i);
+    m_indextype[i]= idx_type;
+    if (idx_type == UNIQUE_ORDERED_INDEX || idx_type == UNIQUE_INDEX)
     {
-      char *name;
-      const char *index_name= get_index_name(i);
-      int name_len= strlen(index_name)+strlen(unique_suffix)+1;
+      index_name= get_index_name(i);
+      name_len= strlen(index_name)+strlen(unique_suffix)+1;
+      // Create name for unique index by appending "$unique";
      if (!(name= my_malloc(name_len, MYF(MY_WME))))
        DBUG_RETURN(2);
      strxnmov(name, name_len, index_name, unique_suffix, NullS);
@@ -429,40 +420,42 @@ int ha_ndbcluster::get_metadata(const char *path)
      DBUG_PRINT("info", ("Created unique index name: %s for index %d",
                          name, i));
    }
-#endif
  }
  DBUG_RETURN(0);
 }
 /*
   Decode the type of an index from information
   provided in table object
 */
-NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_table(uint index_no) const
+NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_table(uint inx) const
 {
-  if (index_no == table->primary_key)
-    return PRIMARY_KEY_INDEX;
+  bool is_hash_index= (table->key_info[inx].algorithm == HA_KEY_ALG_HASH);
+  if (inx == table->primary_key)
+    return is_hash_index ? PRIMARY_KEY_INDEX : PRIMARY_KEY_ORDERED_INDEX;
   else
-    return ((table->key_info[index_no].flags & HA_NOSAME) ?
-            UNIQUE_INDEX :
+    return ((table->key_info[inx].flags & HA_NOSAME) ?
+            (is_hash_index ? UNIQUE_INDEX : UNIQUE_ORDERED_INDEX) :
             ORDERED_INDEX);
 }
 void ha_ndbcluster::release_metadata()
 {
-  int i;
+  uint i;
   DBUG_ENTER("release_metadata");
   DBUG_PRINT("enter", ("m_tabname: %s", m_tabname));
   m_table= NULL;
+  // Release index list
   for (i= 0; i < MAX_KEY; i++)
   {
-    my_free((char*)m_unique_index_name[i], MYF(MY_ALLOW_ZERO_PTR));
+    if (m_unique_index_name[i])
+      my_free((char*)m_unique_index_name[i], MYF(0));
    m_unique_index_name[i]= NULL;
  }
@@ -481,6 +474,9 @@ static const ulong index_type_flags[]=
   0,
   /* PRIMARY_KEY_INDEX */
+  HA_NOT_READ_PREFIX_LAST,
+  /* PRIMARY_KEY_ORDERED_INDEX */
   /*
     Enable HA_KEY_READ_ONLY when "sorted" indexes are supported,
     thus ORDERD BY clauses can be optimized by reading directly
@@ -491,6 +487,9 @@ static const ulong index_type_flags[]=
   /* UNIQUE_INDEX */
   HA_NOT_READ_PREFIX_LAST,
+  /* UNIQUE_ORDERED_INDEX */
+  HA_NOT_READ_PREFIX_LAST,
   /* ORDERED_INDEX */
   HA_READ_NEXT |
   HA_READ_PREV |
@@ -506,15 +505,8 @@ inline const char* ha_ndbcluster::get_index_name(uint idx_no) const
 inline const char* ha_ndbcluster::get_unique_index_name(uint idx_no) const
 {
-#ifdef USE_EXTRA_ORDERED_INDEX
+  DBUG_ASSERT(idx_no < MAX_KEY);
+  DBUG_ASSERT(m_unique_index_name[idx_no]);
   return m_unique_index_name[idx_no];
-#else
-  return get_index_name(idx_no);
-#endif
 }
 inline NDB_INDEX_TYPE ha_ndbcluster::get_index_type(uint idx_no) const
 {
@@ -1521,7 +1513,7 @@ int ha_ndbcluster::index_read(byte *buf,
   start_key.key= key;
   start_key.length= key_len;
   start_key.flag= find_flag;
-  DBUG_RETURN(read_range_first(&start_key, NULL, true));
+  DBUG_RETURN(read_range_first(&start_key, NULL, false, true));
 }
@@ -1573,18 +1565,19 @@ int ha_ndbcluster::index_last(byte *buf)
 int ha_ndbcluster::read_range_first(const key_range *start_key,
                                     const key_range *end_key,
-                                    bool sorted)
+                                    bool eq_range, bool sorted)
 {
   KEY* key_info;
   int error= 1;
   byte* buf= table->record[0];
   DBUG_ENTER("ha_ndbcluster::read_range_first");
-  DBUG_PRINT("info", ("sorted: %d", sorted));
+  DBUG_PRINT("info", ("eq_range: %d, sorted: %d", eq_range, sorted));
   if (m_active_cursor)
     close_scan();
   switch (get_index_type(active_index)){
+  case PRIMARY_KEY_ORDERED_INDEX:
   case PRIMARY_KEY_INDEX:
     key_info= table->key_info + active_index;
     if (start_key &&
@@ -1595,6 +1588,7 @@ int ha_ndbcluster::read_range_first(const key_range *start_key,
       DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error);
     }
     break;
+  case UNIQUE_ORDERED_INDEX:
   case UNIQUE_INDEX:
     key_info= table->key_info + active_index;
     if (start_key &&
@@ -1618,7 +1612,7 @@ int ha_ndbcluster::read_range_first(const key_range *start_key,
 }
-int ha_ndbcluster::read_range_next(bool eq_range)
+int ha_ndbcluster::read_range_next()
 {
   DBUG_ENTER("ha_ndbcluster::read_range_next");
   DBUG_RETURN(next_result(table->record[0]));
@@ -2042,6 +2036,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
   if (lock_type != F_UNLCK)
   {
+    DBUG_PRINT("info", ("lock_type != F_UNLCK"));
     if (!thd->transaction.ndb_lock_count++)
     {
       PRINT_OPTION_FLAGS(thd);
@@ -2114,6 +2109,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
   }
   else
   {
+    DBUG_PRINT("info", ("lock_type == F_UNLCK"));
    if (!--thd->transaction.ndb_lock_count)
    {
      DBUG_PRINT("trans", ("Last external_lock"));
@@ -2390,15 +2386,8 @@ int ha_ndbcluster::create(const char *name,
   DBUG_PRINT("info", ("Table %s/%s created successfully",
                       m_dbname, m_tabname));
-  // Fetch table from NDB, check that it exists
-  const NDBTAB *tab2= dict->getTable(m_tabname);
-  if (tab2 == NULL)
-  {
-    const NdbError err= dict->getNdbError();
-    ERR_PRINT(err);
-    my_errno= ndb_to_mysql_error(&err);
+  if ((my_errno= build_index_list()))
     DBUG_RETURN(my_errno);
-  }
   // Create secondary indexes
   KEY* key_info= form->key_info;
@@ -2407,17 +2396,29 @@ int ha_ndbcluster::create(const char *name,
   {
     int error= 0;
     DBUG_PRINT("info", ("Index %u: %s", i, *key_name));
-    if (i == form->primary_key)
-    {
-#ifdef USE_EXTRA_ORDERED_INDEX
-      error= create_ordered_index(*key_name, key_info);
-#endif
-    }
-    else if (key_info->flags & HA_NOSAME)
-      error= create_unique_index(*key_name, key_info);
-    else
-      error= create_ordered_index(*key_name, key_info);
+    switch (get_index_type_from_table(i)){
+    case PRIMARY_KEY_INDEX:
+      // Do nothing, already created
+      break;
+    case PRIMARY_KEY_ORDERED_INDEX:
+      error= create_ordered_index(*key_name, key_info);
+      break;
+    case UNIQUE_ORDERED_INDEX:
+      if (!(error= create_ordered_index(*key_name, key_info)))
+        error= create_unique_index(get_unique_index_name(i), key_info);
+      break;
+    case UNIQUE_INDEX:
+      error= create_unique_index(get_unique_index_name(i), key_info);
+      break;
+    case ORDERED_INDEX:
+      error= create_ordered_index(*key_name, key_info);
+      break;
+    default:
+      DBUG_ASSERT(false);
+      break;
+    }
     if (error)
     {
@@ -2442,29 +2443,9 @@ int ha_ndbcluster::create_ordered_index(const char *name,
 int ha_ndbcluster::create_unique_index(const char *name,
                                        KEY *key_info)
 {
-  int error;
-  const char* unique_name= name;
-  DBUG_ENTER("create_unique_index");
-#ifdef USE_EXTRA_ORDERED_INDEX
-  char buf[FN_HEADLEN];
-  strxnmov(buf, FN_HEADLEN, name, unique_suffix, NullS);
-  unique_name= buf;
-#endif
-  error= create_index(unique_name, key_info, true);
-  if (error)
-    DBUG_RETURN(error);
-#ifdef USE_EXTRA_ORDERED_INDEX
-  /*
-    If unique index contains more then one attribute
-    an ordered index should be created to support
-    partial key search
-  */
-  error= create_ordered_index(name, key_info);
-#endif
-  DBUG_RETURN(error);
+  DBUG_ENTER("create_unique_index");
+  DBUG_RETURN(create_index(name, key_info, true));
 }
@@ -2751,7 +2732,7 @@ Ndb* ha_ndbcluster::seize_ndb()
 #else
   ndb= new Ndb("");
 #endif
-  if (ndb->init(NDB_MAX_TRANSACTIONS) != 0)
+  if (ndb->init(max_transactions) != 0)
   {
     ERR_PRINT(ndb->getNdbError());
     /*
@@ -3051,49 +3032,27 @@ ha_rows
 ha_ndbcluster::records_in_range(uint inx, key_range *min_key,
                                 key_range *max_key)
 {
-  ha_rows records= 10; /* Good guess when you don't know anything */
   KEY *key_info= table->key_info + inx;
   uint key_length= key_info->key_length;
+  NDB_INDEX_TYPE idx_type= get_index_type(inx);
   DBUG_ENTER("records_in_range");
   DBUG_PRINT("enter", ("inx: %u", inx));
-  DBUG_DUMP("start_key", min_key->key, min_key->length);
-  DBUG_DUMP("end_key", max_key->key, max_key->length);
-  DBUG_PRINT("enter", ("start_search_flag: %u end_search_flag: %u",
-                       min_key->flag, max_key->flag));
-#ifndef USE_EXTRA_ORDERED_INDEX
-  /*
-    Check that start_key_len is equal to
-    the length of the used index and
-    prevent partial scan/read of hash indexes by returning HA_POS_ERROR
-  */
-  NDB_INDEX_TYPE idx_type= get_index_type(inx);
-  if ((idx_type == UNIQUE_INDEX || idx_type == PRIMARY_KEY_INDEX) &&
-      min_key->length < key_length)
-  {
-    DBUG_PRINT("warning", ("Tried to use index which required"
-                           "full key length: %d, HA_POS_ERROR",
-                           key_length));
-    records= HA_POS_ERROR;
-  }
-#else
-  /*
-    Extra ordered indexes are created primarily
-    to support partial key scan/read and range scans of hash indexes.
-    I.e. the ordered index are used instead of the hash indexes for
-    these queries.
-  */
-  NDB_INDEX_TYPE idx_type= get_index_type(inx);
-  if ((idx_type == UNIQUE_INDEX || idx_type == PRIMARY_KEY_INDEX) &&
-      start_key_len == key_length)
-  {
-    // this is a "const" table which returns only one record!
-    records= 1;
-  }
-#endif
-  DBUG_PRINT("exit", ("records: %d", records));
-  DBUG_RETURN(records);
+  // Prevent partial read of hash indexes by returning HA_POS_ERROR
+  if ((idx_type == UNIQUE_INDEX || idx_type == PRIMARY_KEY_INDEX) &&
+      ((min_key && min_key->length < key_length) ||
+       (max_key && max_key->length < key_length)))
+    DBUG_RETURN(HA_POS_ERROR);
+  // Read from hash index with full key
+  // This is a "const" table which returns only one record!
+  if ((idx_type != ORDERED_INDEX) &&
+      ((min_key && min_key->length == key_length) ||
+       (max_key && max_key->length == key_length)))
+    DBUG_RETURN(1);
+  DBUG_RETURN(10); /* Good guess when you don't know anything */
 }
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -37,8 +37,10 @@ class NdbResultSet; // Forward declaration
 typedef enum ndb_index_type {
   UNDEFINED_INDEX = 0,
   PRIMARY_KEY_INDEX = 1,
-  UNIQUE_INDEX = 2,
-  ORDERED_INDEX = 3
+  PRIMARY_KEY_ORDERED_INDEX = 2,
+  UNIQUE_INDEX = 3,
+  UNIQUE_ORDERED_INDEX = 4,
+  ORDERED_INDEX = 5
 } NDB_INDEX_TYPE;
@@ -78,10 +80,10 @@ class ha_ndbcluster: public handler
   void position(const byte *record);
   int read_range_first(const key_range *start_key,
                        const key_range *end_key,
-                       bool sorted);
-  int read_range_next(bool eq_range);
-  const char* get_error_message(int *error, bool *temporary);
+                       bool eq_range, bool sorted);
+  int read_range_next();
+  bool get_error_message(int error, String *buf);
   void info(uint);
   int extra(enum ha_extra_function operation);
   int extra_opt(enum ha_extra_function operation, ulong cache_size);
@@ -117,6 +119,8 @@ class ha_ndbcluster: public handler
   const char* index_type(uint key_number) {
     switch (get_index_type(key_number)) {
     case ORDERED_INDEX:
+    case UNIQUE_ORDERED_INDEX:
+    case PRIMARY_KEY_ORDERED_INDEX:
       return "BTREE";
     case UNIQUE_INDEX:
     case PRIMARY_KEY_INDEX:
@@ -141,6 +145,7 @@ class ha_ndbcluster: public handler
   int create_ordered_index(const char *name, KEY *key_info);
   int create_unique_index(const char *name, KEY *key_info);
   int initialize_autoincrement(const void* table);
+  int build_index_list();
   int get_metadata(const char* path);
   void release_metadata();
   const char* get_index_name(uint idx_no) const;
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -1123,14 +1123,15 @@ void handler::print_error(int error, myf errflag)
     /* The error was "unknown" to this function.
        Ask handler if it has got a message for this error */
     bool temporary= FALSE;
-    const char* msg= get_error_message(&error, &temporary);
-    if (msg)
+    String str;
+    temporary= get_error_message(error, &str);
+    if (!str.is_empty())
     {
       const char* engine= ha_get_storage_engine(table->db_type);
       if (temporary)
-        my_error(ER_GET_TEMPORARY_ERRMSG,MYF(0),error,msg,engine);
+        my_error(ER_GET_TEMPORARY_ERRMSG,MYF(0),error,str.ptr(),engine);
       else
-        my_error(ER_GET_ERRMSG,MYF(0),error,msg,engine);
+        my_error(ER_GET_ERRMSG,MYF(0),error,str.ptr(),engine);
     }
     else
       my_error(ER_GET_ERRNO,errflag,error);
@@ -1146,15 +1147,15 @@ void handler::print_error(int error, myf errflag)
   Return an error message specific to this handler
   SYNOPSIS
-    error          [in/out] error code previously returned by handler
-    temporary      [out] temporary error, transaction should be retried if true
-  The returned pointer to error message should not be freed.
+    error          error code previously returned by handler
+    buf            Pointer to String where to add error message
+  Returns true if this is a temporary error
 */
-const char* handler::get_error_message(int *error, bool *temporary)
+bool handler::get_error_message(int error, String* buf)
 {
-  return NULL;
+  return false;
 }
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -288,7 +288,7 @@ class handler :public Sql_alloc
   void update_timestamp(byte *record);
   void update_auto_increment();
   virtual void print_error(int error, myf errflag);
-  virtual const char* get_error_message(int *error, bool *temporary);
+  virtual bool get_error_message(int error, String *buf);
   uint get_dup_key(int error);
   void change_table_ptr(TABLE *table_arg) { table=table_arg; }
   virtual double scan_time()