Commit f896a690 authored by tomas@poseidon.(none)

Merge tulin@bk-internal.mysql.com:/home/bk/mysql-4.1 into poseidon.(none):/home/tomas/mysql-4.1-ndb-merge

Parents: 020731f7 62c5d4a3
@@ -18,12 +18,12 @@ col5 enum('PENDING', 'ACTIVE', 'DISABLED') not null,
col6 int not null, to_be_deleted int) ENGINE=ndbcluster;
show table status;
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ndbcluster 9 Dynamic 100 0 0 NULL 0 0 1 NULL NULL NULL latin1_swedish_ci NULL
+t1 ndbcluster 9 Dynamic 0 0 0 NULL 0 0 1 NULL NULL NULL latin1_swedish_ci NULL
insert into t1 values
(0,4,3,5,"PENDING",1,7),(NULL,4,3,5,"PENDING",1,7),(31,4,3,5,"PENDING",1,7), (7,4,3,5,"PENDING",1,7), (NULL,4,3,5,"PENDING",1,7), (100,4,3,5,"PENDING",1,7), (99,4,3,5,"PENDING",1,7), (8,4,3,5,"PENDING",1,7), (NULL,4,3,5,"PENDING",1,7);
show table status;
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ndbcluster 9 Dynamic 100 0 0 NULL 0 0 102 NULL NULL NULL latin1_swedish_ci NULL
+t1 ndbcluster 9 Dynamic 9 0 0 NULL 0 0 102 NULL NULL NULL latin1_swedish_ci NULL
select * from t1 order by col1;
col1 col2 col3 col4 col5 col6 to_be_deleted
0 4 3 5 PENDING 1 7
@@ -43,7 +43,7 @@ change column col2 fourth varchar(30) not null after col3,
modify column col6 int not null first;
show table status;
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ndbcluster 9 Dynamic 100 0 0 NULL 0 0 102 NULL NULL NULL latin1_swedish_ci NULL
+t1 ndbcluster 9 Dynamic 9 0 0 NULL 0 0 102 NULL NULL NULL latin1_swedish_ci NULL
select * from t1 order by col1;
col6 col1 col3 fourth col4 col4_5 col5 col7 col8
1 0 3 4 5 PENDING 0000-00-00 00:00:00
@@ -58,7 +58,7 @@ col6 col1 col3 fourth col4 col4_5 col5 col7 col8
insert into t1 values (2, NULL,4,3,5,99,"PENDING","EXTRA",'2004-01-01 00:00:00');
show table status;
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
-t1 ndbcluster 9 Dynamic 100 0 0 NULL 0 0 103 NULL NULL NULL latin1_swedish_ci NULL
+t1 ndbcluster 9 Dynamic 10 0 0 NULL 0 0 103 NULL NULL NULL latin1_swedish_ci NULL
select * from t1 order by col1;
col6 col1 col3 fourth col4 col4_5 col5 col7 col8
1 0 3 4 5 PENDING 0000-00-00 00:00:00
...
@@ -150,7 +150,7 @@ insert into t1 values(9,'b9',999,'dd9');
commit;
explain select * from t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 100
+1 SIMPLE t1 ALL NULL NULL NULL NULL 9
select * from t1 order by a;
a b c d
1 b1 111 dd1
@@ -185,7 +185,7 @@ insert into t1 values(2,@b2,222,@d2);
commit;
explain select * from t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 100
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 order by a;
a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
...
@@ -1066,6 +1066,9 @@ public:
  Dictionary(NdbDictionaryImpl&);
  const Table * getIndexTable(const char * indexName,
                              const char * tableName);
+public:
+  const Table * getTable(const char * name, void **data);
+  void set_local_table_data_size(unsigned sz);
};
};
...
@@ -33,6 +33,10 @@
 * Section names
 ****************************************************************************/
+#define DB_TOKEN_PRINT "ndbd(DB)"
+#define MGM_TOKEN_PRINT "ndb_mgmd(MGM)"
+#define API_TOKEN_PRINT "mysqld(API)"
#define DB_TOKEN "DB"
#define MGM_TOKEN "MGM"
#define API_TOKEN "API"
@@ -327,7 +331,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_SYS_PRIMARY_MGM_NODE,
  "PrimaryMGMNode",
  "SYSTEM",
-  "Node id of Primary "MGM_TOKEN" node",
+  "Node id of Primary "MGM_TOKEN_PRINT" node",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT,
@@ -388,7 +392,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_NODE_ID,
  "Id",
  DB_TOKEN,
-  "Number identifying the database node ("DB_TOKEN")",
+  "Number identifying the database node ("DB_TOKEN_PRINT")",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT,
@@ -484,7 +488,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_DB_NO_INDEX_OPS,
  "MaxNoOfConcurrentIndexOperations",
  DB_TOKEN,
-  "Total number of index operations that can execute simultaneously on one "DB_TOKEN" node",
+  "Total number of index operations that can execute simultaneously on one "DB_TOKEN_PRINT" node",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT,
@@ -509,7 +513,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_DB_NO_TRIGGER_OPS,
  "MaxNoOfFiredTriggers",
  DB_TOKEN,
-  "Total number of triggers that can fire simultaneously in one "DB_TOKEN" node",
+  "Total number of triggers that can fire simultaneously in one "DB_TOKEN_PRINT" node",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT,
@@ -568,7 +572,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_DB_STOP_ON_ERROR,
  "StopOnError",
  DB_TOKEN,
-  "If set to N, "DB_TOKEN" automatically restarts/recovers in case of node failure",
+  "If set to N, "DB_TOKEN_PRINT" automatically restarts/recovers in case of node failure",
  ConfigInfo::USED,
  true,
  ConfigInfo::BOOL,
@@ -640,7 +644,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_DB_NO_TRANSACTIONS,
  "MaxNoOfConcurrentTransactions",
  DB_TOKEN,
-  "Max number of transaction executing concurrently on the "DB_TOKEN" node",
+  "Max number of transaction executing concurrently on the "DB_TOKEN_PRINT" node",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT,
@@ -652,7 +656,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_DB_NO_SCANS,
  "MaxNoOfConcurrentScans",
  DB_TOKEN,
-  "Max number of scans executing concurrently on the "DB_TOKEN" node",
+  "Max number of scans executing concurrently on the "DB_TOKEN_PRINT" node",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT,
@@ -664,7 +668,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_DB_TRANS_BUFFER_MEM,
  "TransactionBufferMemory",
  DB_TOKEN,
-  "Dynamic buffer space (in bytes) for key and attribute data allocated for each "DB_TOKEN" node",
+  "Dynamic buffer space (in bytes) for key and attribute data allocated for each "DB_TOKEN_PRINT" node",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT,
@@ -676,7 +680,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_DB_INDEX_MEM,
  "IndexMemory",
  DB_TOKEN,
-  "Number bytes on each "DB_TOKEN" node allocated for storing indexes",
+  "Number bytes on each "DB_TOKEN_PRINT" node allocated for storing indexes",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT64,
@@ -688,7 +692,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_DB_DATA_MEM,
  "DataMemory",
  DB_TOKEN,
-  "Number bytes on each "DB_TOKEN" node allocated for storing data",
+  "Number bytes on each "DB_TOKEN_PRINT" node allocated for storing data",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT64,
@@ -700,7 +704,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_DB_UNDO_INDEX_BUFFER,
  "UndoIndexBuffer",
  DB_TOKEN,
-  "Number bytes on each "DB_TOKEN" node allocated for writing UNDO logs for index part",
+  "Number bytes on each "DB_TOKEN_PRINT" node allocated for writing UNDO logs for index part",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT,
@@ -712,7 +716,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_DB_UNDO_DATA_BUFFER,
  "UndoDataBuffer",
  DB_TOKEN,
-  "Number bytes on each "DB_TOKEN" node allocated for writing UNDO logs for data part",
+  "Number bytes on each "DB_TOKEN_PRINT" node allocated for writing UNDO logs for data part",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT,
@@ -724,7 +728,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_DB_REDO_BUFFER,
  "RedoBuffer",
  DB_TOKEN,
-  "Number bytes on each "DB_TOKEN" node allocated for writing REDO logs",
+  "Number bytes on each "DB_TOKEN_PRINT" node allocated for writing REDO logs",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT,
@@ -736,7 +740,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_DB_LONG_SIGNAL_BUFFER,
  "LongMessageBuffer",
  DB_TOKEN,
-  "Number bytes on each "DB_TOKEN" node allocated for internal long messages",
+  "Number bytes on each "DB_TOKEN_PRINT" node allocated for internal long messages",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT,
@@ -784,7 +788,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_DB_HEARTBEAT_INTERVAL,
  "HeartbeatIntervalDbDb",
  DB_TOKEN,
-  "Time between "DB_TOKEN"-"DB_TOKEN" heartbeats. "DB_TOKEN" considered dead after 3 missed HBs",
+  "Time between "DB_TOKEN_PRINT"-"DB_TOKEN_PRINT" heartbeats. "DB_TOKEN_PRINT" considered dead after 3 missed HBs",
  ConfigInfo::USED,
  true,
  ConfigInfo::INT,
@@ -796,7 +800,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_DB_API_HEARTBEAT_INTERVAL,
  "HeartbeatIntervalDbApi",
  DB_TOKEN,
-  "Time between "API_TOKEN"-"DB_TOKEN" heartbeats. "API_TOKEN" connection closed after 3 missed HBs",
+  "Time between "API_TOKEN_PRINT"-"DB_TOKEN_PRINT" heartbeats. "API_TOKEN_PRINT" connection closed after 3 missed HBs",
  ConfigInfo::USED,
  true,
  ConfigInfo::INT,
@@ -832,7 +836,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_DB_NO_REDOLOG_FILES,
  "NoOfFragmentLogFiles",
  DB_TOKEN,
-  "No of 16 Mbyte Redo log files in each of 4 file sets belonging to "DB_TOKEN" node",
+  "No of 16 Mbyte Redo log files in each of 4 file sets belonging to "DB_TOKEN_PRINT" node",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT,
@@ -844,7 +848,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  KEY_INTERNAL,
  "MaxNoOfOpenFiles",
  DB_TOKEN,
-  "Max number of files open per "DB_TOKEN" node.(One thread is created per file)",
+  "Max number of files open per "DB_TOKEN_PRINT" node.(One thread is created per file)",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT,
@@ -998,7 +1002,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_DB_FILESYSTEM_PATH,
  "FileSystemPath",
  DB_TOKEN,
-  "Path to directory where the "DB_TOKEN" node stores its data (directory must exist)",
+  "Path to directory where the "DB_TOKEN_PRINT" node stores its data (directory must exist)",
  ConfigInfo::USED,
  false,
  ConfigInfo::STRING,
@@ -1288,7 +1292,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_NODE_ID,
  "Id",
  API_TOKEN,
-  "Number identifying application node ("API_TOKEN")",
+  "Number identifying application node ("API_TOKEN_PRINT")",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT,
@@ -1311,7 +1315,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_NODE_ARBIT_RANK,
  "ArbitrationRank",
  API_TOKEN,
-  "If 0, then "API_TOKEN" is not arbitrator. Kernel selects arbitrators in order 1, 2",
+  "If 0, then "API_TOKEN_PRINT" is not arbitrator. Kernel selects arbitrators in order 1, 2",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT,
@@ -1419,7 +1423,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_NODE_ID,
  "Id",
  MGM_TOKEN,
-  "Number identifying the management server node ("MGM_TOKEN")",
+  "Number identifying the management server node ("MGM_TOKEN_PRINT")",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT,
@@ -1489,7 +1493,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_NODE_ARBIT_RANK,
  "ArbitrationRank",
  MGM_TOKEN,
-  "If 0, then "MGM_TOKEN" is not arbitrator. Kernel selects arbitrators in order 1, 2",
+  "If 0, then "MGM_TOKEN_PRINT" is not arbitrator. Kernel selects arbitrators in order 1, 2",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT,
@@ -1550,7 +1554,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_CONNECTION_NODE_1,
  "NodeId1",
  "TCP",
-  "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection",
+  "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
  ConfigInfo::USED,
  false,
  ConfigInfo::STRING,
@@ -1561,7 +1565,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_CONNECTION_NODE_2,
  "NodeId2",
  "TCP",
-  "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection",
+  "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
  ConfigInfo::USED,
  false,
  ConfigInfo::STRING,
@@ -1681,7 +1685,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_CONNECTION_NODE_1,
  "NodeId1",
  "SHM",
-  "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection",
+  "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
  ConfigInfo::USED,
  false,
  ConfigInfo::STRING,
@@ -1704,7 +1708,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_CONNECTION_NODE_2,
  "NodeId2",
  "SHM",
-  "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection",
+  "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
  ConfigInfo::USED,
  false,
  ConfigInfo::STRING,
@@ -1801,7 +1805,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_CONNECTION_NODE_1,
  "NodeId1",
  "SCI",
-  "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection",
+  "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT,
@@ -1813,7 +1817,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_CONNECTION_NODE_2,
  "NodeId2",
  "SCI",
-  "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection",
+  "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT,
@@ -1956,7 +1960,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_CONNECTION_NODE_1,
  "NodeId1",
  "OSE",
-  "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection",
+  "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT,
@@ -1968,7 +1972,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
  CFG_CONNECTION_NODE_2,
  "NodeId2",
  "OSE",
-  "Id of node ("DB_TOKEN", "API_TOKEN" or "MGM_TOKEN") on one side of the connection",
+  "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
  ConfigInfo::USED,
  false,
  ConfigInfo::INT,
...
@@ -21,6 +21,31 @@
#include <NdbCondition.h>
#include <NdbSleep.h>
+Ndb_local_table_info *
+Ndb_local_table_info::create(NdbTableImpl *table_impl, Uint32 sz)
+{
+  void *data= malloc(sizeof(NdbTableImpl)+sz-1);
+  if (data == 0)
+    return 0;
+  memset(data,0,sizeof(NdbTableImpl)+sz-1);
+  new (data) Ndb_local_table_info(table_impl);
+  return (Ndb_local_table_info *) data;
+}
+void Ndb_local_table_info::destroy(Ndb_local_table_info *info)
+{
+  free((void *)info);
+}
+Ndb_local_table_info::Ndb_local_table_info(NdbTableImpl *table_impl)
+{
+  m_table_impl= table_impl;
+}
+Ndb_local_table_info::~Ndb_local_table_info()
+{
+}
LocalDictCache::LocalDictCache(){
  m_tableHash.createHashTable();
}
@@ -29,22 +54,24 @@ LocalDictCache::~LocalDictCache(){
  m_tableHash.releaseHashTable();
}
-NdbTableImpl *
+Ndb_local_table_info *
LocalDictCache::get(const char * name){
  const Uint32 len = strlen(name);
  return m_tableHash.getData(name, len);
}
void
-LocalDictCache::put(const char * name, NdbTableImpl * tab){
-  const Uint32 id = tab->m_tableId;
-  m_tableHash.insertKey(name, strlen(name), id, tab);
+LocalDictCache::put(const char * name, Ndb_local_table_info * tab_info){
+  const Uint32 id = tab_info->m_table_impl->m_tableId;
+  m_tableHash.insertKey(name, strlen(name), id, tab_info);
}
void
LocalDictCache::drop(const char * name){
-  m_tableHash.deleteKey(name, strlen(name));
+  Ndb_local_table_info *info= m_tableHash.deleteKey(name, strlen(name));
+  DBUG_ASSERT(info != 0);
+  Ndb_local_table_info::destroy(info);
}
/*****************************************************************
...
@@ -27,6 +27,17 @@
#include <Ndb.hpp>
#include "NdbLinHash.hpp"
+class Ndb_local_table_info {
+public:
+  static Ndb_local_table_info *create(NdbTableImpl *table_impl, Uint32 sz=0);
+  static void destroy(Ndb_local_table_info *);
+  NdbTableImpl *m_table_impl;
+  char m_local_data[1];
+private:
+  Ndb_local_table_info(NdbTableImpl *table_impl);
+  ~Ndb_local_table_info();
+};
/**
 * A non thread safe dict cache
 */
@@ -35,12 +46,12 @@ public:
  LocalDictCache();
  ~LocalDictCache();
-  NdbTableImpl * get(const char * name);
-  void put(const char * name, NdbTableImpl *);
+  Ndb_local_table_info * get(const char * name);
+  void put(const char * name, Ndb_local_table_info *);
  void drop(const char * name);
-  NdbLinHash<NdbTableImpl> m_tableHash; // On name
+  NdbLinHash<Ndb_local_table_info> m_tableHash; // On name
};
/**
...
@@ -753,9 +753,11 @@ Uint64
Ndb::getAutoIncrementValue(const char* aTableName, Uint32 cacheSize)
{
  DEBUG_TRACE("getAutoIncrementValue");
-  const NdbTableImpl* table = theDictionary->getTable(aTableName);
-  if (table == 0)
+  const char * internalTableName = internalizeTableName(aTableName);
+  Ndb_local_table_info *info= theDictionary->get_local_table_info(internalTableName);
+  if (info == 0)
    return ~0;
+  const NdbTableImpl *table= info->m_table_impl;
  Uint64 tupleId = getTupleIdFromNdb(table->m_tableId, cacheSize);
  return tupleId;
}
@@ -832,11 +834,13 @@ bool
Ndb::setAutoIncrementValue(const char* aTableName, Uint64 val, bool increase)
{
  DEBUG_TRACE("setAutoIncrementValue " << val);
-  const NdbTableImpl* table = theDictionary->getTable(aTableName);
-  if (table == 0) {
+  const char * internalTableName= internalizeTableName(aTableName);
+  Ndb_local_table_info *info= theDictionary->get_local_table_info(internalTableName);
+  if (info == 0) {
    theError= theDictionary->getNdbError();
    return false;
  }
+  const NdbTableImpl* table= info->m_table_impl;
  return setTupleIdInNdb(table->m_tableId, val, increase);
}
...
@@ -681,13 +681,23 @@ NdbDictionary::Dictionary::alterTable(const Table & t){
}
const NdbDictionary::Table *
-NdbDictionary::Dictionary::getTable(const char * name){
-  NdbTableImpl * t = m_impl.getTable(name);
+NdbDictionary::Dictionary::getTable(const char * name, void **data){
+  NdbTableImpl * t = m_impl.getTable(name, data);
  if(t)
    return t->m_facade;
  return 0;
}
+void NdbDictionary::Dictionary::set_local_table_data_size(unsigned sz)
+{
+  m_impl.m_local_table_data_size= sz;
+}
+const NdbDictionary::Table *
+NdbDictionary::Dictionary::getTable(const char * name){
+  return getTable(name, 0);
+}
void
NdbDictionary::Dictionary::invalidateTable(const char * name){
  NdbTableImpl * t = m_impl.getTable(name);
...
@@ -589,17 +589,19 @@ NdbDictionaryImpl::NdbDictionaryImpl(Ndb &ndb,
  m_ndb(ndb)
{
  m_globalHash = 0;
+  m_local_table_data_size= 0;
}
static int f_dictionary_count = 0;
NdbDictionaryImpl::~NdbDictionaryImpl()
{
-  NdbElement_t<NdbTableImpl> * curr = m_localHash.m_tableHash.getNext(0);
+  NdbElement_t<Ndb_local_table_info> * curr = m_localHash.m_tableHash.getNext(0);
  if(m_globalHash){
    while(curr != 0){
      m_globalHash->lock();
-      m_globalHash->release(curr->theData);
+      m_globalHash->release(curr->theData->m_table_impl);
+      Ndb_local_table_info::destroy(curr->theData);
      m_globalHash->unlock();
      curr = m_localHash.m_tableHash.getNext(curr);
@@ -620,7 +622,37 @@ NdbDictionaryImpl::~NdbDictionaryImpl()
  }
}
+Ndb_local_table_info *
+NdbDictionaryImpl::fetchGlobalTableImpl(const char * internalTableName)
+{
+  NdbTableImpl *impl;
+  m_globalHash->lock();
+  impl = m_globalHash->get(internalTableName);
+  m_globalHash->unlock();
+  if (impl == 0){
+    impl = m_receiver.getTable(internalTableName, m_ndb.usingFullyQualifiedNames());
+    m_globalHash->lock();
+    m_globalHash->put(internalTableName, impl);
+    m_globalHash->unlock();
+    if(impl == 0){
+      return 0;
+    }
+  }
+  Ndb_local_table_info *info= Ndb_local_table_info::create(impl, m_local_table_data_size);
+  m_localHash.put(internalTableName, info);
+  m_ndb.theFirstTupleId[impl->getTableId()] = ~0;
+  m_ndb.theLastTupleId[impl->getTableId()] = ~0;
+  addBlobTables(*impl);
+  return info;
+}
#if 0
bool
@@ -1504,7 +1536,6 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
    : createTable(&tSignal, ptr);
  if (!alter && haveAutoIncrement) {
-    // if (!ndb.setAutoIncrementValue(impl.m_internalName.c_str(), autoIncrementValue)) {
    if (!ndb.setAutoIncrementValue(impl.m_externalName.c_str(), autoIncrementValue)) {
      if (ndb.theError.code == 0) {
        m_error.code = 4336;
@@ -1775,11 +1806,12 @@ NdbIndexImpl*
NdbDictionaryImpl::getIndexImpl(const char * externalName,
                                const char * internalName)
{
-  NdbTableImpl* tab = getTableImpl(internalName);
-  if(tab == 0){
+  Ndb_local_table_info * info = get_local_table_info(internalName);
+  if(info == 0){
    m_error.code = 4243;
    return 0;
  }
+  NdbTableImpl * tab = info->m_table_impl;
  if(tab->m_indexType == NdbDictionary::Index::Undefined){
    // Not an index
...
@@ -390,8 +390,8 @@ public:
  int listObjects(List& list, NdbDictionary::Object::Type type);
  int listIndexes(List& list, const char * tableName);
-  NdbTableImpl * getTable(const char * tableName);
-  NdbTableImpl * getTableImpl(const char * internalName);
+  NdbTableImpl * getTable(const char * tableName, void **data= 0);
+  Ndb_local_table_info * get_local_table_info(const char * internalName);
  NdbIndexImpl * getIndex(const char * indexName,
                          const char * tableName);
  NdbIndexImpl * getIndexImpl(const char * name, const char * internalName);
@@ -400,6 +400,7 @@ public:
  const NdbError & getNdbError() const;
  NdbError m_error;
+  Uint32 m_local_table_data_size;
  LocalDictCache m_localHash;
  GlobalDictCache * m_globalHash;
@@ -410,6 +411,8 @@ public:
  NdbDictInterface m_receiver;
  Ndb & m_ndb;
+private:
+  Ndb_local_table_info * fetchGlobalTableImpl(const char * internalName);
};
inline
@@ -598,45 +601,28 @@ NdbDictionaryImpl::getImpl(const NdbDictionary::Dictionary & t){
inline
NdbTableImpl *
-NdbDictionaryImpl::getTable(const char * tableName)
+NdbDictionaryImpl::getTable(const char * tableName, void **data)
{
  const char * internalTableName = m_ndb.internalizeTableName(tableName);
-  return getTableImpl(internalTableName);
+  Ndb_local_table_info *info= get_local_table_info(internalTableName);
+  if (info == 0) {
+    return 0;
+  }
+  if (data) {
+    *data= info->m_local_data;
+  }
+  return info->m_table_impl;
}
inline
-NdbTableImpl *
-NdbDictionaryImpl::getTableImpl(const char * internalTableName)
+Ndb_local_table_info *
+NdbDictionaryImpl::get_local_table_info(const char * internalTableName)
{
-  NdbTableImpl *ret = m_localHash.get(internalTableName);
-  if (ret != 0) {
-    return ret; // autoincrement already initialized
+  Ndb_local_table_info *info= m_localHash.get(internalTableName);
+  if (info != 0) {
+    return info; // autoincrement already initialized
  }
-  m_globalHash->lock();
-  ret = m_globalHash->get(internalTableName);
-  m_globalHash->unlock();
-  if (ret == 0){
-    ret = m_receiver.getTable(internalTableName, m_ndb.usingFullyQualifiedNames());
-    m_globalHash->lock();
-    m_globalHash->put(internalTableName, ret);
-    m_globalHash->unlock();
-    if(ret == 0){
-      return 0;
-    }
-  }
-  m_localHash.put(internalTableName, ret);
-  m_ndb.theFirstTupleId[ret->getTableId()] = ~0;
-  m_ndb.theLastTupleId[ret->getTableId()] = ~0;
-  addBlobTables(*ret);
-  return ret;
+  return fetchGlobalTableImpl(internalTableName);
}
inline
@@ -654,9 +640,9 @@ NdbDictionaryImpl::getIndex(const char * indexName,
    internalIndexName = m_ndb.internalizeTableName(indexName); // Index is also a table
  }
  if (internalIndexName) {
-    NdbTableImpl * tab = getTableImpl(internalIndexName);
-    if (tab) {
+    Ndb_local_table_info * info = get_local_table_info(internalIndexName);
+    if (info) {
+      NdbTableImpl * tab = info->m_table_impl;
      if (tab->m_index == 0)
        tab->m_index = getIndexImpl(indexName, internalIndexName);
      if (tab->m_index != 0)
...
@@ -59,7 +59,7 @@ public:
  void releaseHashTable(void);
  int insertKey(const char * str, Uint32 len, Uint32 lkey1, C* data);
-  int deleteKey(const char * str, Uint32 len);
+  C *deleteKey(const char * str, Uint32 len);
  C* getData(const char *, Uint32);
  Uint32* getKey(const char *, Uint32);
@@ -277,7 +277,7 @@ NdbLinHash<C>::getData( const char* str, Uint32 len ){
template <class C>
inline
-int
+C *
NdbLinHash<C>::deleteKey ( const char* str, Uint32 len){
  const Uint32 hash = Hash(str, len);
  int dir, seg;
@@ -288,19 +288,21 @@ NdbLinHash<C>::deleteKey ( const char* str, Uint32 len){
  for(NdbElement_t<C> * chain = *chainp; chain != 0; chain = chain->next){
    if(chain->len == len && !memcmp(chain->str, str, len)){
      if (oldChain == 0) {
+        C *data= chain->theData;
        delete chain;
        * chainp = 0;
-        return 1;
+        return data;
      } else {
+        C *data= chain->theData;
        oldChain->next = chain->next;
        delete chain;
-        return 1;
+        return data;
      }
    } else {
      oldChain = chain;
    }
  }
-  return -1; /* Element doesn't exist */
+  return 0; /* Element doesn't exist */
}
template <class C>
...
@@ -87,7 +87,8 @@ static int unpackfrm(const void **data, uint *len,
                     const void* pack_data);
static int ndb_get_table_statistics(Ndb*, const char *,
                                    Uint64* rows, Uint64* commits);
/*
  Error handling functions
@@ -137,6 +138,86 @@ static int ndb_to_mysql_error(const NdbError *err)
}
+/*
+  Place holder for ha_ndbcluster thread specific data
+*/
+Thd_ndb::Thd_ndb()
+{
+  ndb= new Ndb(g_ndb_cluster_connection, "");
+  lock_count= 0;
+  count= 0;
+}
+Thd_ndb::~Thd_ndb()
+{
+  if (ndb)
+    delete ndb;
+}
+/*
+ * manage uncommitted insert/deletes during transactio to get records correct
+ */
+struct Ndb_table_local_info {
+  int no_uncommitted_rows_count;
+  ulong last_count;
+  ha_rows records;
+};
+void ha_ndbcluster::records_update()
+{
+  DBUG_ENTER("ha_ndbcluster::records_update");
+  struct Ndb_table_local_info *info= (struct Ndb_table_local_info *)m_table_info;
+  DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
+                      ((const NDBTAB *)m_table)->getTableId(),
+                      info->no_uncommitted_rows_count));
+  if (info->records == ~(ha_rows)0)
+  {
+    Uint64 rows;
+    if(ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0) == 0){
+      info->records= rows;
+    }
+  }
+  records= info->records+ info->no_uncommitted_rows_count;
+  DBUG_VOID_RETURN;
+}
+void ha_ndbcluster::no_uncommitted_rows_init(THD *thd)
+{
+  DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_init");
+  struct Ndb_table_local_info *info= (struct Ndb_table_local_info *)m_table_info;
+  Thd_ndb *thd_ndb= (Thd_ndb *)thd->transaction.thd_ndb;
+  if (info->last_count != thd_ndb->count)
+  {
+    info->last_count = thd_ndb->count;
+    info->no_uncommitted_rows_count= 0;
+    info->records= ~(ha_rows)0;
+    DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
+                        ((const NDBTAB *)m_table)->getTableId(),
+                        info->no_uncommitted_rows_count));
+  }
+  DBUG_VOID_RETURN;
+}
+void ha_ndbcluster::no_uncommitted_rows_update(int c)
+{
+  DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_update");
+  struct Ndb_table_local_info *info= (struct Ndb_table_local_info *)m_table_info;
+  info->no_uncommitted_rows_count+= c;
+  DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
+                      ((const NDBTAB *)m_table)->getTableId(),
+                      info->no_uncommitted_rows_count));
+  DBUG_VOID_RETURN;
+}
+void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd)
+{
+  DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_reset");
+  ((Thd_ndb*)(thd->transaction.thd_ndb))->count++;
+  DBUG_VOID_RETURN;
+}
/*
  Take care of the error that occured in NDB
@@ -145,6 +226,7 @@ static int ndb_to_mysql_error(const NdbError *err)
  # The mapped error code
*/
int ha_ndbcluster::ndb_err(NdbConnection *trans)
{
  int res;
@@ -506,7 +588,7 @@ int ha_ndbcluster::get_metadata(const char *path)
  DBUG_ENTER("get_metadata");
  DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, path));
-  if (!(tab= dict->getTable(m_tabname)))
+  if (!(tab= dict->getTable(m_tabname, &m_table_info)))
    ERR_RETURN(dict->getNdbError());
  DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
@@ -556,10 +638,6 @@ int ha_ndbcluster::get_metadata(const char *path)
  // All checks OK, lets use the table
  m_table= (void*)tab;
-  Uint64 rows;
-  if(false && ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0) == 0){
-    records= rows;
-  }
  DBUG_RETURN(build_index_list(table, ILBP_OPEN));
}
@@ -1480,6 +1558,7 @@ int ha_ndbcluster::write_row(byte *record)
    Find out how this is detected!
  */
  rows_inserted++;
+  no_uncommitted_rows_update(1);
  bulk_insert_not_flushed= true;
  if ((rows_to_insert == 1) ||
      ((rows_inserted % bulk_insert_rows) == 0) ||
@@ -1701,6 +1780,8 @@ int ha_ndbcluster::delete_row(const byte *record)
      ERR_RETURN(trans->getNdbError());
    ops_pending++;
+    no_uncommitted_rows_update(-1);
    // If deleting from cursor, NoCommit will be handled in next_result
    DBUG_RETURN(0);
  }
@@ -1711,6 +1792,8 @@ int ha_ndbcluster::delete_row(const byte *record)
      op->deleteTuple() != 0)
    ERR_RETURN(trans->getNdbError());
+  no_uncommitted_rows_update(-1);
  if (table->primary_key == MAX_KEY)
  {
    // This table has no primary key, use "hidden" primary key
@@ -2259,7 +2342,10 @@ void ha_ndbcluster::info(uint flag)
  if (flag & HA_STATUS_CONST)
    DBUG_PRINT("info", ("HA_STATUS_CONST"));
  if (flag & HA_STATUS_VARIABLE)
+  {
    DBUG_PRINT("info", ("HA_STATUS_VARIABLE"));
+    records_update();
+  }
  if (flag & HA_STATUS_ERRKEY)
  {
    DBUG_PRINT("info", ("HA_STATUS_ERRKEY"));
@@ -2558,9 +2644,6 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
  NdbConnection* trans= NULL;
  DBUG_ENTER("external_lock");
-  DBUG_PRINT("enter", ("transaction.ndb_lock_count: %d",
-                       thd->transaction.ndb_lock_count));
  /*
    Check that this handler instance has a connection
    set up to the Ndb object of thd
@@ -2568,10 +2651,15 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
  if (check_ndb_connection())
    DBUG_RETURN(1);
+  Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb;
+  DBUG_PRINT("enter", ("transaction.thd_ndb->lock_count: %d",
+                       thd_ndb->lock_count));
  if (lock_type != F_UNLCK)
  {
    DBUG_PRINT("info", ("lock_type != F_UNLCK"));
-    if (!thd->transaction.ndb_lock_count++)
+    if (!thd_ndb->lock_count++)
    {
      PRINT_OPTION_FLAGS(thd);
@@ -2584,6 +2672,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
        trans= m_ndb->startTransaction();
        if (trans == NULL)
          ERR_RETURN(m_ndb->getNdbError());
+        no_uncommitted_rows_reset(thd);
        thd->transaction.stmt.ndb_tid= trans;
      }
      else
@@ -2597,6 +2686,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
          trans= m_ndb->startTransaction();
          if (trans == NULL)
            ERR_RETURN(m_ndb->getNdbError());
+          no_uncommitted_rows_reset(thd);
          /*
            If this is the start of a LOCK TABLE, a table look
@@ -2633,11 +2723,12 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
    // Start of transaction
    retrieve_all_fields= FALSE;
    ops_pending= 0;
+    no_uncommitted_rows_init(thd);
  }
  else
  {
    DBUG_PRINT("info", ("lock_type == F_UNLCK"));
-    if (!--thd->transaction.ndb_lock_count)
+    if (!--thd_ndb->lock_count)
    {
      DBUG_PRINT("trans", ("Last external_lock"));
      PRINT_OPTION_FLAGS(thd);
@@ -2696,6 +2787,7 @@ int ha_ndbcluster::start_stmt(THD *thd)
    trans= m_ndb->startTransaction();
    if (trans == NULL)
      ERR_RETURN(m_ndb->getNdbError());
+    no_uncommitted_rows_reset(thd);
    thd->transaction.stmt.ndb_tid= trans;
  }
  m_active_trans= trans;
@@ -2715,7 +2807,7 @@ int ha_ndbcluster::start_stmt(THD *thd)
int ndbcluster_commit(THD *thd, void *ndb_transaction)
{
  int res= 0;
-  Ndb *ndb= (Ndb*)thd->transaction.ndb;
+  Ndb *ndb= ((Thd_ndb*)thd->transaction.thd_ndb)->ndb;
  NdbConnection *trans= (NdbConnection*)ndb_transaction;
  DBUG_ENTER("ndbcluster_commit");
@@ -2733,7 +2825,7 @@ int ndbcluster_commit(THD *thd, void *ndb_transaction)
    if (res != -1)
      ndbcluster_print_error(res, error_op);
  }
  ndb->closeTransaction(trans);
  DBUG_RETURN(res);
}
@@ -2745,7 +2837,7 @@ int ndbcluster_commit(THD *thd, void *ndb_transaction)
int ndbcluster_rollback(THD *thd, void *ndb_transaction)
{
  int res= 0;
-  Ndb *ndb= (Ndb*)thd->transaction.ndb;
+  Ndb *ndb= ((Thd_ndb*)thd->transaction.thd_ndb)->ndb;
  NdbConnection *trans= (NdbConnection*)ndb_transaction;
  DBUG_ENTER("ndbcluster_rollback");
@@ -3222,9 +3314,9 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
  m_active_cursor(NULL),
  m_ndb(NULL),
  m_table(NULL),
+  m_table_info(NULL),
  m_table_flags(HA_REC_NOT_IN_SEQ |
                HA_NULL_IN_KEY |
-                HA_NOT_EXACT_COUNT |
                HA_NO_PREFIX_CHAR_KEYS),
  m_share(0),
  m_use_write(false),
@@ -3247,9 +3339,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
  m_tabname[0]= '\0';
  m_dbname[0]= '\0';
-  // TODO Adjust number of records and other parameters for proper
-  // selection of scan/pk access
-  records= 100;
+  records= ~(ha_rows)0; // uninitialized
  block_size= 1024;
  for (i= 0; i < MAX_KEY; i++)
@@ -3344,41 +3434,44 @@ int ha_ndbcluster::close(void)
}
-Ndb* ha_ndbcluster::seize_ndb()
+Thd_ndb* ha_ndbcluster::seize_thd_ndb()
{
-  Ndb* ndb;
-  DBUG_ENTER("seize_ndb");
+  Thd_ndb *thd_ndb;
+  DBUG_ENTER("seize_thd_ndb");
#ifdef USE_NDB_POOL
  // Seize from pool
  ndb= Ndb::seize();
+  xxxxxxxxxxxxxx error
#else
-  ndb= new Ndb(g_ndb_cluster_connection, "");
+  thd_ndb= new Thd_ndb();
#endif
-  if (ndb->init(max_transactions) != 0)
+  thd_ndb->ndb->getDictionary()->set_local_table_data_size(sizeof(Ndb_table_local_info));
+  if (thd_ndb->ndb->init(max_transactions) != 0)
  {
-    ERR_PRINT(ndb->getNdbError());
+    ERR_PRINT(thd_ndb->ndb->getNdbError());
    /*
      TODO
      Alt.1 If init fails because to many allocated Ndb
      wait on condition for a Ndb object to be released.
      Alt.2 Seize/release from pool, wait until next release
    */
-    delete ndb;
-    ndb= NULL;
+    delete thd_ndb;
+    thd_ndb= NULL;
  }
-  DBUG_RETURN(ndb);
+  DBUG_RETURN(thd_ndb);
}
-void ha_ndbcluster::release_ndb(Ndb* ndb)
+void ha_ndbcluster::release_thd_ndb(Thd_ndb* thd_ndb)
{
-  DBUG_ENTER("release_ndb");
+  DBUG_ENTER("release_thd_ndb");
#ifdef USE_NDB_POOL
  // Release to pool
  Ndb::release(ndb);
+  xxxxxxxxxxxx error
#else
-  delete ndb;
+  delete thd_ndb;
#endif
  DBUG_VOID_RETURN;
}
@@ -3397,29 +3490,31 @@ void ha_ndbcluster::release_ndb(Ndb* ndb)
int ha_ndbcluster::check_ndb_connection()
{
-  THD* thd= current_thd;
-  Ndb* ndb;
+  THD *thd= current_thd;
+  Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb;
  DBUG_ENTER("check_ndb_connection");
-  if (!thd->transaction.ndb)
+  if (!thd_ndb)
  {
-    ndb= seize_ndb();
-    if (!ndb)
+    thd_ndb= seize_thd_ndb();
+    if (!thd_ndb)
      DBUG_RETURN(2);
-    thd->transaction.ndb= ndb;
+    thd->transaction.thd_ndb= thd_ndb;
  }
-  m_ndb= (Ndb*)thd->transaction.ndb;
+  m_ndb= thd_ndb->ndb;
  m_ndb->setDatabaseName(m_dbname);
  DBUG_RETURN(0);
}
void ndbcluster_close_connection(THD *thd)
{
-  Ndb* ndb;
+  Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb;
  DBUG_ENTER("ndbcluster_close_connection");
-  ndb= (Ndb*)thd->transaction.ndb;
-  ha_ndbcluster::release_ndb(ndb);
-  thd->transaction.ndb= NULL;
+  if (thd_ndb)
+  {
+    ha_ndbcluster::release_thd_ndb(thd_ndb);
+    thd->transaction.thd_ndb= NULL;
+  }
  DBUG_VOID_RETURN;
}
@@ -3438,6 +3533,7 @@ int ndbcluster_discover(const char *dbname, const char *name,
  DBUG_PRINT("enter", ("db: %s, name: %s", dbname, name));
  Ndb ndb(g_ndb_cluster_connection, dbname);
+  ndb.getDictionary()->set_local_table_data_size(sizeof(Ndb_table_local_info));
  if (ndb.init())
    ERR_RETURN(ndb.getNdbError());
@@ -3528,6 +3624,7 @@ bool ndbcluster_init()
  // Create a Ndb object to open the connection to NDB
  g_ndb= new Ndb(g_ndb_cluster_connection, "sys");
+  g_ndb->getDictionary()->set_local_table_data_size(sizeof(Ndb_table_local_info));
  if (g_ndb->init() != 0)
  {
    ERR_PRINT (g_ndb->getNdbError());
@@ -3553,6 +3650,7 @@ bool ndbcluster_init()
  (void) hash_init(&ndbcluster_open_tables,system_charset_info,32,0,0,
                   (hash_get_key) ndbcluster_get_key,0,0);
  pthread_mutex_init(&ndbcluster_mutex,MY_MUTEX_INIT_FAST);
  ndbcluster_inited= 1;
#ifdef USE_DISCOVER_ON_STARTUP
  if (ndb_discover_tables() != 0)
...
@@ -63,6 +63,19 @@ typedef struct st_ndbcluster_share {
  uint table_name_length,use_count;
} NDB_SHARE;
+/*
+  Place holder for ha_ndbcluster thread specific data
+*/
+class Thd_ndb {
+ public:
+  Thd_ndb();
+  ~Thd_ndb();
+  Ndb *ndb;
+  ulong count;
+  uint lock_count;
+};
class ha_ndbcluster: public handler
{
 public:
@@ -147,8 +160,8 @@ class ha_ndbcluster: public handler
  void start_bulk_insert(ha_rows rows);
  int end_bulk_insert();
-  static Ndb* seize_ndb();
-  static void release_ndb(Ndb* ndb);
+  static Thd_ndb* seize_thd_ndb();
+  static void release_thd_ndb(Thd_ndb* thd_ndb);
  uint8 table_cache_type() { return HA_CACHE_TBL_NOCACHE; }
 private:
@@ -214,7 +227,8 @@ class ha_ndbcluster: public handler
  NdbConnection *m_active_trans;
  NdbResultSet *m_active_cursor;
  Ndb *m_ndb;
  void *m_table;
+  void *m_table_info;
  char m_dbname[FN_HEADLEN];
  //char m_schemaname[FN_HEADLEN];
  char m_tabname[FN_HEADLEN];
@@ -238,6 +252,11 @@ class ha_ndbcluster: public handler
  char *blobs_buffer;
  uint32 blobs_buffer_size;
  uint dupkey;
+  void records_update();
+  void no_uncommitted_rows_update(int);
+  void no_uncommitted_rows_init(THD *);
+  void no_uncommitted_rows_reset(THD *);
};
bool ndbcluster_init(void);
...
@@ -764,9 +764,8 @@ public:
  THD_TRANS all;    // Trans since BEGIN WORK
  THD_TRANS stmt;   // Trans for current statement
  uint bdb_lock_count;
-  uint ndb_lock_count;
#ifdef HAVE_NDBCLUSTER_DB
-  void* ndb;
+  void* thd_ndb;
#endif
  bool on;
  /*
...