Commit 4ed658de authored by joreland@mysql.com

wl2126 - fix the NDB part of the "latest" Ingo patch

  (hopefully last :-))
parent 64ced8ca
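The NDB changes below adapt ha_ndbcluster's multi-range read path to the KEY_MULTI_RANGE / HANDLER_BUFFER types and cache the current scan row in m_curr_row. As a rough, non-authoritative sketch of the calling protocol these hunks imply (read_multi_range_first primes a batch of ranges and returns the first row, read_multi_range_next keeps returning rows until it signals end-of-file), here is a self-contained mock; MockMrrHandler, MockRange and every value in them are invented stand-ins, not the real handler API:

```cpp
#include <cstdio>

// Invented stand-ins for illustration only -- NOT the real MySQL types.
static const int MOCK_END_OF_FILE = -1;

struct MockRange { int id; };

// A toy "handler" that serves one row per range, mimicking the shape of the
// read_multi_range_first / read_multi_range_next protocol in the diff below.
struct MockMrrHandler {
  MockRange *ranges;
  unsigned count;
  unsigned pos;

  int read_multi_range_first(MockRange **found, MockRange *r, unsigned n) {
    ranges = r;
    count = n;
    pos = 0;
    // Like the real patch, defining the batch ends by fetching the first row.
    return read_multi_range_next(found);
  }

  int read_multi_range_next(MockRange **found) {
    if (pos == count)
      return MOCK_END_OF_FILE;   // no more ranges in this batch
    *found = &ranges[pos++];     // report which range the row belongs to
    return 0;
  }
};

int main() {
  MockRange ranges[3] = { {0}, {1}, {2} };
  MockMrrHandler h;
  MockRange *found = 0;

  for (int res = h.read_multi_range_first(&found, ranges, 3);
       res == 0;
       res = h.read_multi_range_next(&found))
    std::printf("row from range %d\n", found->id);
  return 0;
}
```

In the real patch, read_multi_range_first likewise finishes by delegating to read_multi_range_next once the batch of ranges has been defined and executed.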
@@ -95,8 +95,6 @@ public:
SystemTable = 1, ///< System table
UserTable = 2, ///< User table (may be temporary)
UniqueHashIndex = 3, ///< Unique un-ordered hash index
HashIndex = 4, ///< Non-unique un-ordered hash index
UniqueOrderedIndex = 5, ///< Unique ordered index
OrderedIndex = 6, ///< Non-unique ordered index
HashIndexTrigger = 7, ///< Index maintenance, internal
IndexTrigger = 8, ///< Index maintenance, internal
......
@@ -98,7 +98,7 @@ private:
Uint32 m_received_result_length;
bool nextResult() const { return m_current_row < m_result_rows; }
void copyout(NdbReceiver&);
NdbRecAttr* copyout(NdbReceiver&);
};
#ifdef NDB_NO_DROPPED_SIGNAL
......
@@ -206,6 +206,7 @@ protected:
bool m_ordered;
bool m_descending;
Uint32 m_read_range_no;
NdbRecAttr *m_curr_row; // Pointer to last returned row
};
inline
......
@@ -1110,8 +1110,6 @@ objectTypeMapping[] = {
{ DictTabInfo::SystemTable, NdbDictionary::Object::SystemTable },
{ DictTabInfo::UserTable, NdbDictionary::Object::UserTable },
{ DictTabInfo::UniqueHashIndex, NdbDictionary::Object::UniqueHashIndex },
{ DictTabInfo::HashIndex, NdbDictionary::Object::HashIndex },
{ DictTabInfo::UniqueOrderedIndex, NdbDictionary::Object::UniqueOrderedIndex },
{ DictTabInfo::OrderedIndex, NdbDictionary::Object::OrderedIndex },
{ DictTabInfo::HashIndexTrigger, NdbDictionary::Object::HashIndexTrigger },
{ DictTabInfo::IndexTrigger, NdbDictionary::Object::IndexTrigger },
@@ -1143,8 +1141,6 @@ static const
ApiKernelMapping
indexTypeMapping[] = {
{ DictTabInfo::UniqueHashIndex, NdbDictionary::Index::UniqueHashIndex },
{ DictTabInfo::HashIndex, NdbDictionary::Index::HashIndex },
{ DictTabInfo::UniqueOrderedIndex, NdbDictionary::Index::UniqueOrderedIndex},
{ DictTabInfo::OrderedIndex, NdbDictionary::Index::OrderedIndex },
{ -1, -1 }
};
@@ -2953,8 +2949,6 @@ NdbDictInterface::listObjects(NdbDictionary::Dictionary::List& list,
BaseString schemaName;
BaseString objectName;
if ((element.type == NdbDictionary::Object::UniqueHashIndex) ||
(element.type == NdbDictionary::Object::HashIndex) ||
(element.type == NdbDictionary::Object::UniqueOrderedIndex) ||
(element.type == NdbDictionary::Object::OrderedIndex)) {
char * indexName = new char[n << 2];
memcpy(indexName, &data[pos], n << 2);
......
@@ -61,8 +61,6 @@ NdbIndexOperation::indxInit(const NdbIndexImpl * anIndex,
case(NdbDictionary::Index::UniqueHashIndex):
break;
case(NdbDictionary::Index::Undefined):
case(NdbDictionary::Index::HashIndex):
case(NdbDictionary::Index::UniqueOrderedIndex):
case(NdbDictionary::Index::OrderedIndex):
setErrorCodeAbort(4003);
return -1;
......
@@ -201,10 +201,11 @@ NdbReceiver::do_get_value(NdbReceiver * org,
return;
}
void
NdbRecAttr*
NdbReceiver::copyout(NdbReceiver & dstRec){
NdbRecAttr* src = m_rows[m_current_row++];
NdbRecAttr* dst = dstRec.theFirstRecAttr;
NdbRecAttr *src = m_rows[m_current_row++];
NdbRecAttr *dst = dstRec.theFirstRecAttr;
NdbRecAttr *start = src;
Uint32 tmp = m_hidden_count;
while(tmp--)
src = src->next();
@@ -215,6 +216,8 @@ NdbReceiver::copyout(NdbReceiver & dstRec){
src = src->next();
dst = dst->next();
}
return start;
}
int
......
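The copyout() hunk above now returns the first NdbRecAttr of the row it copied, and the scan code further down stores that pointer in m_curr_row so that getKeyFromKEYINFO20(), takeOverScanOp() and get_range_no() can read the current row directly instead of re-deriving it from the receiver state. A minimal, self-contained illustration of this copy-and-return-head pattern over two parallel linked lists follows; the Rec struct is a hypothetical stand-in for NdbRecAttr, not the real NDB API:

```cpp
#include <cstdio>

// Hypothetical stand-in for NdbRecAttr: a singly linked list of values.
struct Rec {
  int value;
  Rec *next;
};

// Copy values from the src chain into the dst chain and return the head of
// the src chain, mirroring the shape of NdbReceiver::copyout() above.
static Rec* copyout(Rec *src, Rec *dst, unsigned hidden_count) {
  Rec *start = src;            // remember the row head for the caller
  while (hidden_count--)       // skip hidden (internal) attributes
    src = src->next;
  while (src && dst) {
    dst->value = src->value;   // the real code copies attribute data here
    src = src->next;
    dst = dst->next;
  }
  return start;
}

int main() {
  Rec s2 = {30, 0}, s1 = {20, &s2}, s0 = {10, &s1};  // source row: 10 -> 20 -> 30
  Rec d1 = {0, 0},  d0 = {0, &d1};                   // destination chain of length 2
  Rec *curr_row = copyout(&s0, &d0, 1);              // skip one "hidden" attribute
  std::printf("head=%d dst=[%d,%d]\n", curr_row->value, d0.value, d1.value);
  return 0;
}
```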
@@ -160,8 +160,8 @@ NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
m_keyInfo = lockExcl ? 1 : 0;
bool range = false;
if (m_accessTable->m_indexType == NdbDictionary::Index::OrderedIndex ||
m_accessTable->m_indexType == NdbDictionary::Index::UniqueOrderedIndex){
if (m_accessTable->m_indexType == NdbDictionary::Index::OrderedIndex)
{
if (m_currentTable == m_accessTable){
// Old way of scanning indexes, should not be allowed
m_currentTable = theNdb->theDictionary->
@@ -424,6 +424,7 @@ int NdbScanOperation::nextResultImpl(bool fetchAllowed, bool forceSend)
int retVal = 2;
Uint32 idx = m_current_api_receiver;
Uint32 last = m_api_receivers_count;
m_curr_row = 0;
if(DEBUG_NEXT_RESULT)
ndbout_c("nextResult(%d) idx=%d last=%d", fetchAllowed, idx, last);
@@ -434,7 +435,7 @@ int NdbScanOperation::nextResultImpl(bool fetchAllowed, bool forceSend)
for(; idx < last; idx++){
NdbReceiver* tRec = m_api_receivers[idx];
if(tRec->nextResult()){
tRec->copyout(theReceiver);
m_curr_row = tRec->copyout(theReceiver);
retVal = 0;
break;
}
@@ -510,7 +511,7 @@ int NdbScanOperation::nextResultImpl(bool fetchAllowed, bool forceSend)
for(; idx < last; idx++){
NdbReceiver* tRec = m_api_receivers[idx];
if(tRec->nextResult()){
tRec->copyout(theReceiver);
m_curr_row = tRec->copyout(theReceiver);
retVal = 0;
break;
}
@@ -845,6 +846,7 @@ NdbScanOperation::doSendScan(int aProcessorId)
}
theStatus = WaitResponse;
m_curr_row = 0;
m_sent_receivers_count = theParallelism;
if(m_ordered)
{
@@ -878,16 +880,9 @@ NdbScanOperation::doSendScan(int aProcessorId)
int
NdbScanOperation::getKeyFromKEYINFO20(Uint32* data, unsigned size)
{
Uint32 idx = m_current_api_receiver;
Uint32 last = m_api_receivers_count;
Uint32 row;
NdbReceiver * tRec;
NdbRecAttr * tRecAttr;
if(idx < last && (tRec = m_api_receivers[idx])
&& ((row = tRec->m_current_row) <= tRec->m_defined_rows)
&& (tRecAttr = tRec->m_rows[row-1])){
NdbRecAttr * tRecAttr = m_curr_row;
if(tRecAttr)
{
const Uint32 * src = (Uint32*)tRecAttr->aRef();
memcpy(data, src, 4*size);
return 0;
@@ -896,18 +891,12 @@ NdbScanOperation::getKeyFromKEYINFO20(Uint32* data, unsigned size)
}
NdbOperation*
NdbScanOperation::takeOverScanOp(OperationType opType, NdbTransaction* pTrans){
Uint32 idx = m_current_api_receiver;
Uint32 last = m_api_receivers_count;
Uint32 row;
NdbReceiver * tRec;
NdbRecAttr * tRecAttr;
if(idx < last && (tRec = m_api_receivers[idx])
&& ((row = tRec->m_current_row) <= tRec->m_defined_rows)
&& (tRecAttr = tRec->m_rows[row-1])){
NdbScanOperation::takeOverScanOp(OperationType opType, NdbTransaction* pTrans)
{
NdbRecAttr * tRecAttr = m_curr_row;
if(tRecAttr)
{
NdbOperation * newOp = pTrans->getNdbOperation(m_currentTable);
if (newOp == NULL){
return NULL;
@@ -1302,6 +1291,7 @@ int
NdbIndexScanOperation::next_result_ordered(bool fetchAllowed,
bool forceSend){
m_curr_row = 0;
Uint32 u_idx = 0, u_last = 0;
Uint32 s_idx = m_current_api_receiver; // first sorted
Uint32 s_last = theParallelism; // last sorted
@@ -1412,7 +1402,7 @@ NdbIndexScanOperation::next_result_ordered(bool fetchAllowed,
tRec = m_api_receivers[s_idx];
if(s_idx < s_last && tRec->nextResult()){
tRec->copyout(theReceiver);
m_curr_row = tRec->copyout(theReceiver);
if(DEBUG_NEXT_RESULT) ndbout_c("return 0");
return 0;
}
@@ -1667,23 +1657,13 @@ NdbIndexScanOperation::end_of_bound(Uint32 no)
int
NdbIndexScanOperation::get_range_no()
{
if(m_read_range_no)
NdbRecAttr* tRecAttr = m_curr_row;
if(m_read_range_no && tRecAttr)
{
Uint32 idx = m_current_api_receiver;
Uint32 last = m_api_receivers_count;
Uint32 row;
NdbReceiver * tRec;
NdbRecAttr * tRecAttr;
if(idx < last && (tRec = m_api_receivers[idx])
&& ((row = tRec->m_current_row) <= tRec->m_defined_rows)
&& (tRecAttr = tRec->m_rows[row-1])){
if(m_keyInfo)
tRecAttr = tRecAttr->next();
Uint32 ret = *(Uint32*)tRecAttr->aRef();
return ret;
}
}
return -1;
}
@@ -3843,7 +3843,8 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
HA_NULL_IN_KEY |
HA_AUTO_PART_KEY |
HA_NO_VARCHAR |
HA_NO_PREFIX_CHAR_KEYS),
HA_NO_PREFIX_CHAR_KEYS |
HA_NEED_READ_RANGE_BUFFER),
m_share(0),
m_use_write(FALSE),
m_ignore_dup_key(FALSE),
@@ -4829,18 +4830,16 @@ int ha_ndbcluster::write_ndb_file()
DBUG_RETURN(error);
}
#ifdef key_multi_range
int
ha_ndbcluster::read_multi_range_first(key_multi_range **found_range_p,
key_multi_range *ranges,
ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
KEY_MULTI_RANGE *ranges,
uint range_count,
bool sorted,
handler_buffer *buffer)
HANDLER_BUFFER *buffer)
{
DBUG_ENTER("ha_ndbcluster::read_multi_range_first");
int res;
uint i;
KEY* key_info= table->key_info + active_index;
NDB_INDEX_TYPE index_type= get_index_type(active_index);
ulong reclength= table->reclength;
@@ -4864,8 +4863,9 @@ ha_ndbcluster::read_multi_range_first(key_multi_range **found_range_p,
/**
* Copy arguments into member variables
*/
multi_ranges= ranges;
multi_range_count= range_count;
m_multi_ranges= ranges;
multi_range_curr= ranges;
multi_range_end= ranges+range_count;
multi_range_sorted= sorted;
multi_range_buffer= buffer;
@@ -4893,18 +4893,19 @@ ha_ndbcluster::read_multi_range_first(key_multi_range **found_range_p,
const NDBINDEX *idx= (NDBINDEX *) m_index[active_index].index;
const NdbOperation* lastOp= m_active_trans->getLastDefinedOperation();
NdbIndexScanOperation* scanOp= 0;
for(i= 0; i<range_count && curr+reclength <= end_of_buffer; i++)
for(; multi_range_curr<multi_range_end && curr+reclength <= end_of_buffer;
multi_range_curr++)
{
switch(index_type){
case PRIMARY_KEY_INDEX:
pk:
{
ranges[i].range_flag |= UNIQUE_RANGE;
multi_range_curr->range_flag |= UNIQUE_RANGE;
if ((op= m_active_trans->getNdbOperation(tab)) &&
!op->readTuple(lm) &&
!set_primary_key(op, ranges[i].start_key.key) &&
!set_primary_key(op, multi_range_curr->start_key.key) &&
!define_read_attrs(curr, op) &&
(op->setAbortOption(IgnoreError), true))
(op->setAbortOption(AO_IgnoreError), true))
curr += reclength;
else
ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError());
@@ -4914,32 +4915,32 @@ ha_ndbcluster::read_multi_range_first(key_multi_range **found_range_p,
case UNIQUE_INDEX:
sk:
{
ranges[i].range_flag |= UNIQUE_RANGE;
multi_range_curr->range_flag |= UNIQUE_RANGE;
if ((op= m_active_trans->getNdbIndexOperation(unique_idx, tab)) &&
!op->readTuple(lm) &&
!set_index_key(op, key_info, ranges[i].start_key.key) &&
!set_index_key(op, key_info, multi_range_curr->start_key.key) &&
!define_read_attrs(curr, op) &&
(op->setAbortOption(IgnoreError), true))
(op->setAbortOption(AO_IgnoreError), true))
curr += reclength;
else
ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError());
break;
}
case PRIMARY_KEY_ORDERED_INDEX:
if (ranges[i].start_key.length == key_info->key_length &&
ranges[i].start_key.flag == HA_READ_KEY_EXACT)
if (multi_range_curr->start_key.length == key_info->key_length &&
multi_range_curr->start_key.flag == HA_READ_KEY_EXACT)
goto pk;
goto range;
case UNIQUE_ORDERED_INDEX:
if (ranges[i].start_key.length == key_info->key_length &&
ranges[i].start_key.flag == HA_READ_KEY_EXACT &&
!check_null_in_key(key_info, ranges[i].start_key.key,
ranges[i].start_key.length))
if (multi_range_curr->start_key.length == key_info->key_length &&
multi_range_curr->start_key.flag == HA_READ_KEY_EXACT &&
!check_null_in_key(key_info, multi_range_curr->start_key.key,
multi_range_curr->start_key.length))
goto sk;
goto range;
case ORDERED_INDEX:
range:
ranges[i].range_flag &= ~(uint)UNIQUE_RANGE;
multi_range_curr->range_flag &= ~(uint)UNIQUE_RANGE;
if (scanOp == 0)
{
if (m_multi_cursor)
@@ -4954,8 +4955,8 @@ ha_ndbcluster::read_multi_range_first(key_multi_range **found_range_p,
end_of_buffer -= reclength;
}
else if ((scanOp= m_active_trans->getNdbIndexScanOperation(idx, tab))
&& !scanOp->readTuples(lm, 0, parallelism, sorted, false, true) &&
!define_read_attrs(end_of_buffer-reclength, scanOp))
&&!scanOp->readTuples(lm, 0, parallelism, sorted, false, true)
&&!define_read_attrs(end_of_buffer-reclength, scanOp))
{
m_multi_cursor= scanOp;
m_multi_range_cursor_result_ptr= end_of_buffer-reclength;
@@ -4966,14 +4967,15 @@ ha_ndbcluster::read_multi_range_first(key_multi_range **found_range_p,
m_active_trans->getNdbError());
}
}
const key_range *keys[2]= { &ranges[i].start_key, &ranges[i].end_key };
if ((res= set_bounds(scanOp, keys, i)))
const key_range *keys[2]= { &multi_range_curr->start_key,
&multi_range_curr->end_key };
if ((res= set_bounds(scanOp, keys, multi_range_curr-ranges)))
DBUG_RETURN(res);
break;
}
}
if (i != range_count)
if (multi_range_curr != multi_range_end)
{
/**
* Mark that we're using entire buffer (even if might not) as
@@ -4995,8 +4997,8 @@ ha_ndbcluster::read_multi_range_first(key_multi_range **found_range_p,
lastOp ? lastOp->next() : m_active_trans->getFirstDefinedOperation();
if (!(res= execute_no_commit_ie(this, m_active_trans)))
{
multi_range_curr= 0;
m_multi_range_defined_count= i;
m_multi_range_defined= multi_range_curr;
multi_range_curr= ranges;
m_multi_range_result_ptr= (byte*)buffer->buffer;
DBUG_RETURN(read_multi_range_next(found_range_p));
}
@@ -5010,7 +5012,7 @@ ha_ndbcluster::read_multi_range_next(key_multi_range ** multi_range_found_p)
#endif
int
ha_ndbcluster::read_multi_range_next(key_multi_range ** multi_range_found_p)
ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p)
{
DBUG_ENTER("ha_ndbcluster::read_multi_range_next");
if (m_disable_multi_read)
@@ -5022,9 +5024,9 @@ ha_ndbcluster::read_multi_range_next(key_multi_range ** multi_range_found_p)
int range_no;
ulong reclength= table->reclength;
const NdbOperation* op= m_current_multi_operation;
for(;multi_range_curr < m_multi_range_defined_count; multi_range_curr++)
for(;multi_range_curr < m_multi_range_defined; multi_range_curr++)
{
if (multi_ranges[multi_range_curr].range_flag & UNIQUE_RANGE)
if (multi_range_curr->range_flag & UNIQUE_RANGE)
{
if (op->getNdbError().code == 0)
goto found_next;
@@ -5056,13 +5058,14 @@ ha_ndbcluster::read_multi_range_next(key_multi_range ** multi_range_found_p)
}
range_no= m_multi_cursor->get_range_no();
if (range_no == multi_range_curr)
uint current_range_no= multi_range_curr - m_multi_ranges;
if (range_no == current_range_no)
{
DBUG_MULTI_RANGE(4);
// return current row
goto found;
}
else if (range_no > (int)multi_range_curr)
else if (range_no > (int)current_range_no)
{
DBUG_MULTI_RANGE(5);
// wait with current row
@@ -5107,16 +5110,15 @@ ha_ndbcluster::read_multi_range_next(key_multi_range ** multi_range_found_p)
}
}
if (multi_range_curr == multi_range_count)
if (multi_range_curr == multi_range_end)
DBUG_RETURN(HA_ERR_END_OF_FILE);
/**
* Read remaining ranges
*/
uint left= multi_range_count - multi_range_curr;
DBUG_RETURN(read_multi_range_first(multi_range_found_p,
multi_ranges + multi_range_curr,
left,
multi_range_curr,
multi_range_end - multi_range_curr,
multi_range_sorted,
multi_range_buffer));
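In the hunks above, the remaining work is tracked with pointers into the range array rather than integer counters: the current range index is recovered as multi_range_curr - m_multi_ranges, and the number of ranges still to read as multi_range_end - multi_range_curr. A tiny self-contained example of this pointer-arithmetic idiom, using a plain int array instead of KEY_MULTI_RANGE:

```cpp
#include <cstdio>

int main() {
  int ranges[5] = {0, 1, 2, 3, 4};       // stand-in for the KEY_MULTI_RANGE array
  int *begin = ranges;                   // m_multi_ranges
  int *end   = ranges + 5;               // multi_range_end
  int *curr  = ranges + 2;               // multi_range_curr, somewhere mid-batch

  long current_range_no = curr - begin;  // index of the range being processed
  long remaining        = end - curr;    // ranges still to be read

  std::printf("current_range_no=%ld remaining=%ld\n", current_range_no, remaining);
  return 0;
}
```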
@@ -5125,7 +5127,7 @@ ha_ndbcluster::read_multi_range_next(key_multi_range ** multi_range_found_p)
* Found a record belonging to a scan
*/
m_active_cursor= m_multi_cursor;
* multi_range_found_p= multi_ranges + range_no;
* multi_range_found_p= m_multi_ranges + range_no;
memcpy(table->record[0], m_multi_range_cursor_result_ptr, reclength);
setup_recattr(m_active_cursor->getFirstRecAttr());
unpack_record(table->record[0]);
@@ -5137,7 +5139,7 @@ ha_ndbcluster::read_multi_range_next(key_multi_range ** multi_range_found_p)
* Found a record belonging to a pk/index op,
* copy result and move to next to prepare for next call
*/
* multi_range_found_p= multi_ranges + multi_range_curr;
* multi_range_found_p= multi_range_curr;
memcpy(table->record[0], m_multi_range_result_ptr, reclength);
setup_recattr(op->getFirstRecAttr());
unpack_record(table->record[0]);
@@ -5171,6 +5173,5 @@ ha_ndbcluster::setup_recattr(const NdbRecAttr* curr)
DBUG_RETURN(0);
}
#endif
#endif /* HAVE_NDBCLUSTER_DB */
@@ -114,10 +114,10 @@ class ha_ndbcluster: public handler
/**
* Multi range stuff
*/
int read_multi_range_first(struct key_multi_range **found_range_p,
struct key_multi_range *ranges, uint range_count,
bool sorted, struct handler_buffer *buffer);
int read_multi_range_next(struct key_multi_range **found_range_p);
int read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
KEY_MULTI_RANGE*ranges, uint range_count,
bool sorted, HANDLER_BUFFER *buffer);
int read_multi_range_next(KEY_MULTI_RANGE **found_range_p);
bool get_error_message(int error, String *buf);
void info(uint);
@@ -258,7 +258,8 @@ class ha_ndbcluster: public handler
bool m_disable_multi_read;
byte *m_multi_range_result_ptr;
uint m_multi_range_defined_count;
KEY_MULTI_RANGE *m_multi_ranges;
KEY_MULTI_RANGE *m_multi_range_defined;
const NdbOperation *m_current_multi_operation;
NdbIndexScanOperation *m_multi_cursor;
byte *m_multi_range_cursor_result_ptr;
......
@@ -5947,7 +5947,7 @@ int QUICK_RANGE_SELECT::get_next_init(void)
if (file->table_flags() & HA_NEED_READ_RANGE_BUFFER)
{
mrange_bufsiz= min(multi_range_bufsiz,
QUICK_SELECT_I::records * head->reclength);
(QUICK_SELECT_I::records + 1)* head->reclength);
while (mrange_bufsiz &&
! my_multi_malloc(MYF(MY_WME),
......
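The final hunk sizes the multi-range buffer as (QUICK_SELECT_I::records + 1) * head->reclength, capped at multi_range_bufsiz, so there is room for one row per expected record plus one spare. A quick worked example with made-up numbers (100 expected records, 128-byte rows, a 1 MB cap) gives 101 * 128 = 12928 bytes:

```cpp
#include <algorithm>
#include <cstdio>

int main() {
  unsigned long records            = 100;      // hypothetical QUICK_SELECT_I::records
  unsigned long reclength          = 128;      // hypothetical table row length
  unsigned long multi_range_bufsiz = 1 << 20;  // hypothetical 1 MB cap

  unsigned long mrange_bufsiz =
      std::min(multi_range_bufsiz, (records + 1) * reclength);

  std::printf("mrange_bufsiz=%lu bytes\n", mrange_bufsiz);  // 101 * 128 = 12928
  return 0;
}
```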