4ed658de
Commit
4ed658de
authored
Dec 28, 2004
by
joreland@mysql.com
wl2126 - fix ndb part of "latest" ingo patch
(hopefully last :-))

parent 64ced8ca

Showing 10 changed files with 79 additions and 103 deletions (+79 -103)
ndb/include/ndbapi/NdbDictionary.hpp     +0  -2
ndb/include/ndbapi/NdbReceiver.hpp       +1  -1
ndb/include/ndbapi/NdbScanOperation.hpp  +1  -0
ndb/src/ndbapi/NdbDictionaryImpl.cpp     +0  -6
ndb/src/ndbapi/NdbIndexOperation.cpp     +0  -2
ndb/src/ndbapi/NdbReceiver.cpp           +6  -3
ndb/src/ndbapi/NdbScanOperation.cpp      +22 -42
sql/ha_ndbcluster.cc                     +42 -41
sql/ha_ndbcluster.h                      +6  -5
sql/opt_range.cc                         +1  -1
ndb/include/ndbapi/NdbDictionary.hpp
@@ -95,8 +95,6 @@ public:
       SystemTable = 1,        ///< System table
       UserTable = 2,          ///< User table (may be temporary)
       UniqueHashIndex = 3,    ///< Unique un-ordered hash index
-      HashIndex = 4,          ///< Non-unique un-ordered hash index
-      UniqueOrderedIndex = 5, ///< Unique ordered index
       OrderedIndex = 6,       ///< Non-unique ordered index
       HashIndexTrigger = 7,   ///< Index maintenance, internal
       IndexTrigger = 8,       ///< Index maintenance, internal
ndb/include/ndbapi/NdbReceiver.hpp
@@ -98,7 +98,7 @@ private:
   Uint32 m_received_result_length;

   bool nextResult() const { return m_current_row < m_result_rows; }
-  void copyout(NdbReceiver&);
+  NdbRecAttr* copyout(NdbReceiver&);
 };

 #ifdef NDB_NO_DROPPED_SIGNAL
ndb/include/ndbapi/NdbScanOperation.hpp
@@ -206,6 +206,7 @@ protected:
   bool m_ordered;
   bool m_descending;
   Uint32 m_read_range_no;
+  NdbRecAttr *m_curr_row; // Pointer to last returned row
 };

 inline
ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -1110,8 +1110,6 @@ objectTypeMapping[] = {
   { DictTabInfo::SystemTable,        NdbDictionary::Object::SystemTable },
   { DictTabInfo::UserTable,          NdbDictionary::Object::UserTable },
   { DictTabInfo::UniqueHashIndex,    NdbDictionary::Object::UniqueHashIndex },
-  { DictTabInfo::HashIndex,          NdbDictionary::Object::HashIndex },
-  { DictTabInfo::UniqueOrderedIndex, NdbDictionary::Object::UniqueOrderedIndex },
   { DictTabInfo::OrderedIndex,       NdbDictionary::Object::OrderedIndex },
   { DictTabInfo::HashIndexTrigger,   NdbDictionary::Object::HashIndexTrigger },
   { DictTabInfo::IndexTrigger,       NdbDictionary::Object::IndexTrigger },
@@ -1143,8 +1141,6 @@ static const
 ApiKernelMapping
 indexTypeMapping[] = {
   { DictTabInfo::UniqueHashIndex,    NdbDictionary::Index::UniqueHashIndex },
-  { DictTabInfo::HashIndex,          NdbDictionary::Index::HashIndex },
-  { DictTabInfo::UniqueOrderedIndex, NdbDictionary::Index::UniqueOrderedIndex },
   { DictTabInfo::OrderedIndex,       NdbDictionary::Index::OrderedIndex },
   { -1, -1 }
 };
@@ -2953,8 +2949,6 @@ NdbDictInterface::listObjects(NdbDictionary::Dictionary::List& list,
     BaseString schemaName;
     BaseString objectName;
     if ((element.type == NdbDictionary::Object::UniqueHashIndex) ||
-        (element.type == NdbDictionary::Object::HashIndex) ||
-        (element.type == NdbDictionary::Object::UniqueOrderedIndex) ||
         (element.type == NdbDictionary::Object::OrderedIndex)) {
       char * indexName = new char[n << 2];
       memcpy(indexName, &data[pos], n << 2);
ndb/src/ndbapi/NdbIndexOperation.cpp
@@ -61,8 +61,6 @@ NdbIndexOperation::indxInit(const NdbIndexImpl * anIndex,
   case(NdbDictionary::Index::UniqueHashIndex):
     break;
   case(NdbDictionary::Index::Undefined):
-  case(NdbDictionary::Index::HashIndex):
-  case(NdbDictionary::Index::UniqueOrderedIndex):
   case(NdbDictionary::Index::OrderedIndex):
     setErrorCodeAbort(4003);
     return -1;
ndb/src/ndbapi/NdbReceiver.cpp
@@ -201,10 +201,11 @@ NdbReceiver::do_get_value(NdbReceiver * org,
   return;
 }

-void
+NdbRecAttr*
 NdbReceiver::copyout(NdbReceiver & dstRec){
-  NdbRecAttr* src = m_rows[m_current_row++];
-  NdbRecAttr* dst = dstRec.theFirstRecAttr;
+  NdbRecAttr* src = m_rows[m_current_row++];
+  NdbRecAttr* dst = dstRec.theFirstRecAttr;
+  NdbRecAttr* start = src;
   Uint32 tmp = m_hidden_count;
   while(tmp--)
     src = src->next();
@@ -215,6 +216,8 @@ NdbReceiver::copyout(NdbReceiver & dstRec){
     src = src->next();
     dst = dst->next();
   }
+
+  return start;
 }

 int
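
The practical effect here is that copyout() now hands back the head of the NdbRecAttr chain for the row it just copied into the destination receiver, and the scan code below caches that pointer in m_curr_row. As a rough illustration (not part of the patch), a cached row head of this kind is consumed by walking the chain with the same NdbRecAttr accessors the patch already uses; process_value() is a hypothetical consumer and the snippet assumes the usual NdbApi headers are included.

// Sketch only: walk the NdbRecAttr chain of one returned row.
extern void process_value(const char* data);   // hypothetical consumer
void consume_row(NdbRecAttr* row)
{
  for (NdbRecAttr* attr = row; attr != NULL; attr = attr->next())
  {
    if (attr->isNULL() == 0)          // 0 = value present, 1 = SQL NULL
      process_value(attr->aRef());    // aRef() points at the raw attribute data
  }
}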
ndb/src/ndbapi/NdbScanOperation.cpp
@@ -160,8 +160,8 @@ NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
   m_keyInfo = lockExcl ? 1 : 0;

   bool range = false;
-  if (m_accessTable->m_indexType == NdbDictionary::Index::OrderedIndex ||
-      m_accessTable->m_indexType == NdbDictionary::Index::UniqueOrderedIndex){
+  if (m_accessTable->m_indexType == NdbDictionary::Index::OrderedIndex)
+  {
     if (m_currentTable == m_accessTable){
       // Old way of scanning indexes, should not be allowed
       m_currentTable = theNdb->theDictionary->
@@ -424,6 +424,7 @@ int NdbScanOperation::nextResultImpl(bool fetchAllowed, bool forceSend)
   int retVal = 2;
   Uint32 idx = m_current_api_receiver;
   Uint32 last = m_api_receivers_count;
+  m_curr_row = 0;

   if(DEBUG_NEXT_RESULT)
     ndbout_c("nextResult(%d) idx=%d last=%d", fetchAllowed, idx, last);
@@ -434,7 +435,7 @@ int NdbScanOperation::nextResultImpl(bool fetchAllowed, bool forceSend)
   for(; idx < last; idx++){
     NdbReceiver* tRec = m_api_receivers[idx];
     if(tRec->nextResult()){
-      tRec->copyout(theReceiver);
+      m_curr_row = tRec->copyout(theReceiver);
       retVal = 0;
       break;
     }
@@ -510,7 +511,7 @@ int NdbScanOperation::nextResultImpl(bool fetchAllowed, bool forceSend)
   for(; idx < last; idx++){
     NdbReceiver* tRec = m_api_receivers[idx];
     if(tRec->nextResult()){
-      tRec->copyout(theReceiver);
+      m_curr_row = tRec->copyout(theReceiver);
       retVal = 0;
       break;
     }
@@ -845,6 +846,7 @@ NdbScanOperation::doSendScan(int aProcessorId)
   }

   theStatus = WaitResponse;
+  m_curr_row = 0;
   m_sent_receivers_count = theParallelism;
   if(m_ordered)
   {
@@ -878,16 +880,9 @@ NdbScanOperation::doSendScan(int aProcessorId)
 int
 NdbScanOperation::getKeyFromKEYINFO20(Uint32* data, unsigned size)
 {
-  Uint32 idx = m_current_api_receiver;
-  Uint32 last = m_api_receivers_count;
-
-  Uint32 row;
-  NdbReceiver* tRec;
-  NdbRecAttr* tRecAttr;
-  if(idx < last && (tRec = m_api_receivers[idx])
-     && ((row = tRec->m_current_row) <= tRec->m_defined_rows)
-     && (tRecAttr = tRec->m_rows[row-1])){
-
+  NdbRecAttr* tRecAttr = m_curr_row;
+  if(tRecAttr)
+  {
     const Uint32 * src = (Uint32*)tRecAttr->aRef();
     memcpy(data, src, 4*size);
     return 0;
@@ -896,18 +891,12 @@ NdbScanOperation::getKeyFromKEYINFO20(Uint32* data, unsigned size)
 }

 NdbOperation*
-NdbScanOperation::takeOverScanOp(OperationType opType, NdbTransaction* pTrans){
-  Uint32 idx = m_current_api_receiver;
-  Uint32 last = m_api_receivers_count;
-
-  Uint32 row;
-  NdbReceiver* tRec;
-  NdbRecAttr* tRecAttr;
-  if(idx < last && (tRec = m_api_receivers[idx])
-     && ((row = tRec->m_current_row) <= tRec->m_defined_rows)
-     && (tRecAttr = tRec->m_rows[row-1])){
-
+NdbScanOperation::takeOverScanOp(OperationType opType, NdbTransaction* pTrans)
+{
+  NdbRecAttr* tRecAttr = m_curr_row;
+  if (tRecAttr)
+  {
     NdbOperation * newOp = pTrans->getNdbOperation(m_currentTable);
     if (newOp == NULL){
       return NULL;
@@ -1302,6 +1291,7 @@ int
 NdbIndexScanOperation::next_result_ordered(bool fetchAllowed,
                                            bool forceSend){

+  m_curr_row = 0;
   Uint32 u_idx = 0, u_last = 0;
   Uint32 s_idx = m_current_api_receiver; // first sorted
   Uint32 s_last = theParallelism;        // last sorted
@@ -1412,7 +1402,7 @@ NdbIndexScanOperation::next_result_ordered(bool fetchAllowed,
     tRec = m_api_receivers[s_idx];
     if(s_idx < s_last && tRec->nextResult()){
-      tRec->copyout(theReceiver);
+      m_curr_row = tRec->copyout(theReceiver);
       if(DEBUG_NEXT_RESULT) ndbout_c("return 0");
       return 0;
     }
@@ -1667,23 +1657,13 @@ NdbIndexScanOperation::end_of_bound(Uint32 no)
 int
 NdbIndexScanOperation::get_range_no()
 {
-  if(m_read_range_no)
+  NdbRecAttr* tRecAttr = m_curr_row;
+  if(m_read_range_no && tRecAttr)
   {
-    Uint32 idx = m_current_api_receiver;
-    Uint32 last = m_api_receivers_count;
-
-    Uint32 row;
-    NdbReceiver* tRec;
-    NdbRecAttr* tRecAttr;
-    if(idx < last && (tRec = m_api_receivers[idx])
-       && ((row = tRec->m_current_row) <= tRec->m_defined_rows)
-       && (tRecAttr = tRec->m_rows[row-1])){
-
     if(m_keyInfo)
       tRecAttr = tRecAttr->next();
     Uint32 ret = *(Uint32*)tRecAttr->aRef();
     return ret;
-    }
   }
   return -1;
 }
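
Most of the churn above replaces per-receiver row bookkeeping with the single m_curr_row pointer filled in by copyout(). For orientation, here is a hedged sketch of the NDB API usage pattern that the read_range_no flag and get_range_no() serve. readTuples() with six arguments, setBound(), end_of_bound(), nextResult() and get_range_no() all appear in this patch or its context lines; the column name, key buffer, return handling and the pre-5.1 execute(NoCommit) spelling are assumptions for illustration only.

// Sketch only (assumes #include <NdbApi.hpp>): an ordered index scan that
// batches several key ranges and asks which range produced each row.
int scan_ranges(NdbTransaction* trans,
                const NdbDictionary::Index* index,
                const NdbDictionary::Table* table,
                const Uint32* keys, Uint32 range_count)
{
  NdbIndexScanOperation* scan = trans->getNdbIndexScanOperation(index, table);
  if (scan == NULL)
    return -1;
  // lm, batch=0, parallel=0, order_by=true, descending=false, read_range_no=true
  if (scan->readTuples(NdbScanOperation::LM_CommittedRead, 0, 0, true, false, true))
    return -1;
  scan->getValue("key_col");               // assumed Uint32 index column
  for (Uint32 r = 0; r < range_count; r++)
  {
    scan->setBound("key_col", NdbIndexScanOperation::BoundEQ, &keys[r]);
    scan->end_of_bound(r);                 // tag this bound with range number r
  }
  if (trans->execute(NoCommit))            // assumption: pre-5.1 ExecType spelling
    return -1;
  while (scan->nextResult(true) == 0)
  {
    int range_no = scan->get_range_no();   // which of the defined ranges matched
    (void)range_no;                        // a real caller would dispatch on this
  }
  return 0;
}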
sql/ha_ndbcluster.cc
@@ -3843,7 +3843,8 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
             HA_NULL_IN_KEY |
             HA_AUTO_PART_KEY |
             HA_NO_VARCHAR |
-            HA_NO_PREFIX_CHAR_KEYS),
+            HA_NO_PREFIX_CHAR_KEYS |
+            HA_NEED_READ_RANGE_BUFFER),
   m_share(0),
   m_use_write(FALSE),
   m_ignore_dup_key(FALSE),
@@ -4829,18 +4830,16 @@ int ha_ndbcluster::write_ndb_file()
   DBUG_RETURN(error);
 }

-#ifdef key_multi_range
 int
-ha_ndbcluster::read_multi_range_first(key_multi_range **found_range_p,
-                                      key_multi_range *ranges,
+ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
+                                      KEY_MULTI_RANGE *ranges,
                                       uint range_count,
                                       bool sorted,
-                                      handler_buffer *buffer)
+                                      HANDLER_BUFFER *buffer)
 {
   DBUG_ENTER("ha_ndbcluster::read_multi_range_first");

   int res;
-  uint i;
   KEY* key_info= table->key_info + active_index;
   NDB_INDEX_TYPE index_type= get_index_type(active_index);
   ulong reclength= table->reclength;
@@ -4864,8 +4863,9 @@ ha_ndbcluster::read_multi_range_first(key_multi_range **found_range_p,
   /**
    * Copy arguments into member variables
    */
-  multi_ranges= ranges;
-  multi_range_count= range_count;
+  m_multi_ranges= ranges;
+  multi_range_curr= ranges;
+  multi_range_end= ranges+range_count;
   multi_range_sorted= sorted;
   multi_range_buffer= buffer;
@@ -4893,18 +4893,19 @@ ha_ndbcluster::read_multi_range_first(key_multi_range **found_range_p,
   const NDBINDEX *idx= (NDBINDEX *) m_index[active_index].index;
   const NdbOperation* lastOp= m_active_trans->getLastDefinedOperation();
   NdbIndexScanOperation* scanOp= 0;
-  for (i= 0; i < range_count && curr+reclength <= end_of_buffer; i++)
+  for (; multi_range_curr < multi_range_end && curr+reclength <= end_of_buffer;
+       multi_range_curr++)
   {
     switch (index_type){
     case PRIMARY_KEY_INDEX:
   pk:
     {
-      ranges[i].range_flag|= UNIQUE_RANGE;
+      multi_range_curr->range_flag|= UNIQUE_RANGE;
       if ((op= m_active_trans->getNdbOperation(tab)) &&
          !op->readTuple(lm) &&
-         !set_primary_key(op, ranges[i].start_key.key) &&
+         !set_primary_key(op, multi_range_curr->start_key.key) &&
          !define_read_attrs(curr, op) &&
-         (op->setAbortOption(IgnoreError), true))
+         (op->setAbortOption(AO_IgnoreError), true))
        curr += reclength;
       else
        ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError());
@@ -4914,32 +4915,32 @@ ha_ndbcluster::read_multi_range_first(key_multi_range **found_range_p,
     case UNIQUE_INDEX:
   sk:
     {
-      ranges[i].range_flag|= UNIQUE_RANGE;
+      multi_range_curr->range_flag|= UNIQUE_RANGE;
       if ((op= m_active_trans->getNdbIndexOperation(unique_idx, tab)) &&
          !op->readTuple(lm) &&
-         !set_index_key(op, key_info, ranges[i].start_key.key) &&
+         !set_index_key(op, key_info, multi_range_curr->start_key.key) &&
          !define_read_attrs(curr, op) &&
-         (op->setAbortOption(IgnoreError), true))
+         (op->setAbortOption(AO_IgnoreError), true))
        curr += reclength;
       else
        ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError());
       break;
     }
     case PRIMARY_KEY_ORDERED_INDEX:
-      if (ranges[i].start_key.length == key_info->key_length &&
-          ranges[i].start_key.flag == HA_READ_KEY_EXACT)
+      if (multi_range_curr->start_key.length == key_info->key_length &&
+          multi_range_curr->start_key.flag == HA_READ_KEY_EXACT)
        goto pk;
       goto range;
     case UNIQUE_ORDERED_INDEX:
-      if (ranges[i].start_key.length == key_info->key_length &&
-          ranges[i].start_key.flag == HA_READ_KEY_EXACT &&
-          !check_null_in_key(key_info, ranges[i].start_key.key,
-                             ranges[i].start_key.length))
+      if (multi_range_curr->start_key.length == key_info->key_length &&
+          multi_range_curr->start_key.flag == HA_READ_KEY_EXACT &&
+          !check_null_in_key(key_info, multi_range_curr->start_key.key,
+                             multi_range_curr->start_key.length))
        goto sk;
       goto range;
     case ORDERED_INDEX:
   range:
-      ranges[i].range_flag&= ~(uint)UNIQUE_RANGE;
+      multi_range_curr->range_flag&= ~(uint)UNIQUE_RANGE;
       if (scanOp == 0)
       {
        if (m_multi_cursor)
@@ -4954,8 +4955,8 @@ ha_ndbcluster::read_multi_range_first(key_multi_range **found_range_p,
          end_of_buffer -= reclength;
        }
        else if ((scanOp= m_active_trans->getNdbIndexScanOperation(idx, tab))
-                &&!scanOp->readTuples(lm, 0, parallelism, sorted, false, true)
-                &&!define_read_attrs(end_of_buffer-reclength, scanOp))
+                &&!scanOp->readTuples(lm, 0, parallelism, sorted, false, true)
+                &&!define_read_attrs(end_of_buffer-reclength, scanOp))
        {
          m_multi_cursor= scanOp;
          m_multi_range_cursor_result_ptr= end_of_buffer-reclength;
@@ -4966,14 +4967,15 @@ ha_ndbcluster::read_multi_range_first(key_multi_range **found_range_p,
                     m_active_trans->getNdbError());
        }
       }

-      const key_range *keys[2]= { &ranges[i].start_key, &ranges[i].end_key };
-      if ((res= set_bounds(scanOp, keys, i)))
+      const key_range *keys[2]= { &multi_range_curr->start_key,
+                                  &multi_range_curr->end_key };
+      if ((res= set_bounds(scanOp, keys, multi_range_curr-ranges)))
        DBUG_RETURN(res);
       break;
     }
   }

-  if (i != range_count)
+  if (multi_range_curr != multi_range_end)
   {
     /**
      * Mark that we're using entire buffer (even if might not) as
@@ -4995,8 +4997,8 @@ ha_ndbcluster::read_multi_range_first(key_multi_range **found_range_p,
     lastOp ? lastOp->next() : m_active_trans->getFirstDefinedOperation();
   if (!(res= execute_no_commit_ie(this, m_active_trans)))
   {
-    multi_range_curr= 0;
-    m_multi_range_defined_count= i;
+    m_multi_range_defined= multi_range_curr;
+    multi_range_curr= ranges;
     m_multi_range_result_ptr= (byte*)buffer->buffer;
     DBUG_RETURN(read_multi_range_next(found_range_p));
   }
@@ -5010,7 +5012,7 @@ ha_ndbcluster::read_multi_range_first(key_multi_range **found_range_p,
 #endif

 int
-ha_ndbcluster::read_multi_range_next(key_multi_range ** multi_range_found_p)
+ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p)
 {
   DBUG_ENTER("ha_ndbcluster::read_multi_range_next");
   if (m_disable_multi_read)
@@ -5022,9 +5024,9 @@ ha_ndbcluster::read_multi_range_next(key_multi_range ** multi_range_found_p)
   int range_no;
   ulong reclength= table->reclength;
   const NdbOperation* op= m_current_multi_operation;
-  for (; multi_range_curr < m_multi_range_defined_count; multi_range_curr++)
+  for (; multi_range_curr < m_multi_range_defined; multi_range_curr++)
   {
-    if (multi_ranges[multi_range_curr].range_flag & UNIQUE_RANGE)
+    if (multi_range_curr->range_flag & UNIQUE_RANGE)
     {
       if (op->getNdbError().code == 0)
         goto found_next;
@@ -5056,13 +5058,14 @@ ha_ndbcluster::read_multi_range_next(key_multi_range ** multi_range_found_p)
       }

       range_no= m_multi_cursor->get_range_no();
-      if (range_no == multi_range_curr)
+      uint current_range_no= multi_range_curr - m_multi_ranges;
+      if (range_no == current_range_no)
       {
         DBUG_MULTI_RANGE(4);
         // return current row
         goto found;
       }
-      else if (range_no > (int)multi_range_curr)
+      else if (range_no > (int)current_range_no)
       {
         DBUG_MULTI_RANGE(5);
         // wait with current row
@@ -5107,16 +5110,15 @@ ha_ndbcluster::read_multi_range_next(key_multi_range ** multi_range_found_p)
     }
   }

-  if (multi_range_curr == multi_range_count)
+  if (multi_range_curr == multi_range_end)
     DBUG_RETURN(HA_ERR_END_OF_FILE);

   /**
    * Read remaining ranges
    */
-  uint left= multi_range_count - multi_range_curr;
   DBUG_RETURN(read_multi_range_first(multi_range_found_p,
-                                     multi_ranges + multi_range_curr,
-                                     left,
+                                     multi_range_curr,
+                                     multi_range_end - multi_range_curr,
                                      multi_range_sorted,
                                      multi_range_buffer));
@@ -5125,7 +5127,7 @@ ha_ndbcluster::read_multi_range_next(key_multi_range ** multi_range_found_p)
    * Found a record belonging to a scan
    */
   m_active_cursor= m_multi_cursor;
-  *multi_range_found_p= multi_ranges + range_no;
+  *multi_range_found_p= m_multi_ranges + range_no;
   memcpy(table->record[0], m_multi_range_cursor_result_ptr, reclength);
   setup_recattr(m_active_cursor->getFirstRecAttr());
   unpack_record(table->record[0]);
@@ -5137,7 +5139,7 @@ ha_ndbcluster::read_multi_range_next(key_multi_range ** multi_range_found_p)
    * Found a record belonging to a pk/index op,
    * copy result and move to next to prepare for next call
    */
-  *multi_range_found_p= multi_ranges + multi_range_curr;
+  *multi_range_found_p= multi_range_curr;
   memcpy(table->record[0], m_multi_range_result_ptr, reclength);
   setup_recattr(op->getFirstRecAttr());
   unpack_record(table->record[0]);
@@ -5171,6 +5173,5 @@ ha_ndbcluster::setup_recattr(const NdbRecAttr* curr)
   DBUG_RETURN(0);
 }

-#endif
 #endif /* HAVE_NDBCLUSTER_DB */
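
The handler changes above switch ha_ndbcluster from the interim key_multi_range/handler_buffer names and index-based bookkeeping to the final KEY_MULTI_RANGE/HANDLER_BUFFER API, driven by pointers into the caller's range array. For context, a hedged sketch of the calling pattern these two entry points are written for; the real driver is QUICK_RANGE_SELECT in sql/opt_range.cc, and the wrapper and process_row() here are placeholders, not server code.

// Sketch only: how read_multi_range_first/next are driven by the server side.
extern void process_row(const byte *record, const KEY_MULTI_RANGE *range); // hypothetical
int fetch_all_ranges(handler *file, TABLE *table,
                     KEY_MULTI_RANGE *ranges, uint range_count,
                     bool sorted, HANDLER_BUFFER *buf)
{
  KEY_MULTI_RANGE *found_range;
  int error= file->read_multi_range_first(&found_range, ranges, range_count,
                                          sorted, buf);
  while (!error)
  {
    // table->record[0] holds the fetched row; found_range identifies its range
    process_row(table->record[0], found_range);
    error= file->read_multi_range_next(&found_range);
  }
  return error == HA_ERR_END_OF_FILE ? 0 : error;
}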
sql/ha_ndbcluster.h
@@ -114,10 +114,10 @@ class ha_ndbcluster: public handler
   /**
    * Multi range stuff
    */
-  int read_multi_range_first(struct key_multi_range **found_range_p,
-                             struct key_multi_range *ranges,
-                             uint range_count, bool sorted,
-                             struct handler_buffer *buffer);
-  int read_multi_range_next(struct key_multi_range **found_range_p);
+  int read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
+                             KEY_MULTI_RANGE *ranges,
+                             uint range_count, bool sorted,
+                             HANDLER_BUFFER *buffer);
+  int read_multi_range_next(KEY_MULTI_RANGE **found_range_p);

   bool get_error_message(int error, String *buf);
   void info(uint);
@@ -258,7 +258,8 @@ class ha_ndbcluster: public handler
   bool m_disable_multi_read;
   byte *m_multi_range_result_ptr;
-  uint m_multi_range_defined_count;
+  KEY_MULTI_RANGE *m_multi_ranges;
+  KEY_MULTI_RANGE *m_multi_range_defined;
   const NdbOperation *m_current_multi_operation;
   NdbIndexScanOperation *m_multi_cursor;
   byte *m_multi_range_cursor_result_ptr;
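
A hedged reading of what the reworked members track, based on how read_multi_range_first/next above use them (the comments are an interpretation, not text from the patch; multi_range_curr and multi_range_end are presumably inherited from the handler base class):

KEY_MULTI_RANGE *m_multi_ranges;        // start of the caller's range array, kept so
                                        // result rows can be mapped back to a range
KEY_MULTI_RANGE *m_multi_range_defined; // one past the last range turned into NDB
                                        // operations by the previous _first() call;
                                        // _next() walks multi_range_curr up to here
                                        // before defining the remaining ranges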
sql/opt_range.cc
@@ -5947,7 +5947,7 @@ int QUICK_RANGE_SELECT::get_next_init(void)
   if (file->table_flags() & HA_NEED_READ_RANGE_BUFFER)
   {
     mrange_bufsiz= min(multi_range_bufsiz,
-                       QUICK_SELECT_I::records * head->reclength);
+                       (QUICK_SELECT_I::records + 1) * head->reclength);

     while (mrange_bufsiz &&
            !my_multi_malloc(MYF(MY_WME),
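
The sizing tweak appears to account for the record-sized slot that ha_ndbcluster::read_multi_range_first (above) carves off the end of the buffer for the ordered-scan cursor row; that reading, and the numbers below, are an illustration rather than part of the patch.

// Illustrative arithmetic only (assumed values):
//   records = 100 estimated rows, reclength = 64 bytes, multi_range_bufsiz = 8192
//   old: min(8192, 100 * 64)       = 6400 bytes; reserving the cursor slot would
//        leave room for only 99 of the 100 unique-range rows
//   new: min(8192, (100 + 1) * 64) = 6464 bytes; one spare record-sized slot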