nexedi / MariaDB

Commit b045a815, authored Nov 13, 2006 by mskold/marty@mysql.com/linux.site

Merge mskold@bk-internal.mysql.com:/home/bk/mysql-5.0-ndb
into mysql.com:/windows/Linux_space/MySQL/mysql-5.0-ndb
Parents: ab503346 f2933507

Showing 4 changed files with 259 additions and 53 deletions
mysql-test/r/ndb_index_unique.result   +34 -1
mysql-test/t/ndb_index_unique.test     +14 -1
sql/ha_ndbcluster.cc                   +193 -49
sql/ha_ndbcluster.h                    +18 -2
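Condensed from the test and result updates below (a sketch only; it reuses the t2 table defined in the test, nothing new is introduced): a unique hash index that includes a nullable column is no longer rejected with error 1121 (ER_NULL_COLUMN_IN_INDEX) but is created with a warning, and index access with a NULL key value is executed as a full table scan.

CREATE TABLE t2 (
  a int unsigned NOT NULL PRIMARY KEY,
  b int unsigned NOT NULL,
  c int unsigned,                -- nullable column inside the unique key
  UNIQUE USING HASH (b, c)
) engine=ndbcluster;             -- now succeeds with Warning 1121 instead of ERROR 42000

insert t2 values(1,1,NULL),(2,2,2),(3,3,NULL);
-- A lookup that probes the unique key with NULL is answered by a scan
-- rather than by a hash index read.
select * from t2 where b = 3 AND c IS NULL order by a;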
mysql-test/r/ndb_index_unique.result
...
...
@@ -140,7 +140,40 @@ b int unsigned not null,
 c int unsigned,
 UNIQUE USING HASH (b, c)
 ) engine=ndbcluster;
-ERROR 42000: Column 'c' is used with UNIQUE or INDEX but is not defined as NOT NULL
+Warnings:
+Warning 1121 Ndb does not support unique index on NULL valued attributes, index access with NULL value will become full table scan
+insert t2 values(1,1,NULL),(2,2,2),(3,3,NULL),(4,4,4),(5,5,NULL),(6,6,6),(7,7,NULL),(8,3,NULL),(9,3,NULL);
+select * from t2 where c IS NULL order by a;
+a b c
+1 1 NULL
+3 3 NULL
+5 5 NULL
+7 7 NULL
+8 3 NULL
+9 3 NULL
+select * from t2 where b = 3 AND c IS NULL order by a;
+a b c
+3 3 NULL
+8 3 NULL
+9 3 NULL
+select * from t2 where (b = 3 OR b = 5) AND c IS NULL order by a;
+a b c
+3 3 NULL
+5 5 NULL
+8 3 NULL
+9 3 NULL
+set @old_ecpd = @@session.engine_condition_pushdown;
+set engine_condition_pushdown = true;
+explain select * from t2 where (b = 3 OR b = 5) AND c IS NULL AND a < 9 order by a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 range PRIMARY,b PRIMARY 4 NULL 1 Using where with pushed condition
+select * from t2 where (b = 3 OR b = 5) AND c IS NULL AND a < 9 order by a;
+a b c
+3 3 NULL
+5 5 NULL
+8 3 NULL
+set engine_condition_pushdown = @old_ecpd;
+drop table t2;
 CREATE TABLE t3 (
 a int unsigned NOT NULL,
 b int unsigned not null,
...
...
mysql-test/t/ndb_index_unique.test
...
...
@@ -85,7 +85,6 @@ select * from t2 order by a;
 drop table t2;
 
---error 1121
 CREATE TABLE t2 (
 a int unsigned NOT NULL PRIMARY KEY,
 b int unsigned not null,
...
...
@@ -93,6 +92,20 @@ CREATE TABLE t2 (
 UNIQUE USING HASH (b, c)
 ) engine=ndbcluster;
+insert t2 values(1,1,NULL),(2,2,2),(3,3,NULL),(4,4,4),(5,5,NULL),(6,6,6),(7,7,NULL),(8,3,NULL),(9,3,NULL);
+select * from t2 where c IS NULL order by a;
+select * from t2 where b = 3 AND c IS NULL order by a;
+select * from t2 where (b = 3 OR b = 5) AND c IS NULL order by a;
+set @old_ecpd = @@session.engine_condition_pushdown;
+set engine_condition_pushdown = true;
+explain select * from t2 where (b = 3 OR b = 5) AND c IS NULL AND a < 9 order by a;
+select * from t2 where (b = 3 OR b = 5) AND c IS NULL AND a < 9 order by a;
+set engine_condition_pushdown = @old_ecpd;
+drop table t2;
 
 #
 # Show use of PRIMARY KEY USING HASH indexes
 #
...
...
sql/ha_ndbcluster.cc
...
...
@@ -1059,6 +1059,7 @@ int ha_ndbcluster::build_index_list(Ndb *ndb, TABLE *tab, enum ILBP phase)
   int error= 0;
   const char *index_name;
   char unique_index_name[FN_LEN];
+  bool null_in_unique_index= false;
   static const char* unique_suffix= "$unique";
   KEY* key_info= tab->key_info;
   const char **key_name= tab->s->keynames.type_names;
...
...
@@ -1096,7 +1097,13 @@ int ha_ndbcluster::build_index_list(Ndb *ndb, TABLE *tab, enum ILBP phase)
       error= create_unique_index(unique_index_name, key_info);
       break;
     case UNIQUE_INDEX:
-      if (!(error= check_index_fields_not_null(i)))
+      if (check_index_fields_not_null(i))
+      {
+        push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+                            ER_NULL_COLUMN_IN_INDEX,
+                            "Ndb does not support unique index on NULL valued attributes, index access with NULL value will become full table scan");
+        null_in_unique_index= true;
+      }
       error= create_unique_index(unique_index_name, key_info);
       break;
     case ORDERED_INDEX:
...
...
@@ -1129,6 +1136,11 @@ int ha_ndbcluster::build_index_list(Ndb *ndb, TABLE *tab, enum ILBP phase)
       m_index[i].unique_index= (void *) index;
       error= fix_unique_index_attr_order(m_index[i], index, key_info);
     }
+    if (idx_type == UNIQUE_INDEX &&
+        phase != ILBP_CREATE &&
+        check_index_fields_not_null(i))
+      null_in_unique_index= true;
+    m_index[i].null_in_unique_index= null_in_unique_index;
   }
 
   DBUG_RETURN(error);
...
...
@@ -1150,7 +1162,7 @@ NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_table(uint inx) const
               ORDERED_INDEX);
 }
 
-int ha_ndbcluster::check_index_fields_not_null(uint inx)
+bool ha_ndbcluster::check_index_fields_not_null(uint inx)
 {
   KEY* key_info= table->key_info + inx;
   KEY_PART_INFO* key_part= key_info->key_part;
...
...
@@ -1161,14 +1173,10 @@ int ha_ndbcluster::check_index_fields_not_null(uint inx)
   {
     Field* field= key_part->field;
     if (field->maybe_null())
-    {
-      my_printf_error(ER_NULL_COLUMN_IN_INDEX, ER(ER_NULL_COLUMN_IN_INDEX),
-                      MYF(0), field->field_name);
-      DBUG_RETURN(ER_NULL_COLUMN_IN_INDEX);
-    }
+      DBUG_RETURN(true);
   }
-  DBUG_RETURN(0);
+  DBUG_RETURN(false);
 }
 
 void ha_ndbcluster::release_metadata()
...
...
@@ -1261,6 +1269,12 @@ inline NDB_INDEX_TYPE ha_ndbcluster::get_index_type(uint idx_no) const
   return m_index[idx_no].type;
 }
 
+inline bool ha_ndbcluster::has_null_in_unique_index(uint idx_no) const
+{
+  DBUG_ASSERT(idx_no < MAX_KEY);
+  return m_index[idx_no].null_in_unique_index;
+}
+
 /*
   Get the flags for an index
...
...
@@ -2089,6 +2103,42 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
   DBUG_RETURN(next_result(buf));
 }
 
+/*
+  Unique index scan in NDB (full table scan with scan filter)
+*/
+
+int ha_ndbcluster::unique_index_scan(const KEY* key_info, 
+                                     const byte *key, 
+                                     uint key_len,
+                                     byte *buf)
+{
+  int res;
+  NdbScanOperation *op;
+  NdbTransaction *trans= m_active_trans;
+
+  DBUG_ENTER("unique_index_scan");
+  DBUG_PRINT("enter", ("Starting new scan on %s", m_tabname));
+
+  NdbOperation::LockMode lm=
+    (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
+  bool need_pk= (lm == NdbOperation::LM_Read);
+  if (!(op=trans->getNdbScanOperation((const NDBTAB *) m_table)) ||
+      op->readTuples(lm, (need_pk)?NdbScanOperation::SF_KeyInfo:0,
+                     parallelism))
+    ERR_RETURN(trans->getNdbError());
+  m_active_cursor= op;
+  if (generate_scan_filter_from_key(op, key_info, key, key_len, buf))
+    DBUG_RETURN(ndb_err(trans));
+  if ((res= define_read_attrs(buf, op)))
+    DBUG_RETURN(res);
+
+  if (execute_no_commit(this,trans,false) != 0)
+    DBUG_RETURN(ndb_err(trans));
+  DBUG_PRINT("exit", ("Scan started successfully"));
+  DBUG_RETURN(next_result(buf));
+}
+
 /*
   Start full table scan in NDB
 */
...
...
@@ -2763,7 +2813,7 @@ int ha_ndbcluster::index_read(byte *buf,
     }
     else if (type == UNIQUE_INDEX)
     {
-      DBUG_RETURN(1);
+      DBUG_RETURN(unique_index_scan(key_info, key, key_len, buf));
     }
     break;
   case ORDERED_INDEX:
...
...
@@ -2856,12 +2906,13 @@ int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key,
                                            bool eq_r, bool sorted, byte* buf)
 {
-  KEY* key_info;
+  ndb_index_type type= get_index_type(active_index);
+  KEY* key_info;
   int error= 1;
   DBUG_ENTER("ha_ndbcluster::read_range_first_to_buf");
   DBUG_PRINT("info", ("eq_r: %d, sorted: %d", eq_r, sorted));
 
-  switch (get_index_type(active_index)){
+  switch (type){
   case PRIMARY_KEY_ORDERED_INDEX:
   case PRIMARY_KEY_INDEX:
     key_info= table->key_info + active_index;
...
...
@@ -2887,6 +2938,14 @@ int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key,
       error= unique_index_read(start_key->key, start_key->length, buf);
       DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error);
     }
+    else if (type == UNIQUE_INDEX)
+    {
+      error= unique_index_scan(key_info, 
+                               start_key->key, 
+                               start_key->length, 
+                               buf);
+      DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error);
+    }
     break;
   default:
     break;
...
...
@@ -6116,6 +6175,30 @@ ha_ndbcluster::release_completed_operations(NdbTransaction *trans,
   trans->releaseCompletedOperations();
 }
 
+bool 
+ha_ndbcluster::null_value_index_search(KEY_MULTI_RANGE *ranges,
+                                       KEY_MULTI_RANGE *end_range,
+                                       HANDLER_BUFFER *buffer)
+{
+  DBUG_ENTER("null_value_index_search");
+  KEY* key_info= table->key_info + active_index;
+  KEY_MULTI_RANGE *range= ranges;
+  ulong reclength= table->s->reclength;
+  byte *curr= (byte*)buffer->buffer;
+  byte *end_of_buffer= (byte*)buffer->buffer_end;
+
+  for (; range<end_range && curr+reclength <= end_of_buffer;
+       range++)
+  {
+    const byte *key= range->start_key.key;
+    uint key_len= range->start_key.length;
+    if (check_null_in_key(key_info, key, key_len))
+      DBUG_RETURN(true);
+    curr += reclength;
+  }
+  DBUG_RETURN(false);
+}
+
 int
 ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
                                       KEY_MULTI_RANGE *ranges,
...
...
@@ -6132,11 +6215,14 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
   NdbOperation* op;
   Thd_ndb *thd_ndb= get_thd_ndb(current_thd);
 
-  if (uses_blob_value(m_retrieve_all_fields))
-  {
-    /**
-     * blobs can't be batched currently
-     */
+  /**
+   * blobs and unique hash index with NULL can't be batched currently
+   */
+  if (uses_blob_value(m_retrieve_all_fields) ||
+      (index_type ==  UNIQUE_INDEX &&
+       has_null_in_unique_index(active_index) &&
+       null_value_index_search(ranges, ranges+range_count, buffer)))
+  {
     m_disable_multi_read= TRUE;
     DBUG_RETURN(handler::read_multi_range_first(found_range_p,
                                                 ranges,
...
...
@@ -6192,7 +6278,6 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
         goto range;
       /* fall through */
     case PRIMARY_KEY_INDEX:
     {
       multi_range_curr->range_flag |= UNIQUE_RANGE;
       if ((op= m_active_trans->getNdbOperation(tab)) &&
           !op->readTuple(lm) &&
...
...
@@ -6203,8 +6288,6 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
       else
         ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError());
       break;
     }
     break;
     case UNIQUE_ORDERED_INDEX:
       if (!(multi_range_curr->start_key.length == key_info->key_length &&
             multi_range_curr->start_key.flag == HA_READ_KEY_EXACT &&
...
...
@@ -6213,7 +6296,6 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
         goto range;
       /* fall through */
     case UNIQUE_INDEX:
     {
       multi_range_curr->range_flag |= UNIQUE_RANGE;
       if ((op= m_active_trans->getNdbIndexOperation(unique_idx, tab)) &&
           !op->readTuple(lm) &&
...
...
@@ -6224,7 +6306,6 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
       else
         ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError());
       break;
     }
     case ORDERED_INDEX:
     {
   range:
...
...
@@ -7967,13 +8048,32 @@ ha_ndbcluster::generate_scan_filter(Ndb_cond_stack *ndb_cond_stack,
                                     NdbScanOperation *op)
 {
   DBUG_ENTER("generate_scan_filter");
   if (ndb_cond_stack)
   {
-    DBUG_PRINT("info", ("Generating scan filter"));
     NdbScanFilter filter(op);
-    bool multiple_cond= FALSE;
-    // Wrap an AND group around multiple conditions
-    if (ndb_cond_stack->next) {
-      multiple_cond= TRUE;
-      if (filter.begin() == -1)
-        DBUG_RETURN(1);
+
+    DBUG_RETURN(generate_scan_filter_from_cond(ndb_cond_stack, filter));
+  }
+  else
+  {
+    DBUG_PRINT("info", ("Empty stack"));
+  }
+
+  DBUG_RETURN(0);
+}
+
+int
+ha_ndbcluster::generate_scan_filter_from_cond(Ndb_cond_stack *ndb_cond_stack,
+                                              NdbScanFilter& filter)
+{
+  DBUG_ENTER("generate_scan_filter_from_cond");
+  bool multiple_cond= FALSE;
+
+  DBUG_PRINT("info", ("Generating scan filter"));
+  // Wrap an AND group around multiple conditions
+  if (ndb_cond_stack->next)
+  {
+    multiple_cond= TRUE;
+    if (filter.begin() == -1)
+      DBUG_RETURN(1);
...
...
@@ -7992,11 +8092,55 @@ ha_ndbcluster::generate_scan_filter(Ndb_cond_stack *ndb_cond_stack,
     }
     if (multiple_cond && filter.end() == -1)
       DBUG_RETURN(1);
-  }
-  else
-  {
-    DBUG_PRINT("info", ("Empty stack"));
-  }
 
   DBUG_RETURN(0);
 }
 
+int
+ha_ndbcluster::generate_scan_filter_from_key(NdbScanOperation *op,
+                                             const KEY* key_info,
+                                             const byte *key,
+                                             uint key_len,
+                                             byte *buf)
+{
+  KEY_PART_INFO* key_part= key_info->key_part;
+  KEY_PART_INFO* end= key_part+key_info->key_parts;
+  NdbScanFilter filter(op);
+  int res;
+  DBUG_ENTER("generate_scan_filter_from_key");
+
+  filter.begin(NdbScanFilter::AND);
+  for (; key_part != end; key_part++)
+  {
+    Field* field= key_part->field;
+    uint32 pack_len= field->pack_length();
+    const byte* ptr= key;
+    char buf[256];
+    DBUG_PRINT("info", ("Filtering value for %s", field->field_name));
+    DBUG_DUMP("key", (char*)ptr, pack_len);
+    if (key_part->null_bit)
+    {
+      DBUG_PRINT("info", ("Generating ISNULL filter"));
+      if (filter.isnull(key_part->fieldnr-1) == -1)
+        DBUG_RETURN(1);
+    }
+    else
+    {
+      DBUG_PRINT("info", ("Generating EQ filter"));
+      if (filter.cmp(NdbScanFilter::COND_EQ,
+                     key_part->fieldnr-1,
+                     ptr,
+                     pack_len) == -1)
+        DBUG_RETURN(1);
+    }
+    key += key_part->store_length;
+  }
+  // Add any pushed condition
+  if (m_cond_stack &&
+      (res= generate_scan_filter_from_cond(m_cond_stack, filter)))
+    DBUG_RETURN(res);
+
+  if (filter.end() == -1)
+    DBUG_RETURN(1);
+
+  DBUG_RETURN(0);
+}
...
...
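For orientation before the header changes: the UNIQUE_INDEX branch of index_read() now falls back to unique_index_scan() instead of returning an error, and generate_scan_filter_from_key() turns the probed key into a scan filter (an ISNULL condition for nullable key parts, an EQ condition otherwise, ANDed with any pushed condition). A sketch of a query from the test that exercises this path, with explanatory comments rather than server output:

-- Unique hash key (b, c) probed with a NULL key part: the hash index cannot
-- be used, so NDB runs a full table scan whose filter is built from the key
-- (EQ on b, ISNULL on c).
select * from t2 where b = 3 AND c IS NULL order by a;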
sql/ha_ndbcluster.h
...
...
@@ -56,6 +56,7 @@ typedef struct ndb_index_data {
   void *index;
   void *unique_index;
   unsigned char *unique_index_attrid_map;
+  bool null_in_unique_index;
 } NDB_INDEX_DATA;
 
 typedef struct st_ndbcluster_share {
...
...
@@ -546,7 +547,9 @@ class ha_ndbcluster: public handler
                             KEY_MULTI_RANGE *ranges, uint range_count,
                             bool sorted, HANDLER_BUFFER *buffer);
   int read_multi_range_next(KEY_MULTI_RANGE **found_range_p);
+  bool null_value_index_search(KEY_MULTI_RANGE *ranges,
+                               KEY_MULTI_RANGE *end_range, HANDLER_BUFFER *buffer);
   bool get_error_message(int error, String *buf);
   int info(uint);
   int extra(enum ha_extra_function operation);
...
...
@@ -649,7 +652,8 @@ static void set_tabname(const char *pathname, char *tabname);
   void release_metadata();
   NDB_INDEX_TYPE get_index_type(uint idx_no) const;
   NDB_INDEX_TYPE get_index_type_from_table(uint index_no) const;
-  int check_index_fields_not_null(uint index_no);
+  bool has_null_in_unique_index(uint idx_no) const;
+  bool check_index_fields_not_null(uint index_no);
   int pk_read(const byte *key, uint key_len, byte *buf);
   int complemented_pk_read(const byte *old_data, byte *new_data);
...
...
@@ -663,6 +667,11 @@ static void set_tabname(const char *pathname, char *tabname);
   int ordered_index_scan(const key_range *start_key, const key_range *end_key,
                          bool sorted, bool descending, byte* buf);
+  int unique_index_scan(const KEY* key_info, 
+                        const byte *key, 
+                        uint key_len,
+                        byte *buf);
   int full_table_scan(byte * buf);
   int fetch_next(NdbScanOperation* op);
   int next_result(byte *buf);
...
...
@@ -725,6 +734,13 @@ bool uses_blob_value(bool all_fields);
   int build_scan_filter(Ndb_cond* &cond, NdbScanFilter* filter);
   int generate_scan_filter(Ndb_cond_stack* cond_stack, NdbScanOperation* op);
+  int generate_scan_filter_from_cond(Ndb_cond_stack* cond_stack,
+                                     NdbScanFilter& filter);
+  int generate_scan_filter_from_key(NdbScanOperation* op,
+                                    const KEY* key_info,
+                                    const byte *key,
+                                    uint key_len,
+                                    byte *buf);
   friend int execute_commit(ha_ndbcluster*, NdbTransaction*);
   friend int execute_no_commit(ha_ndbcluster*, NdbTransaction*, bool);
...
...