nexedi / MariaDB
Commit cfa413bf authored Oct 27, 2010 by Georgi Kodinov
merge
parents 1ed6c863 0ff3ac9a
Showing 20 changed files with 302 additions and 26 deletions (+302, -26)
mysql-test/r/ps.result (+38, -0)
mysql-test/suite/innodb/r/innodb_bug57255.result (+10, -0)
mysql-test/suite/innodb/t/innodb_bug57255.test (+36, -0)
mysql-test/suite/perfschema/r/schema.result (+2, -2)
mysql-test/t/ps.test (+21, -1)
sql/ha_partition.h (+12, -0)
sql/item_subselect.cc (+15, -11)
sql/sql_partition.cc (+2, -1)
sql/sql_prepare.cc (+4, -0)
sql/sql_show.cc (+4, -0)
storage/innobase/dict/dict0load.c (+75, -8)
storage/innobase/handler/ha_innodb.cc (+13, -0)
storage/innobase/include/db0err.h (+3, -0)
storage/innobase/include/dict0load.h (+2, -0)
storage/innobase/include/dict0mem.h (+21, -0)
storage/innobase/include/que0que.h (+3, -0)
storage/innobase/row/row0merge.c (+1, -1)
storage/innobase/row/row0mysql.c (+28, -2)
storage/innobase/ut/ut0ut.c (+2, -0)
support-files/mysql.spec.sh (+10, -0)
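The InnoDB side of this merge brings in the work around Bug #57255: dictionary loading of foreign-key chains and ON DELETE/UPDATE CASCADE recursion are both capped at 255 levels (the DICT_FK_MAX_RECURSIVE_LOAD and FK_MAX_CASCADE_DEL defines below), and a new DB_FOREIGN_EXCEED_MAX_CASCADE error reports chains that exceed the cap. As rough orientation, a minimal SQL sketch of the kind of schema involved; the table names here are illustrative and not part of the commit:

-- Illustrative only (not from this commit): each table adds one cascade level.
CREATE TABLE p0 (id INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
CREATE TABLE p1 (id INT NOT NULL PRIMARY KEY,
                 FOREIGN KEY (id) REFERENCES p0 (id) ON DELETE CASCADE) ENGINE=InnoDB;
CREATE TABLE p2 (id INT NOT NULL PRIMARY KEY,
                 FOREIGN KEY (id) REFERENCES p1 (id) ON DELETE CASCADE) ENGINE=InnoDB;
-- A DELETE on p0 cascades through p1 into p2: two levels deep, well under the cap.
-- Extending such a chain past 255 tables and deleting from p0 would now fail with
-- the new "cascading foreign key constraints ... exceed max depth" message.
DELETE FROM p0 WHERE id = 1;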
mysql-test/r/ps.result
...
...
@@ -3005,6 +3005,44 @@ EXECUTE stmt;
1
DEALLOCATE PREPARE stmt;
DROP TABLE t1;
#
# Bug#54494 crash with explain extended and prepared statements
#
CREATE TABLE t1(a INT);
INSERT INTO t1 VALUES (1),(2);
PREPARE stmt FROM 'EXPLAIN EXTENDED SELECT 1 FROM t1 RIGHT JOIN t1 t2 ON 1';
EXECUTE stmt;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
Warnings:
Note 1003 select 1 AS `1` from `test`.`t1` `t2` left join `test`.`t1` on(1) where 1
EXECUTE stmt;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
Warnings:
Note 1003 select 1 AS `1` from `test`.`t1` `t2` left join `test`.`t1` on(1) where 1
DEALLOCATE PREPARE stmt;
DROP TABLE t1;
#
# Bug#54488 crash when using explain and prepared statements with subqueries
#
CREATE TABLE t1(f1 INT);
INSERT INTO t1 VALUES (1),(1);
PREPARE stmt FROM 'EXPLAIN SELECT 1 FROM t1 WHERE (SELECT (SELECT 1 FROM t1 GROUP BY f1))';
EXECUTE stmt;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL No tables used
3 SUBQUERY t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort
EXECUTE stmt;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL No tables used
3 SUBQUERY t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort
DEALLOCATE PREPARE stmt;
DROP TABLE t1;
End of 5.1 tests.
...
...
mysql-test/suite/innodb/r/innodb_bug57255.result
new file mode 100644
create table A(id int not null primary key) engine=innodb;
create table B(id int not null auto_increment primary key, f1 int not null, foreign key(f1) references A(id) on delete cascade) engine=innodb;
create table C(id int not null auto_increment primary key, f1 int not null, foreign key(f1) references B(id) on delete cascade) engine=innodb;
insert into A values(1), (2);
DELETE FROM A where id = 1;
DELETE FROM C where f1 = 2;
DELETE FROM A where id = 1;
DROP TABLE C;
DROP TABLE B;
DROP TABLE A;
mysql-test/suite/innodb/t/innodb_bug57255.test
new file mode 100644
# Test Bug #57255. Cascade deletes that affect different rows should not
# result in DB_FOREIGN_EXCEED_MAX_CASCADE error
--source include/have_innodb.inc

create table A(id int not null primary key) engine=innodb;
create table B(id int not null auto_increment primary key, f1 int not null, foreign key(f1) references A(id) on delete cascade) engine=innodb;
create table C(id int not null auto_increment primary key, f1 int not null, foreign key(f1) references B(id) on delete cascade) engine=innodb;

insert into A values(1), (2);

--disable_query_log
let $i = 257;
while ($i)
{
  insert into B(f1) values (1);
  dec $i;
}
let $i = 486;
while ($i)
{
  insert into C(f1) values (2);
  dec $i;
}
--enable_query_log

# Following Deletes should not report error
DELETE FROM A where id = 1;
DELETE FROM C where f1 = 2;
DELETE FROM A where id = 1;

DROP TABLE C;
DROP TABLE B;
DROP TABLE A;
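A reading of the new test above, not part of the commit: the first DELETE on A cascades over all 257 rows in B (every B row references A(id = 1)) and, through B(id = 2), over the 486 rows in C, yet the chain is only two foreign-key levels deep (A -> B -> C). The point of Bug #57255 is that this per-row fan-out must not be counted against the 255-level cascade cap; with the fix the depth counter is reset for each cascaded row (see the row0mysql.c hunk further down), so these deletes succeed. A hypothetical follow-up check in the same session could be:

-- Hypothetical check (not part of the test), run after the first DELETE FROM A:
SELECT COUNT(*) FROM B;   -- expect 0: every B row referenced A(id = 1)
SELECT COUNT(*) FROM C;   -- expect 0: every C row referenced B(id = 2), removed above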
mysql-test/suite/perfschema/r/schema.result
...
...
@@ -195,6 +195,6 @@ show create table THREADS;
Table Create Table
THREADS CREATE TABLE `THREADS` (
`THREAD_ID` int(11) NOT NULL,
`ID` int(11) NOT NULL,
`NAME` varchar(64) NOT NULL
`PROCESSLIST_ID` int(11) DEFAULT NULL,
`NAME` varchar(128) NOT NULL
) ENGINE=PERFORMANCE_SCHEMA DEFAULT CHARSET=utf8
mysql-test/t/ps.test
...
...
@@ -3079,7 +3079,27 @@ EXECUTE stmt;
DEALLOCATE PREPARE stmt;
DROP TABLE t1;

###########################################################################

--echo #
--echo # Bug#54494 crash with explain extended and prepared statements
--echo #

CREATE TABLE t1(a INT);
INSERT INTO t1 VALUES (1),(2);
PREPARE stmt FROM 'EXPLAIN EXTENDED SELECT 1 FROM t1 RIGHT JOIN t1 t2 ON 1';
EXECUTE stmt;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
DROP TABLE t1;

--echo #
--echo # Bug#54488 crash when using explain and prepared statements with subqueries
--echo #

CREATE TABLE t1(f1 INT);
INSERT INTO t1 VALUES (1),(1);
PREPARE stmt FROM 'EXPLAIN SELECT 1 FROM t1 WHERE (SELECT (SELECT 1 FROM t1 GROUP BY f1))';
EXECUTE stmt;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
DROP TABLE t1;

--echo
--echo End of 5.1 tests.
...
...
sql/ha_partition.h
...
...
@@ -935,16 +935,22 @@ class ha_partition :public handler
    /* lock already taken */
    if (auto_increment_safe_stmt_log_lock)
      return;
#ifdef WITH_PARTITION_STORAGE_ENGINE
    DBUG_ASSERT(table_share->ha_part_data && !auto_increment_lock);
#endif
    if (table_share->tmp_table == NO_TMP_TABLE)
    {
      auto_increment_lock= TRUE;
#ifdef WITH_PARTITION_STORAGE_ENGINE
      mysql_mutex_lock(&table_share->ha_part_data->LOCK_auto_inc);
#endif
    }
  }
  virtual void unlock_auto_increment()
  {
#ifdef WITH_PARTITION_STORAGE_ENGINE
    DBUG_ASSERT(table_share->ha_part_data);
#endif
    /*
      If auto_increment_safe_stmt_log_lock is true, we have to keep the lock.
      It will be set to false and thus unlocked at the end of the statement by
...
...
@@ -952,19 +958,25 @@ class ha_partition :public handler
    */
    if (auto_increment_lock && !auto_increment_safe_stmt_log_lock)
    {
#ifdef WITH_PARTITION_STORAGE_ENGINE
      mysql_mutex_unlock(&table_share->ha_part_data->LOCK_auto_inc);
#endif
      auto_increment_lock= FALSE;
    }
  }
  virtual void set_auto_increment_if_higher(Field *field)
  {
#ifdef WITH_PARTITION_STORAGE_ENGINE
    ulonglong nr= (((Field_num*) field)->unsigned_flag ||
                   field->val_int() > 0) ? field->val_int() : 0;
#endif
    lock_auto_increment();
#ifdef WITH_PARTITION_STORAGE_ENGINE
    DBUG_ASSERT(table_share->ha_part_data->auto_inc_initialized == TRUE);
    /* must check when the mutex is taken */
    if (nr >= table_share->ha_part_data->next_auto_inc_val)
      table_share->ha_part_data->next_auto_inc_val= nr + 1;
#endif
    unlock_auto_increment();
  }
...
...
sql/item_subselect.cc
...
...
@@ -1911,18 +1911,22 @@ int subselect_single_select_engine::exec()
  }
  if (!select_lex->uncacheable && thd->lex->describe &&
      !(join->select_options & SELECT_DESCRIBE) &&
      join->need_tmp && item->const_item())
      join->need_tmp)
  {
    /*
      Force join->join_tmp creation, because this subquery will be replaced
      by a simple select from the materialization temp table by optimize()
      called by EXPLAIN and we need to preserve the initial query structure
      so we can display it.
    */
    select_lex->uncacheable|= UNCACHEABLE_EXPLAIN;
    select_lex->master_unit()->uncacheable|= UNCACHEABLE_EXPLAIN;
    if (join->init_save_join_tab())
      DBUG_RETURN(1);                        /* purecov: inspected */
    item->update_used_tables();
    if (item->const_item())
    {
      /*
        Force join->join_tmp creation, because this subquery will be replaced
        by a simple select from the materialization temp table by optimize()
        called by EXPLAIN and we need to preserve the initial query structure
        so we can display it.
      */
      select_lex->uncacheable|= UNCACHEABLE_EXPLAIN;
      select_lex->master_unit()->uncacheable|= UNCACHEABLE_EXPLAIN;
      if (join->init_save_join_tab())
        DBUG_RETURN(1);                        /* purecov: inspected */
    }
  }
  if (item->engine_changed)
  {
...
...
sql/sql_partition.cc
...
...
@@ -166,12 +166,13 @@ int get_part_iter_for_interval_via_walking(partition_info *part_info,
                                              uint min_len, uint max_len,
                                              uint flags,
                                              PARTITION_ITERATOR *part_iter);
#ifdef WITH_PARTITION_STORAGE_ENGINE
static int cmp_rec_and_tuple(part_column_list_val *val, uint32 nvals_in_rec);
static int cmp_rec_and_tuple_prune(part_column_list_val *val,
                                   uint32 n_vals_in_rec,
                                   bool tail_is_min);
#ifdef WITH_PARTITION_STORAGE_ENGINE
/*
  Convert constants in VALUES definition to the character set the
  corresponding field uses.
...
...
sql/sql_prepare.cc
...
...
@@ -2420,11 +2420,15 @@ void reinit_stmt_before_use(THD *thd, LEX *lex)
        sl->where= sl->prep_where->copy_andor_structure(thd);
        sl->where->cleanup();
      }
      else
        sl->where= NULL;
      if (sl->prep_having)
      {
        sl->having= sl->prep_having->copy_andor_structure(thd);
        sl->having->cleanup();
      }
      else
        sl->having= NULL;
      DBUG_ASSERT(sl->join == 0);
      ORDER *order;
      /* Fix GROUP list */
...
...
sql/sql_show.cc
...
...
@@ -98,11 +98,13 @@ static TYPELIB grant_types = { sizeof(grant_names)/sizeof(char **),
static void store_key_options(THD *thd, String *packet, TABLE *table,
                              KEY *key_info);

#ifdef WITH_PARTITION_STORAGE_ENGINE
static void get_cs_converted_string_value(THD *thd,
                                          String *input_str,
                                          String *output_str,
                                          CHARSET_INFO *cs,
                                          bool use_hex);
#endif

static void
append_algorithm(TABLE_LIST *table, String *buff);
...
...
@@ -7850,6 +7852,7 @@ void initialize_information_schema_acl()
                              &is_internal_schema_access);
}

#ifdef WITH_PARTITION_STORAGE_ENGINE
/*
  Convert a string in character set in column character set format
  to utf8 character set if possible, the utf8 character set string
...
...
@@ -7941,3 +7944,4 @@ static void get_cs_converted_string_value(THD *thd,
  }
  return;
}
#endif
storage/innobase/dict/dict0load.c
...
...
@@ -1718,17 +1718,28 @@ dict_load_table(
	err = dict_load_indexes(table, heap);

	/* Initialize table foreign_child value. Its value could be
	changed when dict_load_foreigns() is called below */
	table->fk_max_recusive_level = 0;

	/* If the force recovery flag is set, we open the table irrespective
	of the error condition, since the user may want to dump data from the
	clustered index. However we load the foreign key information only if
	all indexes were loaded. */
	if (!cached) {
	} else if (err == DB_SUCCESS) {
		err = dict_load_foreigns(table->name, TRUE);
		err = dict_load_foreigns(table->name, TRUE, TRUE);

		if (err != DB_SUCCESS) {
			dict_table_remove_from_cache(table);
			table = NULL;
		}
	} else if (!srv_force_recovery) {
		dict_table_remove_from_cache(table);
		table = NULL;
	}

	table->fk_max_recusive_level = 0;
#if 0
if (err != DB_SUCCESS && table != NULL) {
...
...
@@ -1952,8 +1963,12 @@ dict_load_foreign(
/*==============*/
	const char*	id,	/*!< in: foreign constraint id as a
				null-terminated string */
	ibool		check_charsets)
	ibool		check_charsets,
				/*!< in: TRUE=check charset compatibility */
	ibool		check_recursive)
				/*!< in: Whether to record the foreign table
				parent count to avoid unlimited recursive
				load of chained foreign tables */
{
	dict_foreign_t*	foreign;
	dict_table_t*	sys_foreign;
...
...
@@ -1967,6 +1982,8 @@ dict_load_foreign(
	ulint		len;
	ulint		n_fields_and_type;
	mtr_t		mtr;
	dict_table_t*	for_table;
	dict_table_t*	ref_table;

	ut_ad(mutex_own(&(dict_sys->mutex)));
...
...
@@ -2051,11 +2068,54 @@ dict_load_foreign(
	dict_load_foreign_cols(id, foreign);

	/* If the foreign table is not yet in the dictionary cache, we
	have to load it so that we are able to make type comparisons
	in the next function call. */

	dict_table_get_low(foreign->foreign_table_name);

	ref_table = dict_table_check_if_in_cache_low(
			foreign->referenced_table_name);

	/* We could possibly wind up in a deep recursive calls if
	we call dict_table_get_low() again here if there
	is a chain of tables concatenated together with
	foreign constraints. In such case, each table is
	both a parent and child of the other tables, and
	act as a "link" in such table chains.

	To avoid such scenario, we would need to check the
	number of ancesters the current table has. If that
	exceeds DICT_FK_MAX_CHAIN_LEN, we will stop loading
	the child table.

	Foreign constraints are loaded in a Breath First fashion,
	that is, the index on FOR_NAME is scanned first, and then
	index on REF_NAME. So foreign constrains in which
	current table is a child (foreign table) are loaded first,
	and then those constraints where current table is a
	parent (referenced) table.

	Thus we could check the parent (ref_table) table's
	reference count (fk_max_recusive_level) to know how deep the
	recursive call is. If the parent table (ref_table) is already
	loaded, and its fk_max_recusive_level is larger than
	DICT_FK_MAX_CHAIN_LEN, we will stop the recursive loading
	by skipping loading the child table. It will not affect foreign
	constraint check for DMLs since child table will be loaded
	at that time for the constraint check. */

	if (!ref_table
	    || ref_table->fk_max_recusive_level < DICT_FK_MAX_RECURSIVE_LOAD) {

		/* If the foreign table is not yet in the dictionary cache, we
		have to load it so that we are able to make type comparisons
		in the next function call. */

		for_table = dict_table_get_low(foreign->foreign_table_name);

		if (for_table && ref_table && check_recursive) {
			/* This is to record the longest chain of ancesters
			this table has, if the parent has more ancesters
			than this table has, record it after add 1 (for this
			parent */
			if (ref_table->fk_max_recusive_level
			    >= for_table->fk_max_recusive_level) {
				for_table->fk_max_recusive_level =
					ref_table->fk_max_recusive_level + 1;
			}
		}
	}
/* Note that there may already be a foreign constraint object in
the dictionary cache for this constraint: then the following
...
...
@@ -2080,6 +2140,8 @@ ulint
dict_load_foreigns(
/*===============*/
	const char*	table_name,	/*!< in: table name */
	ibool		check_recursive,
					/*!< in: Whether to check recursive
					load of tables chained by FK */
	ibool		check_charsets)	/*!< in: TRUE=check charset
					compatibility */
{
...
...
@@ -2181,7 +2243,7 @@ dict_load_foreigns(
		/* Load the foreign constraint definition to the dictionary cache */

		err = dict_load_foreign(id, check_charsets);
		err = dict_load_foreign(id, check_charsets, check_recursive);

		if (err != DB_SUCCESS) {
			btr_pcur_close(&pcur);
...
@@ -2209,6 +2271,11 @@ dict_load_foreigns(
		mtr_start(&mtr);

		/* Switch to scan index on REF_NAME, fk_max_recusive_level
		already been updated when scanning FOR_NAME index, no need to
		update again */
		check_recursive = FALSE;

		goto start_load;
	}
...
...
storage/innobase/handler/ha_innodb.cc
...
...
@@ -900,6 +900,19 @@ convert_error_code_to_mysql(
	case DB_INTERRUPTED:
		my_error(ER_QUERY_INTERRUPTED, MYF(0));
		/* fall through */

	case DB_FOREIGN_EXCEED_MAX_CASCADE:
		push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
				    HA_ERR_ROW_IS_REFERENCED,
				    "InnoDB: Cannot delete/update "
				    "rows with cascading foreign key "
				    "constraints that exceed max "
				    "depth of %d. Please "
				    "drop extra constraints and try "
				    "again", DICT_FK_MAX_RECURSIVE_LOAD);

		/* fall through */

	case DB_ERROR:
	default:
		return(-1); /* unspecified error */
...
...
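The ha_innodb.cc hunk above surfaces the new error to the client as a session warning (via push_warning_printf) before falling through to the generic error return, so the offending statement fails and the depth limit is spelled out in the warning text. A hedged sketch of how that might look from a client session, assuming a cascade chain deeper than 255 levels rooted at an illustrative table deep_root (not a table from this commit):

-- Illustrative session:
DELETE FROM deep_root WHERE id = 1;   -- fails once the 255-level cascade cap is hit
SHOW WARNINGS;                        -- should include the new InnoDB warning:
                                      -- "Cannot delete/update rows with cascading foreign key
                                      --  constraints that exceed max depth of 255 ..."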
storage/innobase/include/db0err.h
...
...
@@ -101,6 +101,9 @@ enum db_err {
					requested but this storage does not
					exist itself or the stats for a given
					table do not exist */
	DB_FOREIGN_EXCEED_MAX_CASCADE,	/* Foreign key constraint related
					cascading delete/update exceeds
					maximum allowed depth */

	/* The following are partial failure codes */
	DB_FAIL = 1000,
...
...
storage/innobase/include/dict0load.h
...
...
@@ -200,6 +200,8 @@ ulint
dict_load_foreigns(
/*===============*/
	const char*	table_name,	/*!< in: table name */
	ibool		check_recursive,
					/*!< in: Whether to check recursive
					load of tables chained by FK */
	ibool		check_charsets);
					/*!< in: TRUE=check charsets
					compatibility */
/********************************************************************//**
...
...
storage/innobase/include/dict0mem.h
...
...
@@ -116,6 +116,21 @@ ROW_FORMAT=REDUNDANT. */
in table->flags. */
/* @} */
/** Tables could be chained together with Foreign key constraint. When
first load the parent table, we would load all of its descedents.
This could result in rescursive calls and out of stack error eventually.
DICT_FK_MAX_RECURSIVE_LOAD defines the maximum number of recursive loads,
when exceeded, the child table will not be loaded. It will be loaded when
the foreign constraint check needs to be run. */
#define DICT_FK_MAX_RECURSIVE_LOAD 255
/** Similarly, when tables are chained together with foreign key constraints
with on cascading delete/update clause, delete from parent table could
result in recursive cascading calls. This defines the maximum number of
such cascading deletes/updates allowed. When exceeded, the delete from
parent table will fail, and user has to drop excessive foreign constraint
before proceeds. */
#define FK_MAX_CASCADE_DEL 255
/**********************************************************************//**
Creates a table memory object.
...
...
@@ -469,6 +484,12 @@ struct dict_table_struct{
					NOT allowed until this count gets to zero;
					MySQL does NOT itself check the number of
					open handles at drop */
	unsigned	fk_max_recusive_level:8;
					/*!< maximum recursive level we support when
					loading tables chained together with FK
					constraints. If exceeds this level, we will
					stop loading child table into memory along with
					its parent table */
	ulint		n_foreign_key_checks_running;
					/*!< count of how many foreign key check
					operations are currently being performed
...
...
storage/innobase/include/que0que.h
...
...
@@ -381,6 +381,9 @@ struct que_thr_struct{
					thus far */
	ulint		lock_state;	/*!< lock state of thread (table or
					row) */
	ulint		fk_cascade_depth; /*!< maximum cascading call depth
					supported for foreign key constraint
					related delete/updates */
};
#define QUE_THR_MAGIC_N 8476583
...
...
storage/innobase/row/row0merge.c
...
...
@@ -2418,7 +2418,7 @@ row_merge_rename_tables(
		goto err_exit;
	}

	err = dict_load_foreigns(old_name, TRUE);
	err = dict_load_foreigns(old_name, FALSE, TRUE);

	if (err != DB_SUCCESS) {
err_exit:
...
...
storage/innobase/row/row0mysql.c
...
...
@@ -635,6 +635,13 @@ row_mysql_handle_errors(
"InnoDB: "
REFMAN
"forcing-recovery.html"
" for help.
\n
"
,
stderr
);
break
;
case
DB_FOREIGN_EXCEED_MAX_CASCADE
:
fprintf
(
stderr
,
"InnoDB: Cannot delete/update rows with"
" cascading foreign key constraints that exceed max"
" depth of %lu
\n
"
"Please drop excessive foreign constraints"
" and try again
\n
"
,
(
ulong
)
DICT_FK_MAX_RECURSIVE_LOAD
);
break
;
default:
fprintf
(
stderr
,
"InnoDB: unknown error code %lu
\n
"
,
(
ulong
)
err
);
...
...
@@ -1440,11 +1447,15 @@ row_update_for_mysql(
run_again:
	thr->run_node = node;
	thr->prev_node = node;
	thr->fk_cascade_depth = 0;

	row_upd_step(thr);

	err = trx->error_state;

	/* Reset fk_cascade_depth back to 0 */
	thr->fk_cascade_depth = 0;

	if (err != DB_SUCCESS) {
		que_thr_stop_for_mysql(thr);
...
...
@@ -1640,12 +1651,27 @@ row_update_cascade_for_mysql(
	trx_t*	trx;

	trx = thr_get_trx(thr);

	/* Increment fk_cascade_depth to record the recursive call depth on
	a single update/delete that affects multiple tables chained
	together with foreign key relations. */
	thr->fk_cascade_depth++;

	if (thr->fk_cascade_depth > FK_MAX_CASCADE_DEL) {
		return(DB_FOREIGN_EXCEED_MAX_CASCADE);
	}

run_again:
	thr->run_node = node;
	thr->prev_node = node;

	row_upd_step(thr);

	/* The recursive call for cascading update/delete happens
	in above row_upd_step(), reset the counter once we come
	out of the recursive call, so it does not accumulate for
	different row deletes */
	thr->fk_cascade_depth = 0;

	err = trx->error_state;
/* Note that the cascade node is a subnode of another InnoDB
...
...
@@ -2120,7 +2146,7 @@ row_table_add_foreign_constraints(
		name, reject_fks);

	if (err == DB_SUCCESS) {
		/* Check that also referencing constraints are ok */
		err = dict_load_foreigns(name, TRUE);
		err = dict_load_foreigns(name, FALSE, TRUE);
	}

	if (err != DB_SUCCESS) {
...
...
@@ -3992,7 +4018,7 @@ row_rename_table_for_mysql(
		an ALTER, not in a RENAME. */

		err = dict_load_foreigns(
			new_name, !old_is_tmp || trx->check_foreigns);
			new_name, FALSE, !old_is_tmp || trx->check_foreigns);

		if (err != DB_SUCCESS) {
			ut_print_timestamp(stderr);
...
...
storage/innobase/ut/ut0ut.c
...
...
@@ -693,6 +693,8 @@ ut_strerr(
		return("Lock structs have exhausted the buffer pool");
	case DB_FOREIGN_DUPLICATE_KEY:
		return("Foreign key activated with duplicate keys");
	case DB_FOREIGN_EXCEED_MAX_CASCADE:
		return("Foreign key cascade delete/update exceeds max depth");
	case DB_TOO_MANY_CONCURRENT_TRXS:
		return("Too many concurrent transactions");
	case DB_UNSUPPORTED:
...
...
support-files/mysql.spec.sh
...
...
@@ -974,11 +974,17 @@ echo "=====" >> $STATUS_HISTORY
%attr(755, root, root) %{_libdir}/mysql/plugin/mypluglib.so
%attr(755, root, root) %{_libdir}/mysql/plugin/semisync_master.so
%attr(755, root, root) %{_libdir}/mysql/plugin/semisync_slave.so
%attr(755, root, root) %{_libdir}/mysql/plugin/auth.so
%attr(755, root, root) %{_libdir}/mysql/plugin/auth_socket.so
%attr(755, root, root) %{_libdir}/mysql/plugin/auth_test_plugin.so
%attr(755, root, root) %{_libdir}/mysql/plugin/debug/adt_null.so
%attr(755, root, root) %{_libdir}/mysql/plugin/debug/libdaemon_example.so
%attr(755, root, root) %{_libdir}/mysql/plugin/debug/mypluglib.so
%attr(755, root, root) %{_libdir}/mysql/plugin/debug/semisync_master.so
%attr(755, root, root) %{_libdir}/mysql/plugin/debug/semisync_slave.so
%attr(755, root, root) %{_libdir}/mysql/plugin/debug/auth.so
%attr(755, root, root) %{_libdir}/mysql/plugin/debug/auth_socket.so
%attr(755, root, root) %{_libdir}/mysql/plugin/debug/auth_test_plugin.so
%if %{WITH_TCMALLOC}
%attr(755, root, root) %{_libdir}/mysql/%{malloc_lib_target}
...
...
@@ -1075,6 +1081,10 @@ echo "=====" >> $STATUS_HISTORY
# merging BK trees)
##############################################################################
%changelog
* Wed Oct  6 2010 Georgi Kodinov <georgi.godinov@oracle.com>

- Added example external authentication (WL#1054) plugin binaries

* Wed Aug 11 2010 Joerg Bruehe <joerg.bruehe@oracle.com>

- With a recent spec file cleanup, names have changed: A "-community" part was dropped.
...
...