nexedi / MariaDB · Commit bd53a308
Authored Nov 29, 2002 by monty@mashka.mysql.fi
Merge work:/my/mysql-4.0 into mashka.mysql.fi:/home/my/mysql-4.0
Parents: 42a49919 6603d752
Showing 29 changed files with 773 additions and 633 deletions (+773 -633)
heap/_check.c                      +54   -3
heap/hp_delete.c                   +4    -0
heap/hp_scan.c                     +1    -0
heap/hp_update.c                   +4    -0
heap/hp_write.c                    +5    -0
include/my_base.h                  +2    -1
isam/extra.c                       +6    -0
myisam/mi_check.c                  +6    -6
myisam/mi_extra.c                  +4    -0
myisam/sort.c                      +12   -10
myisammrg/myrg_extra.c             +2    -1
mysql-test/r/multi_update.result   +19   -29
mysql-test/t/multi_update.test     +21   -29
mysys/mf_iocache.c                 +2    -0
sql/item_cmpfunc.cc                +4    -4
sql/mysql_priv.h                   +19   -1
sql/sql_base.cc                    +11   -6
sql/sql_class.h                    +31   -32
sql/sql_delete.cc                  +32   -35
sql/sql_insert.cc                  +6    -18
sql/sql_olap.cc                    +2    -2
sql/sql_parse.cc                   +17   -62
sql/sql_select.cc                  +78   -7
sql/sql_select.h                   +1    -0
sql/sql_union.cc                   +11   -8
sql/sql_update.cc                  +401  -367
sql/sql_yacc.yy                    +6    -5
sql/table.h                        +10   -6
sql/uniques.cc                     +2    -1
heap/_check.c
@@ -21,19 +21,70 @@
 static int check_one_key(HP_KEYDEF *keydef, uint keynr, ulong records,
                          ulong blength, my_bool print_status);

-/* Returns 0 if the HEAP is ok */
+/*
+  Check if keys and rows are ok in a heap table
+
+  SYNOPSIS
+    heap_check_heap()
+    info              Table handler
+    print_status      Prints some extra status
+
+  NOTES
+    Doesn't change the state of the table handler
+
+  RETURN VALUES
+    0  ok
+    1  error
+*/

 int heap_check_heap(HP_INFO *info, my_bool print_status)
 {
   int error;
   uint key;
   ulong records= 0, deleted= 0, pos, next_block;
   HP_SHARE *share= info->s;
-  DBUG_ENTER("heap_check_keys");
+  HP_INFO save_info= *info;            /* Needed because scan_init */
+  DBUG_ENTER("heap_check_heap");

   for (error= key= 0 ; key < share->keys ; key++)
     error|= check_one_key(share->keydef + key, key, share->records,
                           share->blength, print_status);
+
+  /*
+    This is basicly the same code as in hp_scan, but we repeat it here to
+    get shorter DBUG log file.
+  */
+  for (pos= next_block= 0 ; ; pos++)
+  {
+    if (pos < next_block)
+    {
+      info->current_ptr+= share->block.recbuffer;
+    }
+    else
+    {
+      next_block+= share->block.records_in_block;
+      if (next_block >= share->records + share->deleted)
+      {
+        next_block= share->records + share->deleted;
+        if (pos >= next_block)
+          break;                       /* End of file */
+      }
+    }
+    _hp_find_record(info, pos);
+    if (!info->current_ptr[share->reclength])
+      deleted++;
+    else
+      records++;
+  }
+
+  if (records != share->records || deleted != share->deleted)
+  {
+    DBUG_PRINT("error",("Found rows: %lu (%lu) deleted %lu (%lu)",
+                        records, share->records, deleted, share->deleted));
+    error= 1;
+  }
+  *info= save_info;
   DBUG_RETURN(error);
 }
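The following heap hunks wire this checker into debug builds through DBUG_EXECUTE. As a rough illustration of the same pattern (not part of this commit; it assumes the internal heapdef.h header declares heap_check_heap()), a caller wanting the same verification at another point could use a small helper like this:

#include "heapdef.h"   /* assumed internal header declaring heap_check_heap() */

/* Hypothetical sketch: verify a HEAP table after a modification.
   In release builds (DBUG_OFF or without EXTRA_HEAP_DEBUG) it is a no-op. */
static void check_heap_after_write(HP_INFO *info)
{
#if !defined(DBUG_OFF) && defined(EXTRA_HEAP_DEBUG)
  /* Same pattern the commit adds to hp_write/hp_update/hp_delete */
  DBUG_EXECUTE("check_heap", heap_check_heap(info, 0););
#else
  (void) info;                          /* silence unused-parameter warnings */
#endif
}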
heap/hp_delete.c
@@ -48,6 +48,10 @@ int heap_delete(HP_INFO *info, const byte *record)
   pos[share->reclength]= 0;            /* Record deleted */
   share->deleted++;
   info->current_hash_ptr= 0;
+#if !defined(DBUG_OFF) && defined(EXTRA_HEAP_DEBUG)
+  DBUG_EXECUTE("check_heap", heap_check_heap(info, 0););
+#endif
   DBUG_RETURN(0);
 err:
   if (++(share->records) == share->blength)
heap/hp_scan.c
@@ -62,6 +62,7 @@ int heap_scan(register HP_INFO *info, byte *record)
   }
   if (!info->current_ptr[share->reclength])
   {
     DBUG_PRINT("warning",("Found deleted record"));
+    info->update= HA_STATE_PREV_FOUND | HA_STATE_NEXT_FOUND;
     DBUG_RETURN(my_errno=HA_ERR_RECORD_DELETED);
   }
heap/hp_update.c
@@ -46,6 +46,10 @@ int heap_update(HP_INFO *info, const byte *old, const byte *heap_new)
   memcpy(pos, heap_new, (size_t) share->reclength);
   if (++(share->records) == share->blength)
     share->blength+= share->blength;
+#if !defined(DBUG_OFF) && defined(EXTRA_HEAP_DEBUG)
+  DBUG_EXECUTE("check_heap", heap_check_heap(info, 0););
+#endif
   DBUG_RETURN(0);
 err:
heap/hp_write.c
@@ -60,7 +60,11 @@ int heap_write(HP_INFO *info, const byte *record)
   info->current_ptr= pos;
   info->current_hash_ptr= 0;
   info->update|= HA_STATE_AKTIV;
+#if !defined(DBUG_OFF) && defined(EXTRA_HEAP_DEBUG)
+  DBUG_EXECUTE("check_heap", heap_check_heap(info, 0););
+#endif
   DBUG_RETURN(0);
 err:
   DBUG_PRINT("info",("Duplicate key: %d", key));
   info->errkey= key;
@@ -74,6 +78,7 @@ int heap_write(HP_INFO *info, const byte *record)
   *((byte**) pos)= share->del_link;
   share->del_link= pos;
+  pos[share->reclength]= 0;            /* Record deleted */
   DBUG_RETURN(my_errno);
 } /* heap_write */
include/my_base.h
@@ -109,7 +109,8 @@ enum ha_extra_function {
   HA_EXTRA_BULK_INSERT_BEGIN,
   HA_EXTRA_BULK_INSERT_FLUSH,          /* Flush one index */
   HA_EXTRA_BULK_INSERT_END,
-  HA_EXTRA_PREPARE_FOR_DELETE
+  HA_EXTRA_PREPARE_FOR_DELETE,
+  HA_EXTRA_PREPARE_FOR_UPDATE          /* Remove read cache if problems */
 };

 /* The following is parameter to ha_panic() */
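HA_EXTRA_PREPARE_FOR_UPDATE is a hint passed through the storage engines' extra() interface; the isam, MyISAM and MERGE hunks below teach each engine to drop its read/write cache for dynamic-row tables when it arrives. As a hypothetical sketch only (not from the commit; the wrapping function is invented, while mi_extra()'s signature is taken from the myisam/mi_extra.c hunk later in this diff), a caller could pass the hint like this:

#include "my_base.h"
#include "myisam.h"

/* Hypothetical helper: ask MyISAM to drop its record cache before a
   multi-table UPDATE rewrites rows in the data file.  Table formats other
   than dynamic rows simply ignore the hint (see the mi_extra() hunk). */
static int prepare_table_for_update(MI_INFO *handle)
{
  return mi_extra(handle, HA_EXTRA_PREPARE_FOR_UPDATE, 0);
}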
isam/extra.c
@@ -123,6 +123,7 @@ int nisam_extra(N_INFO *info, enum ha_extra_function function)
   }
 #endif
     if (!(info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)))
     {
       if (!(init_io_cache(&info->rec_cache, info->dfile, 0, WRITE_CACHE,
                           info->s->state.data_file_length,
                           (pbool) (info->lock_type != F_UNLCK),
@@ -131,7 +132,12 @@ int nisam_extra(N_INFO *info, enum ha_extra_function function)
         info->opt_flag|= WRITE_CACHE_USED;
         info->update&= ~HA_STATE_ROW_CHANGED;
       }
     }
     break;
+  case HA_EXTRA_PREPARE_FOR_UPDATE:
+    if (info->s->data_file_type != DYNAMIC_RECORD)
+      break;
+    /* Remove read/write cache if dynamic rows */
   case HA_EXTRA_NO_CACHE:
     if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
     {
myisam/mi_check.c
@@ -2329,13 +2329,13 @@ int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info,
   for (i= 0 ; i < sort_info.total_keys ; i++)
   {
     sort_param[i].read_cache= param->read_cache;
-    sort_param[i].sortbuff_size=
     /*
       two approaches: the same amount of memory for each thread
      or the memory for the same number of keys for each thread...
      In the second one all the threads will fill their sort_buffers
      (and call write_keys) at the same time, putting more stress on i/o.
     */
+    sort_param[i].sortbuff_size=
 #ifndef USING_SECOND_APPROACH
       param->sort_buffer_length/sort_info.total_keys;
 #else
myisam/mi_extra.c
@@ -165,6 +165,10 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg)
                         HA_STATE_EXTEND_BLOCK);
     }
     break;
+  case HA_EXTRA_PREPARE_FOR_UPDATE:
+    if (info->s->data_file_type != DYNAMIC_RECORD)
+      break;
+    /* Remove read/write cache if dynamic rows */
   case HA_EXTRA_NO_CACHE:
     if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED))
     {
myisam/sort.c
@@ -344,7 +344,6 @@ pthread_handler_decl(thr_find_all_keys,arg)
       mi_check_print_error(info->sort_info->param,
                            "Sort buffer to small"); /* purecov: tested */
       goto err; /* purecov: tested */
     }
-    // (*info->lock_in_memory)(info->sort_info->param);/* Everything is allocated */
     if (info->sort_info->param->testflag & T_VERBOSE)
       printf("Key %d - Allocating buffer for %d keys\n", info->key+1, keys);
@@ -424,9 +423,9 @@ int thr_write_keys(MI_SORT_PARAM *sort_param)
   byte *mergebuf= 0;
   LINT_INIT(length);

   for (i= 0, sinfo= sort_param ; i < sort_info->total_keys ;
        i++, rec_per_key_part+= sinfo->keyinfo->keysegs, sinfo++)
   {
     if (!sinfo->sort_keys)
     {
@@ -452,11 +451,14 @@ int thr_write_keys(MI_SORT_PARAM *sort_param)
       }
     }
     my_free((gptr) sinfo->sort_keys, MYF(0));
     my_free(mi_get_rec_buff_ptr(info, sinfo->rec_buff),
             MYF(MY_ALLOW_ZERO_PTR));
     sinfo->sort_keys= 0;
   }

   for (i= 0, sinfo= sort_param ; i < sort_info->total_keys ; i++,
        delete_dynamic(&sinfo->buffpek),
        close_cached_file(&sinfo->tempfile),
        close_cached_file(&sinfo->tempfile_for_exceptions),
myisammrg/myrg_extra.c
@@ -38,7 +38,8 @@ int myrg_extra(MYRG_INFO *info,enum ha_extra_function function,
   }
   else
   {
-    if (function == HA_EXTRA_NO_CACHE || function == HA_EXTRA_RESET)
+    if (function == HA_EXTRA_NO_CACHE || function == HA_EXTRA_RESET ||
+        function == HA_EXTRA_PREPARE_FOR_UPDATE)
       info->cache_in_use= 0;
     if (function == HA_EXTRA_RESET || function == HA_EXTRA_RESET_STATE)
     {
mysql-test/r/multi_update.result
@@ -20,7 +20,7 @@ count(*)
 10
 select count(*) from t2 where t = "bbb";
 count(*)
-10
+50
 select count(*) from t2 where id2 > 90;
 count(*)
 50
@@ -70,71 +70,61 @@ create table t1(id1 int not null primary key, t varchar(100)) pack_keys = 1;
 create table t2(id2 int not null, t varchar(100), index(id2)) pack_keys = 1;
 delete t1 from t1,t2 where t1.id1 = t2.id2 and t1.id1 > 500;
 drop table t1,t2;
-DROP TABLE IF EXISTS a,b,c;
-CREATE TABLE a (
+CREATE TABLE t1 (
 id int(11) NOT NULL default '0',
 name varchar(10) default NULL,
 PRIMARY KEY  (id)
 ) TYPE=MyISAM;
-INSERT INTO a VALUES (1,'aaa'),(2,'aaa'),(3,'aaa');
-CREATE TABLE b (
+INSERT INTO t1 VALUES (1,'aaa'),(2,'aaa'),(3,'aaa');
+CREATE TABLE t2 (
 id int(11) NOT NULL default '0',
 name varchar(10) default NULL,
 PRIMARY KEY  (id)
 ) TYPE=MyISAM;
-INSERT INTO b VALUES (2,'bbb'),(3,'bbb'),(4,'bbb');
-CREATE TABLE c (
+INSERT INTO t2 VALUES (2,'bbb'),(3,'bbb'),(4,'bbb');
+CREATE TABLE t3 (
 id int(11) NOT NULL default '0',
 mydate datetime default NULL,
 PRIMARY KEY  (id)
 ) TYPE=MyISAM;
-INSERT INTO c VALUES (1,'2002-02-04 00:00:00'),(3,'2002-05-12 00:00:00'),(5,'2002-05-12 00:00:00'),(6,'2002-06-22
+INSERT INTO t3 VALUES (1,'2002-02-04 00:00:00'),(3,'2002-05-12 00:00:00'),(5,'2002-05-12 00:00:00'),(6,'2002-06-22
 00:00:00'),(7,'2002-07-22 00:00:00');
-delete a,b,c from a,b,c
-where to_days(now())-to_days(c.mydate)>=30
-and c.id=a.id and c.id=b.id;
-select * from c;
+delete t1,t2,t3 from t1,t2,t3 where to_days(now())-to_days(t3.mydate)>=30 and t3.id=t1.id and t3.id=t2.id;
+select * from t3;
 id	mydate
 1	2002-02-04 00:00:00
 5	2002-05-12 00:00:00
 6	2002-06-22 00:00:00
 7	2002-07-22 00:00:00
-DROP TABLE IF EXISTS a,b,c;
-drop table if exists parent, child;
-CREATE TABLE IF NOT EXISTS `parent` (
+DROP TABLE IF EXISTS t1,t2,t3;
+CREATE TABLE IF NOT EXISTS `t1` (
 `id` int(11) NOT NULL auto_increment,
 `tst` text,
 `tst1` text,
 PRIMARY KEY  (`id`)
 ) TYPE=MyISAM;
-CREATE TABLE IF NOT EXISTS `child` (
+CREATE TABLE IF NOT EXISTS `t2` (
 `ID` int(11) NOT NULL auto_increment,
 `ParId` int(11) default NULL,
 `tst` text,
 `tst1` text,
 PRIMARY KEY  (`ID`),
-KEY `IX_ParId_child` (`ParId`),
-FOREIGN KEY (`ParId`) REFERENCES `test.parent` (`id`)
+KEY `IX_ParId_t2` (`ParId`),
+FOREIGN KEY (`ParId`) REFERENCES `t1` (`id`)
 ) TYPE=MyISAM;
-INSERT INTO parent(tst,tst1)
-VALUES("MySQL","MySQL AB"), ("MSSQL","Microsoft"), ("ORACLE","ORACLE");
-INSERT INTO child(ParId)
-VALUES(1), (2), (3);
-select * from child;
+INSERT INTO t1(tst,tst1) VALUES("MySQL","MySQL AB"), ("MSSQL","Microsoft"), ("ORACLE","ORACLE");
+INSERT INTO t2(ParId) VALUES(1), (2), (3);
+select * from t2;
 ID	ParId	tst	tst1
 1	1	NULL	NULL
 2	2	NULL	NULL
 3	3	NULL	NULL
-UPDATE child, parent
-SET child.tst = parent.tst,
-child.tst1 = parent.tst1
-WHERE child.ParId = parent.Id;
-select * from child;
+UPDATE t2, t1 SET t2.tst = t1.tst, t2.tst1 = t1.tst1 WHERE t2.ParId = t1.Id;
+select * from t2;
 ID	ParId	tst	tst1
 1	1	MySQL	MySQL AB
 2	2	MSSQL	Microsoft
 3	3	ORACLE	ORACLE
-drop table parent, child;
 drop table if exists t1, t2 ;
 create table t1 (n numeric(10));
 create table t2 (n numeric(10));
mysql-test/t/multi_update.test
@@ -80,67 +80,59 @@ while ($1)
 enable_query_log;
 delete t1 from t1,t2 where t1.id1 = t2.id2 and t1.id1 > 500;
 drop table t1,t2;

-DROP TABLE IF EXISTS a,b,c;
-CREATE TABLE a (
+CREATE TABLE t1 (
 id int(11) NOT NULL default '0',
 name varchar(10) default NULL,
 PRIMARY KEY  (id)
 ) TYPE=MyISAM;
-INSERT INTO a VALUES (1,'aaa'),(2,'aaa'),(3,'aaa');
-CREATE TABLE b (
+INSERT INTO t1 VALUES (1,'aaa'),(2,'aaa'),(3,'aaa');
+CREATE TABLE t2 (
 id int(11) NOT NULL default '0',
 name varchar(10) default NULL,
 PRIMARY KEY  (id)
 ) TYPE=MyISAM;
-INSERT INTO b VALUES (2,'bbb'),(3,'bbb'),(4,'bbb');
-CREATE TABLE c (
+INSERT INTO t2 VALUES (2,'bbb'),(3,'bbb'),(4,'bbb');
+CREATE TABLE t3 (
 id int(11) NOT NULL default '0',
 mydate datetime default NULL,
 PRIMARY KEY  (id)
 ) TYPE=MyISAM;
-INSERT INTO c VALUES (1,'2002-02-04 00:00:00'),(3,'2002-05-12 00:00:00'),(5,'2002-05-12 00:00:00'),(6,'2002-06-22
+INSERT INTO t3 VALUES (1,'2002-02-04 00:00:00'),(3,'2002-05-12 00:00:00'),(5,'2002-05-12 00:00:00'),(6,'2002-06-22
 00:00:00'),(7,'2002-07-22 00:00:00');
-delete a,b,c from a,b,c where to_days(now())-to_days(c.mydate)>=30 and c.id=a.id and c.id=b.id;
-select * from c;
-DROP TABLE IF EXISTS a,b,c;
-drop table if exists parent, child;
-CREATE TABLE IF NOT EXISTS `parent` (
+delete t1,t2,t3 from t1,t2,t3 where to_days(now())-to_days(t3.mydate)>=30 and t3.id=t1.id and t3.id=t2.id;
+select * from t3;
+DROP TABLE IF EXISTS t1,t2,t3;
+CREATE TABLE IF NOT EXISTS `t1` (
 `id` int(11) NOT NULL auto_increment,
 `tst` text,
 `tst1` text,
 PRIMARY KEY  (`id`)
 ) TYPE=MyISAM;
-CREATE TABLE IF NOT EXISTS `child` (
+CREATE TABLE IF NOT EXISTS `t2` (
 `ID` int(11) NOT NULL auto_increment,
 `ParId` int(11) default NULL,
 `tst` text,
 `tst1` text,
 PRIMARY KEY  (`ID`),
-KEY `IX_ParId_child` (`ParId`),
-FOREIGN KEY (`ParId`) REFERENCES `test.parent` (`id`)
+KEY `IX_ParId_t2` (`ParId`),
+FOREIGN KEY (`ParId`) REFERENCES `t1` (`id`)
 ) TYPE=MyISAM;
-INSERT INTO parent(tst,tst1)
-VALUES("MySQL","MySQL AB"), ("MSSQL","Microsoft"), ("ORACLE","ORACLE");
-INSERT INTO child(ParId) VALUES(1), (2), (3);
-select * from child;
-UPDATE child, parent SET child.tst = parent.tst, child.tst1 = parent.tst1 WHERE child.ParId = parent.Id;
-select * from child;
-drop table parent, child;
+INSERT INTO t1(tst,tst1) VALUES("MySQL","MySQL AB"), ("MSSQL","Microsoft"), ("ORACLE","ORACLE");
+INSERT INTO t2(ParId) VALUES(1), (2), (3);
+select * from t2;
+UPDATE t2, t1 SET t2.tst = t1.tst, t2.tst1 = t1.tst1 WHERE t2.ParId = t1.Id;
+select * from t2;
 drop table if exists t1, t2 ;
 create table t1 (n numeric(10));
 create table t2 (n numeric(10));
 insert into t2 values (1),(2),(4),(8),(16),(32);
mysys/mf_iocache.c
@@ -445,6 +445,8 @@ void init_io_cache_share(IO_CACHE *info, IO_CACHE_SHARE *s, uint num_threads)
   s->active= 0;                        /* to catch errors */
   info->share= s;
   info->read_function= _my_b_read_r;
+  /* Ensure that the code doesn't use pointer to the IO_CACHE object */
+  info->current_pos= info->current_end= 0;
 }

 /*
sql/item_cmpfunc.cc
@@ -1658,7 +1658,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const
        i-= u;
       }
       if (i < 0)
-       return true;
+       return 1;

       register const int v = plm1 - i;
       turboShift = u - v;
@@ -1675,7 +1675,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const
       }
       j+= shift;
     }
-    return false;
+    return 0;
   }
   else
   {
@@ -1689,7 +1689,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const
        i-= u;
       }
       if (i < 0)
-       return true;
+       return 1;

       register const int v = plm1 - i;
       turboShift = u - v;
@@ -1706,7 +1706,7 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const
       }
       j+= shift;
     }
-    return false;
+    return 0;
   }
 }
sql/mysql_priv.h
@@ -242,6 +242,20 @@ typedef struct st_sql_list {
   uint elements;
   byte *first;
   byte **next;
+
+  inline void empty()
+  {
+    elements= 0;
+    first= 0;
+    next= &first;
+  }
+  inline void link_in_list(byte *element, byte **next_ptr)
+  {
+    elements++;
+    (*next)= element;
+    next= next_ptr;
+    *next= 0;
+  }
 } SQL_LIST;
@@ -415,6 +429,10 @@ int mysql_update(THD *thd,TABLE_LIST *tables,List<Item> &fields,
                 List<Item> &values, COND *conds,
                 ORDER *order, ha_rows limit,
                 enum enum_duplicates handle_duplicates);
+int mysql_multi_update(THD *thd, TABLE_LIST *table_list,
+                      List<Item> *fields, List<Item> *values,
+                      COND *conds, ulong options,
+                      enum enum_duplicates handle_duplicates);
 int mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields,
                 List<List_item> &values, enum_duplicates flag);
 void kill_delayed_threads(void);
@@ -498,7 +516,7 @@ TABLE_LIST *add_table_to_list(Table_ident *table,LEX_STRING *alias,
 void set_lock_for_tables(thr_lock_type lock_type);
 void add_join_on(TABLE_LIST *b,Item *expr);
 void add_join_natural(TABLE_LIST *a,TABLE_LIST *b);
-bool add_proc_to_list(Item *item);
+bool add_proc_to_list(THD *thd, Item *item);
 TABLE *unlink_open_table(THD *thd,TABLE *list,TABLE *find);
 SQL_SELECT *make_select(TABLE *head, table_map const_tables,
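The new inline SQL_LIST::link_in_list() keeps `next` pointing at the last element's own next-pointer slot, so every append is O(1) and the list stays NULL-terminated without walking it. A minimal standalone sketch of the same tail-pointer idea (plain C++, invented names, not MySQL code):

#include <cstdio>

struct node { int value; node *next; };

struct tail_list
{
  unsigned elements;
  node *first;
  node **next;                         // points at the slot to fill next

  void empty() { elements= 0; first= 0; next= &first; }
  void link_in_list(node *element, node **next_ptr)
  {
    elements++;
    *next= element;                    // hook element into the last slot
    next= next_ptr;                    // remember element's own next slot
    *next= 0;                          // keep the list terminated
  }
};

int main()
{
  tail_list list;
  node a= {1, 0}, b= {2, 0};
  list.empty();
  list.link_in_list(&a, &a.next);
  list.link_in_list(&b, &b.next);
  for (node *p= list.first; p; p= p->next)
    printf("%d\n", p->value);          // prints 1 then 2
  return 0;
}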
sql/sql_base.cc
@@ -1942,8 +1942,8 @@ static key_map get_key_map_from_key_list(TABLE *table,
 }

 /****************************************************************************
-** This just drops in all fields instead of current '*' field
-** Returns pointer to last inserted field if ok
+  This just drops in all fields instead of current '*' field
+  Returns pointer to last inserted field if ok
 ****************************************************************************/

 bool
@@ -1957,21 +1957,26 @@ insert_fields(THD *thd,TABLE_LIST *tables, const char *db_name,
   for (; tables ; tables=tables->next)
   {
     TABLE *table=tables->table;
-    if (grant_option && !thd->master_access &&
-        check_grant_all_columns(thd,SELECT_ACL,table))
-      DBUG_RETURN(-1);
     if (!table_name || (!strcmp(table_name,tables->alias) &&
                         (!db_name || !strcmp(tables->db,db_name))))
     {
+      /* Ensure that we have access right to all columns */
+      if (grant_option && !thd->master_access &&
+          check_grant_all_columns(thd,SELECT_ACL,table))
+        DBUG_RETURN(-1);
       Field **ptr=table->field,*field;
       thd->used_tables|=table->map;
       while ((field = *ptr++))
       {
        Item_field *item= new Item_field(field);
        if (!found++)
-         (void) it->replace(item);
+         (void) it->replace(item);             // Replace '*'
        else
          it->after(item);
+       /*
+         Mark if field used before in this select.
+         Used by 'insert' to verify if a field name is used twice
+       */
        if (field->query_id == thd->query_id)
          thd->dupp_field=field;
        field->query_id=thd->query_id;
sql/sql_class.h
@@ -592,7 +592,7 @@ class select_result :public Sql_alloc {
   virtual int prepare(List<Item> &list) { return 0; }
   virtual bool send_fields(List<Item> &list,uint flag)=0;
   virtual bool send_data(List<Item> &items)=0;
-  virtual void initialize_tables (JOIN *join=0) {}
+  virtual bool initialize_tables (JOIN *join=0) { return 0; }
   virtual void send_error(uint errcode,const char *err)
   {
     ::send_error(&thd->net,errcode,err);
@@ -656,10 +656,10 @@ class select_insert :public select_result {
   List<Item> *fields;
   ulonglong last_insert_id;
   COPY_INFO info;
-  uint save_time_stamp;

   select_insert(TABLE *table_par,List<Item> *fields_par,enum_duplicates duplic)
-    :table(table_par),fields(fields_par), last_insert_id(0),
-     save_time_stamp(0)
+    :table(table_par),fields(fields_par), last_insert_id(0)
   {
     bzero((char*) &info,sizeof(info));
     info.handle_duplicates=duplic;
   }
@@ -703,8 +703,8 @@ class select_union :public select_result {
 public:
   TABLE *table;
   COPY_INFO info;
-  uint save_time_stamp;
   TMP_TABLE_PARAM *tmp_table_param;
+  bool not_describe;

   select_union(TABLE *table_par);
   ~select_union();
@@ -814,37 +814,36 @@ class Unique :public Sql_alloc
   bool send_fields(List<Item> &list, uint flag) { return 0; }
   bool send_data(List<Item> &items);
-  void initialize_tables (JOIN *join);
+  bool initialize_tables (JOIN *join);
   void send_error(uint errcode,const char *err);
   int  do_deletes (bool from_send_error);
   bool send_eof();
 };


 class multi_update : public select_result
 {
-  TABLE_LIST *update_tables, *table_being_updated;
-//  Unique  **tempfiles;
-  COPY_INFO *infos;
-  TABLE **tmp_tables;
+  TABLE_LIST *all_tables, *update_tables, *table_being_updated;
   THD *thd;
+  TABLE **tmp_tables, *main_table;
+  TMP_TABLE_PARAM *tmp_table_param;
   ha_rows updated, found;
-  List<Item> fields;
-  List<Item> **fields_by_tables;
-  enum enum_duplicates dupl;
-  uint num_of_tables, num_fields, num_updated, *save_time_stamps,
-       *field_sequence;
-  int error;
-  bool do_update, not_trans_safe;
-public:
-  multi_update(THD *thd_arg, TABLE_LIST *ut, List<Item> &fs,
-              enum enum_duplicates handle_duplicates, uint num);
+  List<Item> *fields, *values;
+  List<Item> **fields_for_table, **values_for_table;
+  uint table_count;
+  Copy_field *copy_field;
+  enum enum_duplicates handle_duplicates;
+  bool do_update, trans_safe, transactional_tables, log_delayed;
+
+public:
+  multi_update(THD *thd_arg, TABLE_LIST *ut, List<Item> *fields,
+              List<Item> *values, enum_duplicates handle_duplicates);
   ~multi_update();
   int  prepare(List<Item> &list);
   bool send_fields(List<Item> &list, uint flag) { return 0; }
   bool send_data(List<Item> &items);
-  void initialize_tables (JOIN *join);
+  bool initialize_tables (JOIN *join);
   void send_error(uint errcode,const char *err);
   int  do_updates (bool from_send_error);
   bool send_eof();
 };
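A select_result subclass is driven by the optimizer in a fixed sequence: prepare(), then initialize_tables() once the join plan is known, then send_data() per joined row, and finally send_eof() or send_error(). Changing initialize_tables() to return bool lets a setup failure abort the join. A purely illustrative, simplified sketch of that calling sequence (invented stand-in types, not the real THD/JOIN/select_result classes):

#include <cstdio>

struct fake_join {};                        // stand-in for JOIN

struct result_sink                          // stand-in for select_result
{
  virtual int  prepare()                     { return 0; }
  virtual bool initialize_tables(fake_join*) { return 0; }  // 0 = ok
  virtual bool send_data(int row)            = 0;
  virtual bool send_eof()                    = 0;
  virtual ~result_sink() {}
};

struct counting_sink : result_sink
{
  int rows= 0;
  bool send_data(int) { rows++; return 0; }
  bool send_eof()     { printf("%d rows\n", rows); return 0; }
};

static int run_query(result_sink *result)
{
  fake_join join;
  if (result->prepare())
    return 1;
  if (result->initialize_tables(&join))     // an error here now aborts the join
    return 1;
  for (int row= 0; row < 3; row++)          // pretend the join produced 3 rows
    if (result->send_data(row))
      return 1;
  return result->send_eof() ? 1 : 0;
}

int main() { counting_sink sink; return run_query(&sink); }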
sql/sql_delete.cc
@@ -213,12 +213,13 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, ORDER *order,
 extern "C" int refposcmp2(void* arg, const void *a,const void *b)
 {
   /* arg is a pointer to file->ref_length */
   return memcmp(a,b, *(int*) arg);
 }

 multi_delete::multi_delete(THD *thd_arg, TABLE_LIST *dt,
                           uint num_of_tables_arg)
-  : delete_tables(dt), thd(thd_arg), deleted(0),
-    num_of_tables(num_of_tables_arg), error(0), do_delete(0)
+  : delete_tables(dt), thd(thd_arg), deleted(0),
+    num_of_tables(num_of_tables_arg), error(0), do_delete(0),
+    transactional_tables(0), log_delayed(0), normal_tables(0)
 {
@@ -230,31 +231,22 @@ int
 multi_delete::prepare(List<Item> &values)
 {
   DBUG_ENTER("multi_delete::prepare");
-  do_delete = true;
+  do_delete= 1;
   thd->proc_info="deleting from main table";

-  if (thd->options & OPTION_SAFE_UPDATES)
-  {
-    TABLE_LIST *table_ref;
-    for (table_ref=delete_tables; table_ref; table_ref=table_ref->next)
-    {
-      TABLE *table=table_ref->table;
-      if ((thd->options & OPTION_SAFE_UPDATES) && !table->quick_keys)
-      {
-       my_error(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,MYF(0));
-       DBUG_RETURN(1);
-      }
-    }
-  }
   DBUG_RETURN(0);
 }

-void
+bool
 multi_delete::initialize_tables(JOIN *join)
 {
-  int counter = 0;
   TABLE_LIST *walk;
+  Unique **tempfiles_ptr;
   DBUG_ENTER("initialize_tables");

+  if ((thd->options & OPTION_SAFE_UPDATES) && error_if_full_join(join))
+    DBUG_RETURN(1);
+
   table_map tables_to_delete_from=0;
   for (walk= delete_tables ; walk ; walk=walk->next)
     tables_to_delete_from|= walk->table->map;
@@ -268,9 +260,10 @@ multi_delete::initialize_tables(JOIN *join)
     {
       /* We are going to delete from this table */
       TABLE *tbl=walk->table=tab->table;
-      walk=walk->next;
       /* Don't use KEYREAD optimization on this table */
       tbl->no_keyread=1;
+      walk=walk->next;
+      tbl->used_keys=0;
+      if (tbl->file->has_transactions())
+       log_delayed=transactional_tables=1;
+      else if (tbl->tmp_table != NO_TMP_TABLE)
@@ -280,19 +273,17 @@ multi_delete::initialize_tables(JOIN *join)
     }
   }
-  walk= delete_tables;
-  walk->table->used_keys=0;
-  for (walk=walk->next ; walk ; walk=walk->next, counter++)
+  tempfiles_ptr= tempfiles;
+  for (walk= walk->next ; walk ; walk= walk->next)
   {
-    tables_to_delete_from|= walk->table->map;
     TABLE *table=walk->table;
     /* Don't use key read with MULTI-TABLE-DELETE */
     table->used_keys=0;
-    tempfiles[counter] = new Unique (refposcmp2,
+    *tempfiles_ptr++= new Unique (refposcmp2,
                                  (void *) &table->file->ref_length,
                                  table->file->ref_length,
                                  MEM_STRIP_BUF_SIZE);
   }
   init_ftfuncs(thd,1);
   DBUG_RETURN(thd->fatal_error != 0);
 }
@@ -307,7 +298,7 @@ multi_delete::~multi_delete()
     t->no_keyread=0;
   }
   for (uint counter= 0; counter < num_of_tables-1; counter++)
   {
     if (tempfiles[counter])
       delete tempfiles[counter];
@@ -414,7 +405,7 @@ int multi_delete::do_deletes(bool from_send_error)
   else
     table_being_deleted = delete_tables;

-  do_delete = false;
+  do_delete= 0;
   for (table_being_deleted=table_being_deleted->next;
        table_being_deleted ;
        table_being_deleted=table_being_deleted->next, counter++)
@@ -468,7 +459,7 @@ bool multi_delete::send_eof()
     was a non-transaction-safe table involved, since
     modifications in it cannot be rolled back.
   */
-  if (deleted)
+  if (deleted && (error <= 0 || normal_tables))
   {
     mysql_update_log.write(thd,thd->query,thd->query_length);
     if (mysql_bin_log.is_open())
@@ -478,11 +469,17 @@ bool multi_delete::send_eof()
       if (mysql_bin_log.write(&qinfo) && !normal_tables)
        local_error=1;  // Log write failed: roll back the SQL statement
     }
+    if (!log_delayed)
+      thd->options|=OPTION_STATUS_NO_TRANS_UPDATE;
   }
   /* Commit or rollback the current SQL statement */
-  VOID(ha_autocommit_or_rollback(thd, local_error > 0));
+  if (transactional_tables)
+    if (ha_autocommit_or_rollback(thd,local_error > 0))
+      local_error=1;
+
+  if (deleted)
+    query_cache_invalidate3(thd, delete_tables, 1);
   if (local_error)
     ::send_error(&thd->net);
   else
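multi_delete collects the row positions of every matching row into one Unique structure per table (comparator refposcmp2, which simply memcmp()s two fixed-length handler row references) and deletes them in a second pass, so each row is removed exactly once. A tiny standalone sketch of the comparator semantics only (the Unique class itself is not reproduced; ref_length and the positions below are made-up values):

#include <cstring>
#include <cstdio>

/* Same shape as refposcmp2 in the hunk above: the opaque arg points at the
   handler's ref_length, and the references are compared byte-wise. */
extern "C" int refposcmp2_sketch(void *arg, const void *a, const void *b)
{
  return memcmp(a, b, *(int*) arg);
}

int main()
{
  int ref_length= 4;                                   // hypothetical ref size
  unsigned char pos1[4]= {0,0,0,7}, pos2[4]= {0,0,0,7}, pos3[4]= {0,0,0,9};
  printf("%d %d\n",
         refposcmp2_sketch(&ref_length, pos1, pos2),          // 0: same row
         refposcmp2_sketch(&ref_length, pos1, pos3) != 0);    // 1: different row
  return 0;
}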
sql/sql_insert.cc
@@ -41,7 +41,8 @@ static void unlink_blobs(register TABLE *table);
 /*
   Check if insert fields are correct
-  Resets form->time_stamp if a timestamp value is set
+  Updates table->time_stamp to point to timestamp field or 0, depending on
+  if timestamp should be updated or not.
 */

 static int
@@ -87,9 +88,10 @@ check_insert_fields(THD *thd,TABLE *table,List<Item> &fields,
       my_error(ER_FIELD_SPECIFIED_TWICE,MYF(0), thd->dupp_field->field_name);
       return -1;
     }
+    table->time_stamp=0;
     if (table->timestamp_field &&              // Don't set timestamp if used
-       table->timestamp_field->query_id == thd->query_id)
-      table->time_stamp=0;                     // This should be saved
+       table->timestamp_field->query_id != thd->query_id)
+      table->time_stamp= table->timestamp_field->offset()+1;
   }
   // For the values we need select_priv
   table->grant.want_privilege=(SELECT_ACL & ~table->grant.privilege);
@@ -105,7 +107,6 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields,
                            !(thd->master_access & SUPER_ACL));
   bool transactional_table, log_delayed, bulk_insert=0;
   uint value_count;
-  uint save_time_stamp;
   ulong counter = 1;
   ulonglong id;
   COPY_INFO info;
@@ -150,14 +151,10 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields,
     DBUG_RETURN(-1);
   thd->proc_info="init";
   thd->used_tables=0;
-  save_time_stamp=table->time_stamp;
   values= its++;
   if (check_insert_fields(thd,table,fields,*values,1) ||
       setup_tables(table_list) ||
       setup_fields(thd,table_list,*values,0,0,0))
-  {
-    table->time_stamp=save_time_stamp;
     goto abort;
-  }
   value_count= values->elements;
   while ((values = its++))
   {
@@ -167,15 +164,11 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields,
       my_printf_error(ER_WRONG_VALUE_COUNT_ON_ROW,
                      ER(ER_WRONG_VALUE_COUNT_ON_ROW),
                      MYF(0),counter);
-      table->time_stamp=save_time_stamp;
       goto abort;
     }
     if (setup_fields(thd,table_list,*values,0,0,0))
-    {
-      table->time_stamp=save_time_stamp;
       goto abort;
-    }
   }
   its.rewind();
   /*
     Fill in the given fields and dump it to the table file
@@ -333,7 +326,6 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields,
     }
   }
   thd->proc_info="end";
-  table->time_stamp=save_time_stamp;           // Restore auto timestamp ptr
   table->next_number_field=0;
   thd->count_cuted_fields=0;
   thd->next_insert_id=0;                       // Reset this if wrongly used
@@ -1287,7 +1279,6 @@ select_insert::prepare(List<Item> &values)
 {
   DBUG_ENTER("select_insert::prepare");

-  save_time_stamp=table->time_stamp;
   if (check_insert_fields(thd,table,*fields,values,1))
     DBUG_RETURN(1);
@@ -1308,8 +1299,6 @@ select_insert::~select_insert()
 {
   if (table)
   {
-    if (save_time_stamp)
-      table->time_stamp=save_time_stamp;
     table->next_number_field=0;
     table->file->extra(HA_EXTRA_RESET);
   }
@@ -1412,7 +1401,6 @@ select_create::prepare(List<Item> &values)
   /* First field to copy */
   field=table->field+table->fields-values.elements;

-  save_time_stamp=table->time_stamp;
   if (table->timestamp_field)                  // Don't set timestamp if used
   {
     table->timestamp_field->set_time();
sql/sql_olap.cc
@@ -75,7 +75,7 @@ static int make_new_olap_select(LEX *lex, SELECT_LEX *select_lex, List<Item> new
          !strcmp(((Item_field*)new_item)->table_name,iif->table_name) &&
          !strcmp(((Item_field*)new_item)->field_name,iif->field_name))
       {
-       not_found=false;
+       not_found= 0;
        ((Item_field*)new_item)->db_name=iif->db_name;
        Item_field *new_one=new Item_field(iif->db_name, iif->table_name, iif->field_name);
        privlist.push_back(new_one);
@@ -151,7 +151,7 @@ int handle_olaps(LEX *lex, SELECT_LEX *select_lex)
     if (cursor->do_redirect)
     {
       cursor->table= ((TABLE_LIST*) cursor->table)->table;
-      cursor->do_redirect=false;
+      cursor->do_redirect= 0;
     }
   }
sql/sql_parse.cc
@@ -422,7 +422,7 @@ static bool check_mqh(THD *thd, uint check_command)
 }

-static void reset_mqh(THD *thd, LEX_USER *lu, bool get_them=false)
+static void reset_mqh(THD *thd, LEX_USER *lu, bool get_them= 0)
 {
   (void) pthread_mutex_lock(&LOCK_user_conn);
@@ -1828,57 +1828,25 @@ mysql_execute_command(void)
     }
     else
     {
-      multi_update *result;
-      uint table_count;
-      TABLE_LIST *auxi;
-      const char *msg= 0;
-      lex->sql_command=SQLCOM_MULTI_UPDATE;
-      for (auxi=(TABLE_LIST*) tables, table_count=0 ; auxi ; auxi=auxi->next)
-       table_count++;
+      const char *msg= 0;
+      lex->sql_command=SQLCOM_MULTI_UPDATE;
      if (select_lex->order_list.elements)
        msg="ORDER BY";
      else if (select_lex->select_limit && select_lex->select_limit !=
               HA_POS_ERROR)
        msg="LIMIT";
      if (msg)
      {
        net_printf(&thd->net, ER_WRONG_USAGE, "UPDATE", msg);
        res= 1;
        break;
      }
-      tables->grant.want_privilege=(SELECT_ACL & ~tables->grant.privilege);
-      if ((res=open_and_lock_tables(thd,tables)))
-       break;
-      thd->select_limit=HA_POS_ERROR;
-      if (!setup_fields(thd,tables,select_lex->item_list,1,0,0) &&
-         !setup_fields(thd,tables,lex->value_list,0,0,0) &&
-         !thd->fatal_error &&
-         (result=new multi_update(thd,tables,select_lex->item_list,
-                                  lex->duplicates, table_count)))
-      {
-       List <Item> total_list;
-       List_iterator <Item> field_list(select_lex->item_list);
-       List_iterator <Item> value_list(lex->value_list);
-       Item *item;
-       while ((item=field_list++))
-         total_list.push_back(item);
-       while ((item=value_list++))
-         total_list.push_back(item);
-
-       res= mysql_select(thd,tables,total_list,
-                         select_lex->where,
-                         (ORDER *)NULL,(ORDER *)NULL,(Item *)NULL,
-                         (ORDER *)NULL,
-                         select_lex->options | thd->options |
-                         SELECT_NO_JOIN_CACHE,
-                         result);
-       delete result;
-      }
-      else
-       res= -1;                                // Error is not sent
-      close_thread_tables(thd);
+      res= mysql_multi_update(thd,tables,
+                             &select_lex->item_list,
+                             &lex->value_list,
+                             select_lex->where,
+                             select_lex->options,
+                             lex->duplicates);
     }
     break;
   case SQLCOM_INSERT:
@@ -2741,11 +2709,8 @@ mysql_init_select(LEX *lex)
   select_lex->olap= UNSPECIFIED_OLAP_TYPE;
   lex->exchange= 0;
   lex->proc_list.first=0;
-  select_lex->order_list.elements=select_lex->group_list.elements=0;
-  select_lex->order_list.first=0;
-  select_lex->order_list.next= (byte**) &select_lex->order_list.first;
-  select_lex->group_list.first=0;
-  select_lex->group_list.next= (byte**) &select_lex->group_list.first;
+  select_lex->order_list.empty();
+  select_lex->group_list.empty();
   select_lex->next = (SELECT_LEX*) NULL;
 }
@@ -2818,16 +2783,6 @@ mysql_parse(THD *thd,char *inBuf,uint length)
 }

-inline static void
-link_in_list(SQL_LIST *list,byte *element,byte **next)
-{
-  list->elements++;
-  (*list->next)=element;
-  list->next=next;
-  *next=0;
-}
-
 /*****************************************************************************
 ** Store field definition for create
 ** Return 0 if ok
@@ -3102,7 +3057,7 @@ void store_position_for_column(const char *name)
 }

 bool
-add_proc_to_list(Item *item)
+add_proc_to_list(THD *thd, Item *item)
 {
   ORDER *order;
   Item **item_ptr;
@@ -3113,7 +3068,7 @@ add_proc_to_list(Item *item)
   *item_ptr= item;
   order->item=item_ptr;
   order->free_me=0;
-  link_in_list(&current_lex->proc_list,(byte*) order,(byte**) &order->next);
+  thd->lex.proc_list.link_in_list((byte*) order,(byte**) &order->next);
   return 0;
 }
@@ -3167,7 +3122,7 @@ bool add_to_list(SQL_LIST &list,Item *item,bool asc)
   order->asc = asc;
   order->free_me=0;
   order->used=0;
-  link_in_list(&list,(byte*) order,(byte**) &order->next);
+  list.link_in_list((byte*) order,(byte**) &order->next);
   DBUG_RETURN(0);
 }
@@ -3248,7 +3203,7 @@ TABLE_LIST *add_table_to_list(Table_ident *table, LEX_STRING *alias,
       }
     }
   }
-  link_in_list(&thd->lex.select->table_list,(byte*) ptr,(byte**) &ptr->next);
+  thd->lex.select->table_list.link_in_list((byte*) ptr,(byte**) &ptr->next);
   DBUG_RETURN(ptr);
 }
sql/sql_select.cc
@@ -437,7 +437,8 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
   if (make_join_statistics(&join,tables,conds,&keyuse) || thd->fatal_error)
     goto err;
   thd->proc_info="preparing";
-  result->initialize_tables(&join);
+  if (result->initialize_tables(&join))
+    goto err;
   if (join.const_table_map != join.found_const_table_map &&
       !(select_options & SELECT_DESCRIBE))
   {
@@ -2721,6 +2722,38 @@ make_join_readinfo(JOIN *join,uint options)
 }

+/*
+  Give error if we some tables are done with a full join
+
+  SYNOPSIS
+    error_if_full_join()
+    join               Join condition
+
+  USAGE
+    This is used by multi_table_update and multi_table_delete when running
+    in safe mode
+
+  RETURN VALUES
+    0  ok
+    1  Error (full join used)
+*/
+
+bool error_if_full_join(JOIN *join)
+{
+  for (JOIN_TAB *tab=join->join_tab, *end=join->join_tab+join->tables;
+       tab < end ; tab++)
+  {
+    if (tab->type == JT_ALL && !tab->select->quick)
+    {
+      my_error(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,MYF(0));
+      return(1);
+    }
+  }
+  return(0);
+}
+
+
 static void
 join_free(JOIN *join)
 {
@@ -3401,12 +3434,34 @@ const_expression_in_where(COND *cond, Item *comp_item, Item **const_item)
 /****************************************************************************
-  Create a temp table according to a field list.
-  Set distinct if duplicates could be removed
-  Given fields field pointers are changed to point at tmp_table
-  for send_fields
+  Create internal temporary table
 ****************************************************************************/

+/*
+  Create field for temporary table
+
+  SYNOPSIS
+    create_tmp_field()
+    thd                Thread handler
+    table              Temporary table
+    item               Item to create a field for
+    type               Type of item (normally item->type)
+    copy_func          If set and item is a function, store copy of item
+                       in this array
+    group              1 if we are going to do a relative group by on result
+    modify_item        1 if item->result_field should point to new item.
+                       This is relevent for how fill_record() is going to
+                       work:
+                       If modify_item is 1 then fill_record() will update
+                       the record in the original table.
+                       If modify_item is 0 then fill_record() will update
+                       the temporary table
+
+  RETURN
+    0                  on error
+    new_created field
+*/
+
 Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
                        Item_result_field ***copy_func, Field **from_field,
                        bool group, bool modify_item)
@@ -3515,6 +3570,13 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
 }

+/*
+  Create a temp table according to a field list.
+  Set distinct if duplicates could be removed
+  Given fields field pointers are changed to point at tmp_table
+  for send_fields
+*/
+
 TABLE *create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
                        ORDER *group, bool distinct, bool save_sum_fields,
@@ -3675,9 +3737,19 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
     }
     else
     {
+      /*
+       The last parameter to create_tmp_field() is a bit tricky:
+
+       We need to set it to 0 in union, to get fill_record() to modify the
+       temporary table.
+       We need to set it to 1 on multi-table-update and in select to
+       write rows to the temporary table.
+       We here distinguish between UNION and multi-table-updates by the fact
+       that in the later case group is set to the row pointer.
+      */
       Field *new_field=create_tmp_field(thd, table, item,type, &copy_func,
                                        tmp_from_field, group != 0,
-                                       not_all_columns);
+                                       not_all_columns || group !=0);
       if (!new_field)
       {
        if (thd->fatal_error)
@@ -3991,7 +4063,6 @@ static bool open_tmp_table(TABLE *table)
     table->db_stat=0;
     return(1);
   }
-  /* VOID(ha_lock(table,F_WRLCK)); */ /* Single thread table */
   (void) table->file->extra(HA_EXTRA_QUICK);           /* Faster */
   return(0);
 }
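error_if_full_join() walks the chosen plan and rejects any table that would be read with a full scan (JT_ALL without a quick range select) when the session runs with --safe-updates. A usage sketch only, mirroring how the sql_delete.cc hunk above calls it from initialize_tables() (the wrapping function name is invented; it assumes the server's internal mysql_priv.h declarations):

#include "mysql_priv.h"   /* assumed in-tree header for THD, JOIN, OPTION_SAFE_UPDATES */

/* Illustration of the guard the multi-table DELETE/UPDATE paths now use */
static bool refuse_keyless_join_in_safe_mode(THD *thd, JOIN *join)
{
  if ((thd->options & OPTION_SAFE_UPDATES) && error_if_full_join(join))
    return 1;            /* refuse to run a keyless full join in safe mode */
  return 0;              /* otherwise continue with per-table setup */
}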
sql/sql_select.h
@@ -301,3 +301,4 @@ class store_key_const_item :public store_key_item
 };

 bool cp_buffer_from_ref(TABLE_REF *ref);
+bool error_if_full_join(JOIN *join);
sql/sql_union.cc
@@ -33,7 +33,7 @@ int mysql_union(THD *thd, LEX *lex,select_result *result)
   TABLE *table;
   int describe=(lex->select_lex.options & SELECT_DESCRIBE) ? 1 : 0;
   int res;
-  bool found_rows_for_union= false;
+  bool found_rows_for_union= 0;
   TABLE_LIST result_table_list;
   TABLE_LIST *first_table=(TABLE_LIST *)lex->select_lex.table_list.first;
   TMP_TABLE_PARAM tmp_table_param;
@@ -53,7 +53,7 @@ int mysql_union(THD *thd, LEX *lex,select_result *result)
       if (cursor->do_redirect)                 // False if CUBE/ROLLUP
       {
        cursor->table= ((TABLE_LIST*) cursor->table)->table;
-       cursor->do_redirect=false;
+       cursor->do_redirect= 0;
       }
     }
   }
@@ -138,7 +138,7 @@ int mysql_union(THD *thd, LEX *lex,select_result *result)
     res= -1;
     goto exit;
   }
-  union_result->save_time_stamp=!describe;
+  union_result->not_describe=!describe;
   union_result->tmp_table_param=&tmp_table_param;
   for (sl= &lex->select_lex; sl; sl=sl->next)
   {
@@ -150,14 +150,17 @@ int mysql_union(THD *thd, LEX *lex,select_result *result)
     if (thd->select_limit == HA_POS_ERROR)
       sl->options&= ~OPTION_FOUND_ROWS;
-    res=mysql_select(thd, (describe && sl->linkage == NOT_A_SELECT) ?
-                    first_table : (TABLE_LIST*) sl->table_list.first,
+    res= mysql_select(thd,
+                     (describe && sl->linkage == NOT_A_SELECT) ?
+                     first_table : (TABLE_LIST*) sl->table_list.first,
                     sl->item_list,
                     sl->where,
                     (sl->braces) ? (ORDER *)sl->order_list.first :
                     (ORDER *) 0,
                     (ORDER*) sl->group_list.first,
                     sl->having,
                     (ORDER*) NULL,
                     sl->options | thd->options | SELECT_NO_UNLOCK |
                     ((describe) ? SELECT_DESCRIBE : 0),
                     union_result);
     if (res)
       goto exit;
@@ -226,7 +229,7 @@ int mysql_union(THD *thd, LEX *lex,select_result *result)
 ***************************************************************************/

 select_union::select_union(TABLE *table_par)
-  :table(table_par)
+  :table(table_par), not_describe(0)
 {
   bzero((char*) &info,sizeof(info));
   /*
@@ -243,7 +246,7 @@ select_union::~select_union()
 int select_union::prepare(List<Item> &list)
 {
-  if (save_time_stamp && list.elements != table->fields)
+  if (not_describe && list.elements != table->fields)
   {
     my_message(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT,
               ER(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT),MYF(0));
sql/sql_update.cc
@@ -29,10 +29,12 @@ static bool compare_record(TABLE *table, ulong query_id)
 {
   if (!table->blob_fields)
     return cmp_record(table,1);
+  /* Compare null bits */
   if (memcmp(table->null_flags,
             table->null_flags+table->rec_buff_length,
             table->null_bytes))
     return 1;                          // Diff in NULL value
+  /* Compare updated fields */
   for (Field **ptr=table->field ; *ptr ; ptr++)
   {
     if ((*ptr)->query_id == query_id &&
@@ -52,10 +54,11 @@ int mysql_update(THD *thd,
                 ha_rows limit,
                 enum enum_duplicates handle_duplicates)
 {
-  bool using_limit=limit != HA_POS_ERROR,
-       safe_update= thd->options & OPTION_SAFE_UPDATES;
+  bool         using_limit= limit != HA_POS_ERROR;
+  bool         safe_update= thd->options & OPTION_SAFE_UPDATES;
   bool         used_key_is_modified, transactional_table, log_delayed;
   int          error=0;
-  uint         save_time_stamp, used_index, want_privilege;
+  uint         used_index, want_privilege;
   ulong        query_id=thd->query_id, timestamp_query_id;
   key_map      old_used_keys;
   TABLE        *table;
@@ -67,7 +70,6 @@ int mysql_update(THD *thd,
   if (!(table = open_ltable(thd,table_list,table_list->lock_type)))
     DBUG_RETURN(-1); /* purecov: inspected */
-  save_time_stamp=table->time_stamp;
   table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
   thd->proc_info="init";
@@ -89,6 +91,7 @@ int mysql_update(THD *thd,
   {
     timestamp_query_id=table->timestamp_field->query_id;
     table->timestamp_field->query_id=thd->query_id-1;
+    table->time_stamp= table->timestamp_field->offset()+1;
   }

   /* Check the fields we are going to modify */
@@ -108,7 +111,6 @@ int mysql_update(THD *thd,
   table->grant.want_privilege=(SELECT_ACL & ~table->grant.privilege);
   if (setup_fields(thd,table_list,values,0,0,0))
   {
-    table->time_stamp=save_time_stamp;         // Restore timestamp pointer
     DBUG_RETURN(-1);                           /* purecov: inspected */
   }
@@ -119,7 +121,6 @@ int mysql_update(THD *thd,
       (select && select->check_quick(safe_update, limit)) || !limit)
   {
     delete select;
-    table->time_stamp=save_time_stamp;         // Restore timestamp pointer
     if (error)
     {
       DBUG_RETURN(-1);                         // Error in where
@@ -134,7 +135,6 @@ int mysql_update(THD *thd,
     if (safe_update && !using_limit)
     {
       delete select;
-      table->time_stamp=save_time_stamp;
       send_error(&thd->net,ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE);
       DBUG_RETURN(1);
     }
@@ -153,8 +153,8 @@ int mysql_update(THD *thd,
   if (used_key_is_modified || order)
   {
     /*
-    ** We can't update table directly;  We must first search after all
-    ** matching rows before updating the table!
+      We can't update table directly;  We must first search after all
+      matching rows before updating the table!
     */
     table->file->extra(HA_EXTRA_DONT_USE_CURSOR_TO_UPDATE);
     IO_CACHE tempfile;
@@ -162,7 +162,6 @@ int mysql_update(THD *thd,
                      DISK_BUFFER_SIZE, MYF(MY_WME)))
     {
       delete select;                           /* purecov: inspected */
-      table->time_stamp=save_time_stamp;       // Restore timestamp pointer /* purecov: inspected */
       DBUG_RETURN(-1);
     }
     if (old_used_keys & ((key_map) 1 << used_index))
@@ -193,7 +192,6 @@ int mysql_update(THD *thd,
         == HA_POS_ERROR)
       {
        delete select;
-       table->time_stamp=save_time_stamp;      // Restore timestamp pointer
        DBUG_RETURN(-1);
       }
     }
@@ -247,7 +245,6 @@ int mysql_update(THD *thd,
   if (error >= 0)
   {
     delete select;
-    table->time_stamp=save_time_stamp;         // Restore timestamp pointer
     DBUG_RETURN(-1);
   }
 }
@@ -297,7 +294,6 @@ int mysql_update(THD *thd,
   end_read_record(&info);
   thd->proc_info="end";
   VOID(table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY));
-  table->time_stamp=save_time_stamp;           // Restore auto timestamp pointer
   transactional_table= table->file->has_transactions();
   log_delayed= (transactional_table || table->tmp_table);
   if (updated && (error <= 0 || !transactional_table))
@@ -351,324 +347,338 @@ int mysql_update(THD *thd,
DBUG_RETURN
(
0
);
}
/***************************************************************************
Update multiple tables from join
***************************************************************************/
multi_update
::
multi_update
(
THD
*
thd_arg
,
TABLE_LIST
*
ut
,
List
<
Item
>
&
fs
,
enum
enum_duplicates
handle_duplicates
,
uint
num
)
:
update_tables
(
ut
),
thd
(
thd_arg
),
updated
(
0
),
found
(
0
),
fields
(
fs
),
dupl
(
handle_duplicates
),
num_of_tables
(
num
),
num_fields
(
0
),
num_updated
(
0
),
error
(
0
),
do_update
(
false
)
/*
Setup multi-update handling and call SELECT to do the join
*/
int
mysql_multi_update
(
THD
*
thd
,
TABLE_LIST
*
table_list
,
List
<
Item
>
*
fields
,
List
<
Item
>
*
values
,
COND
*
conds
,
ulong
options
,
enum
enum_duplicates
handle_duplicates
)
{
save_time_stamps
=
(
uint
*
)
sql_calloc
(
sizeof
(
uint
)
*
num_of_tables
);
tmp_tables
=
(
TABLE
**
)
NULL
;
int
counter
=
0
;
ulong
timestamp_query_id
;
not_trans_safe
=
false
;
for
(
TABLE_LIST
*
dt
=
ut
;
dt
;
dt
=
dt
->
next
,
counter
++
)
{
TABLE
*
table
=
ut
->
table
;
// (void) ut->table->file->extra(HA_EXTRA_NO_KEYREAD);
dt
->
table
->
used_keys
=
0
;
int
res
;
multi_update
*
result
;
TABLE_LIST
*
tl
;
DBUG_ENTER
(
"mysql_multi_update"
);
table_list
->
grant
.
want_privilege
=
(
SELECT_ACL
&
~
table_list
->
grant
.
privilege
);
if
((
res
=
open_and_lock_tables
(
thd
,
table_list
)))
DBUG_RETURN
(
res
);
thd
->
select_limit
=
HA_POS_ERROR
;
if
(
setup_fields
(
thd
,
table_list
,
*
fields
,
1
,
0
,
0
))
DBUG_RETURN
(
-
1
);
/*
Count tables and setup timestamp handling
*/
for
(
tl
=
(
TABLE_LIST
*
)
table_list
;
tl
;
tl
=
tl
->
next
)
{
TABLE
*
table
=
tl
->
table
;
if
(
table
->
timestamp_field
)
{
// Don't set timestamp column if this is modified
timestamp_query_id
=
table
->
timestamp_field
->
query_id
;
table
->
timestamp_field
->
query_id
=
thd
->
query_id
-
1
;
if
(
table
->
timestamp_field
->
query_id
==
thd
->
query_id
)
table
->
time_stamp
=
0
;
else
table
->
timestamp_field
->
query_id
=
timestamp_query_id
;
// Only set timestamp column if this is not modified
if
(
table
->
timestamp_field
->
query_id
!=
thd
->
query_id
)
table
->
time_stamp
=
table
->
timestamp_field
->
offset
()
+
1
;
}
save_time_stamps
[
counter
]
=
table
->
time_stamp
;
}
error
=
1
;
// In case we do not reach prepare we have to reset timestamps
if
(
!
(
result
=
new
multi_update
(
thd
,
table_list
,
fields
,
values
,
handle_duplicates
)))
DBUG_RETURN
(
-
1
);
List
<
Item
>
total_list
;
res
=
mysql_select
(
thd
,
table_list
,
total_list
,
conds
,
(
ORDER
*
)
NULL
,
(
ORDER
*
)
NULL
,
(
Item
*
)
NULL
,
(
ORDER
*
)
NULL
,
options
|
SELECT_NO_JOIN_CACHE
,
result
);
end:
delete
result
;
DBUG_RETURN
(
res
);
}
int
multi_update
::
prepare
(
List
<
Item
>
&
values
)
multi_update
::
multi_update
(
THD
*
thd_arg
,
TABLE_LIST
*
table_list
,
List
<
Item
>
*
field_list
,
List
<
Item
>
*
value_list
,
enum
enum_duplicates
handle_duplicates_arg
)
:
all_tables
(
table_list
),
thd
(
thd_arg
),
tmp_tables
(
0
),
updated
(
0
),
found
(
0
),
fields
(
field_list
),
values
(
value_list
),
table_count
(
0
),
handle_duplicates
(
handle_duplicates_arg
),
do_update
(
1
)
{}
/*
Connect fields with tables and create list of tables that are updated
*/
int
multi_update
::
prepare
(
List
<
Item
>
&
not_used_values
)
{
TABLE_LIST
*
table_ref
;
SQL_LIST
update
;
table_map
tables_to_update
=
0
;
Item_field
*
item
;
List_iterator_fast
<
Item
>
field_it
(
*
fields
);
List_iterator_fast
<
Item
>
value_it
(
*
values
);
uint
i
,
max_fields
;
DBUG_ENTER
(
"multi_update::prepare"
);
do_update
=
true
;
thd
->
count_cuted_fields
=
1
;
thd
->
cuted_fields
=
0L
;
thd
->
proc_info
=
"updating the main table"
;
TABLE_LIST
*
table_ref
;
thd
->
proc_info
=
"updating main table"
;
if
(
thd
->
options
&
OPTION_SAFE_UPDATES
)
{
for
(
table_ref
=
update_tables
;
table_ref
;
table_ref
=
table_ref
->
next
)
{
TABLE
*
table
=
table_ref
->
table
;
if
((
thd
->
options
&
OPTION_SAFE_UPDATES
)
&&
!
table
->
quick_keys
)
while
((
item
=
(
Item_field
*
)
field_it
++
))
tables_to_update
|=
item
->
used_tables
();
if
(
!
tables_to_update
)
{
my_error
(
ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
,
MYF
(
0
));
my_error
(
ER_NOT_SUPPORTED_YET
,
MYF
(
0
),
"You didn't specify any tables to UPDATE"
);
DBUG_RETURN
(
1
);
}
}
}
/*
Here I have to connect fields with tables and only update tables that
need to be updated.
I calculate num_updated and fill-up table_sequence
Set table_list->shared to true or false, depending on whether table is
to be updated or not
We have to check values after setup_tables to get used_keys right in
reference tables
*/
Item_field
*
item
;
List_iterator
<
Item
>
it
(
fields
);
num_fields
=
fields
.
elements
;
field_sequence
=
(
uint
*
)
sql_alloc
(
sizeof
(
uint
)
*
num_fields
);
uint
*
int_ptr
=
field_sequence
;
while
((
item
=
(
Item_field
*
)
it
++
))
{
unsigned
int
counter
=
0
;
for
(
table_ref
=
update_tables
;
table_ref
;
table_ref
=
table_ref
->
next
,
counter
++
)
{
if
(
table_ref
->
table
==
item
->
field
->
table
)
{
if
(
!
table_ref
->
shared
)
{
TABLE
*
tbl
=
table_ref
->
table
;
num_updated
++
;
table_ref
->
shared
=
1
;
if
(
!
not_trans_safe
&&
!
table_ref
->
table
->
file
->
has_transactions
())
not_trans_safe
=
true
;
// to be moved if initialize_tables has to be used
tbl
->
no_keyread
=
1
;
tbl
->
used_keys
=
0
;
}
break
;
}
}
if
(
!
table_ref
)
{
net_printf
(
&
thd
->
net
,
ER_NOT_SUPPORTED_YET
,
"JOIN SYNTAX WITH MULTI-TABLE UPDATES"
);
if
(
setup_fields
(
thd
,
all_tables
,
*
values
,
1
,
0
,
0
))
DBUG_RETURN
(
1
);
}
else
*
int_ptr
++=
counter
;
}
if
(
!
num_updated
--
)
{
net_printf
(
&
thd
->
net
,
ER_NOT_SUPPORTED_YET
,
"SET CLAUSE MUST CONTAIN TABLE.FIELD REFERENCE"
);
DBUG_RETURN
(
1
);
}
/*
Here, I have to allocate the array of temporary tables
I have to treat a case of num_updated=1 differently in send_data() method.
Save tables beeing updated in update_tables
update_table->shared is position for table
Don't use key read on tables that are updated
*/
if
(
num_updated
)
{
tmp_tables
=
(
TABLE
**
)
sql_calloc
(
sizeof
(
TABLE
*
)
*
num_updated
);
infos
=
(
COPY_INFO
*
)
sql_calloc
(
sizeof
(
COPY_INFO
)
*
num_updated
);
fields_by_tables
=
(
List_item
**
)
sql_calloc
(
sizeof
(
List_item
*
)
*
(
num_updated
+
1
));
unsigned
int
counter
;
List
<
Item
>
*
temp_fields
;
for
(
table_ref
=
update_tables
,
counter
=
0
;
table_ref
;
table_ref
=
table_ref
->
next
)
update
.
empty
();
for
(
table_ref
=
all_tables
;
table_ref
;
table_ref
=
table_ref
->
next
)
{
if
(
!
table_ref
->
shared
)
continue
;
// Here we have to add row offset as an additional field ...
if
(
!
(
temp_fields
=
(
List_item
*
)
sql_calloc
(
sizeof
(
List_item
))))
TABLE
*
table
=
table_ref
->
table
;
if
(
tables_to_update
&
table
->
map
)
{
error
=
1
;
// A proper error message is due here
TABLE_LIST
*
tl
=
(
TABLE_LIST
*
)
thd
->
memdup
((
char
*
)
table_ref
,
sizeof
(
*
tl
));
if
(
!
tl
)
DBUG_RETURN
(
1
);
update
.
link_in_list
((
byte
*
)
tl
,
(
byte
**
)
&
tl
->
next
);
tl
->
shared
=
table_count
++
;
table
->
no_keyread
=
1
;
table
->
used_keys
=
0
;
table
->
pos_in_table_list
=
tl
;
}
temp_fields
->
empty
();
it
.
rewind
();
int_ptr
=
field_sequence
;
while
((
item
=
(
Item_field
*
)
it
++
))
{
if
(
*
int_ptr
++
==
counter
)
temp_fields
->
push_back
(
item
);
}
if
(
counter
)
{
Field_string
offset
(
table_ref
->
table
->
file
->
ref_length
,
false
,
"offset"
,
table_ref
->
table
,
true
);
temp_fields
->
push_front
(
new
Item_field
(((
Field
*
)
&
offset
)));
// Here I make tmp tables
int
cnt
=
counter
-
1
;
TMP_TABLE_PARAM
tmp_table_param
;
bzero
((
char
*
)
&
tmp_table_param
,
sizeof
(
tmp_table_param
));
tmp_table_param
.
field_count
=
temp_fields
->
elements
;
if
(
!
(
tmp_tables
[
cnt
]
=
create_tmp_table
(
thd
,
&
tmp_table_param
,
*
temp_fields
,
(
ORDER
*
)
0
,
1
,
0
,
0
,
TMP_TABLE_ALL_COLUMNS
)))
{
error
=
1
;
// A proper error message is due here
DBUG_RETURN
(
1
);
}
tmp_tables
[
cnt
]
->
file
->
extra
(
HA_EXTRA_WRITE_CACHE
);
tmp_tables
[
cnt
]
->
file
->
extra
(
HA_EXTRA_IGNORE_DUP_KEY
);
infos
[
cnt
].
handle_duplicates
=
DUP_IGNORE
;
temp_fields
->
pop
();
// because we shall use those for values only ...
}
fields_by_tables
[
counter
]
=
temp_fields
;
counter
++
;
table_count
=
update
.
elements
;
update_tables
=
(
TABLE_LIST
*
)
update
.
first
;
tmp_tables
=
(
TABLE
**
)
thd
->
calloc
(
sizeof
(
TABLE
*
)
*
table_count
);
tmp_table_param
=
(
TMP_TABLE_PARAM
*
)
thd
->
calloc
(
sizeof
(
TMP_TABLE_PARAM
)
*
table_count
);
fields_for_table
=
(
List_item
**
)
thd
->
alloc
(
sizeof
(
List_item
*
)
*
table_count
);
values_for_table
=
(
List_item
**
)
thd
->
alloc
(
sizeof
(
List_item
*
)
*
table_count
);
if
(
thd
->
fatal_error
)
DBUG_RETURN
(
1
);
for
(
i
=
0
;
i
<
table_count
;
i
++
)
{
fields_for_table
[
i
]
=
new
List_item
;
values_for_table
[
i
]
=
new
List_item
;
}
if
(
thd
->
fatal_error
)
DBUG_RETURN
(
1
);
/* Split fields into fields_for_table[] and values_by_table[] */
field_it
.
rewind
();
while
((
item
=
(
Item_field
*
)
field_it
++
))
{
Item
*
value
=
value_it
++
;
uint
offset
=
item
->
field
->
table
->
pos_in_table_list
->
shared
;
fields_for_table
[
offset
]
->
push_back
(
item
);
values_for_table
[
offset
]
->
push_back
(
value
);
}
if
(
thd
->
fatal_error
)
DBUG_RETURN
(
1
);
/* Allocate copy fields */
max_fields
=
0
;
for
(
i
=
0
;
i
<
table_count
;
i
++
)
set_if_bigger
(
max_fields
,
fields_for_table
[
i
]
->
elements
);
copy_field
=
new
Copy_field
[
max_fields
];
init_ftfuncs
(
thd
,
1
);
error
=
0
;
// Timestamps do not need to be restored, so far ...
DBUG_RETURN
(
0
);
DBUG_RETURN
(
thd
->
fatal_error
!=
0
);
}
void
/*
  Store first used table in main_table as this should be updated first
  This is because we know that no row in this table will be read twice.
  Create temporary tables to store changed values for all other tables
  that are updated.
*/

bool
multi_update::initialize_tables(JOIN *join)
{
#ifdef NOT_YET
 We skip it as it only makes a mess ...........
  TABLE_LIST *walk;
  table_map tables_to_update_from=0;
  for (walk= update_tables ; walk ; walk=walk->next)
    tables_to_update_from|= walk->table->map;

  TABLE_LIST *table_ref;
  DBUG_ENTER("initialize_tables");

  walk= update_tables;
  for (JOIN_TAB *tab=join->join_tab, *end=join->join_tab+join->tables;
       tab < end; tab++)
  if ((thd->options & OPTION_SAFE_UPDATES) && error_if_full_join(join))
    DBUG_RETURN(1);
  main_table=join->join_tab->table;
  trans_safe= transactional_tables= main_table->file->has_transactions();
  log_delayed= trans_safe || main_table->tmp_table != NO_TMP_TABLE;

  /* Create a temporary table for all tables after except main table */
  for (table_ref= update_tables; table_ref; table_ref=table_ref->next)
  {
    if (tab->table->map & tables_to_update_from)
    TABLE *table=table_ref->table;
    if (table != main_table)
    {
      // We are going to update from this table
      TABLE *tbl=walk->table=tab->table;
      /* Don't use KEYREAD optimization on this table */
      tbl->no_keyread=1;
      walk=walk->next;
      uint cnt= table_ref->shared;
      ORDER     group;
      List<Item> temp_fields= *fields_for_table[cnt];
      TMP_TABLE_PARAM *tmp_param= tmp_table_param+cnt;

      /*
	Create a temporary table to store all fields that are changed for this
	table. The first field in the temporary table is a pointer to the
	original row so that we can find and update it
      */

      /* ok to be on stack as this is not referenced outside of this func */
      Field_string offset(table->file->ref_length, 0, "offset", table, 1);
      if (temp_fields.push_front(new Item_field(((Field *) &offset))))
	DBUG_RETURN(1);

      /* Make an unique key over the first field to avoid duplicated updates */
      bzero((char*) &group, sizeof(group));
      group.asc= 1;
      group.item= (Item**) temp_fields.head_ref();

      tmp_param->quick_group=1;
      tmp_param->field_count=temp_fields.elements;
      tmp_param->group_parts=1;
      tmp_param->group_length= table->file->ref_length;
      if (!(tmp_tables[cnt]=create_tmp_table(thd, tmp_param, temp_fields,
					     (ORDER*) &group, 0, 0, 0,
					     TMP_TABLE_ALL_COLUMNS)))
	DBUG_RETURN(1);
      tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
    }
  }
#endif
  DBUG_RETURN(0);
}
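initialize_tables() buffers the changed values for every table except the first one of the join, keyed on the row reference (the "offset" field) with a unique group key, so a row matched several times by the join produces only one pending update. A rough illustration of that dedup-by-row-reference idea, using a plain map as the buffer instead of the server's temporary-table machinery (purely illustrative, not the actual mechanism):

// Illustrative sketch (not MySQL code): buffering changed values for a table
// that cannot be updated while it is being scanned.  The buffer is keyed on
// the row reference, so duplicated matches collapse into one pending update.
#include <cstdio>
#include <map>
#include <string>

int main()
{
  // row reference (e.g. position in the data file) -> new column value
  std::map<long, std::string> pending;

  // The join matches row 42 twice and row 7 once.
  pending.insert(std::make_pair(42L, std::string("new_value")));
  pending.insert(std::make_pair(7L,  std::string("other_value")));
  pending.insert(std::make_pair(42L, std::string("new_value"))); // duplicate: ignored

  std::printf("%zu pending updates\n", pending.size());          // prints 2
  return 0;
}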
multi_update::~multi_update()
{
  int counter= 0;
  for (table_being_updated=update_tables;
       table_being_updated;
       counter++, table_being_updated=table_being_updated->next)
  {
    TABLE *table=table_being_updated->table;
    table->no_keyread=0;
    if (error)
      table->time_stamp=save_time_stamps[counter];
  }
  TABLE_LIST *table;
  for (table= update_tables ; table; table= table->next)
    table->table->no_keyread=0;
  if (tmp_tables)
    for (uint counter= 0; counter < num_updated; counter++)
    {
      for (uint counter= 0; counter < table_count; counter++)
	if (tmp_tables[counter])
	  free_tmp_table(thd, tmp_tables[counter]);
    }
  if (copy_field)
    delete [] copy_field;
  thd->count_cuted_fields=0;		// Restore this setting
  if (!trans_safe)
    thd->options|=OPTION_STATUS_NO_TRANS_UPDATE;
}
bool multi_update::send_data(List<Item> &values)
bool multi_update::send_data(List<Item> &not_used_values)
{
  List<Item> real_values(values);
  for (uint counter= 0; counter < fields.elements; counter++)
    real_values.pop();			// We have skipped fields ....
  if (!num_updated)
  {
    for (table_being_updated=update_tables;
	 table_being_updated;
	 table_being_updated=table_being_updated->next)
    {
      if (!table_being_updated->shared)
	continue;
      TABLE *table=table_being_updated->table;
      /* Check if we are using outer join and we didn't find the row */
      if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
	return 0;
      table->file->position(table->record[0]);
      // Only one table being updated receives a completely different treatment
      table->status|= STATUS_UPDATED;
      store_record(table,1);
      if (fill_record(fields, real_values))
	return 1;
      TABLE_LIST *cur_table;
      DBUG_ENTER("multi_update::send_data");
      found++;
      if (/* compare_record(table, query_id) && */
	  !(error=table->file->update_row(table->record[1], table->record[0])))
	updated++;
      table->file->extra(HA_EXTRA_NO_CACHE);
      return error;
    }
  }
  else
  {
    int secure_counter= -1;
    for (table_being_updated=update_tables;
	 table_being_updated;
	 table_being_updated=table_being_updated->next, secure_counter++)
    for (cur_table= update_tables; cur_table ; cur_table= cur_table->next)
    {
      if (!table_being_updated->shared)
	continue;
      TABLE *table=table_being_updated->table;
      TABLE *table= cur_table->table;
      /* Check if we are using outer join and we didn't find the row */
      if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
	continue;
      uint offset= cur_table->shared;
      table->file->position(table->record[0]);
      Item *item;
      List_iterator<Item> it(real_values);
      List<Item> values_by_table;
      uint *int_ptr=field_sequence;
      while ((item= (Item *) it++))
      {
	if (*int_ptr++ == (uint) (secure_counter + 1))
	  values_by_table.push_back(item);
      }
      // Here I am breaking values as per each table
      if (secure_counter < 0)
      if (table == main_table)
      {
	table->status|= STATUS_UPDATED;
	store_record(table,1);
	if (fill_record(*fields_by_tables[0], values_by_table))
	  return 1;
	found++;
	if (/*compare_record(table, query_id) && */
	    !(error=table->file->update_row(table->record[1],
					    table->record[0])))
	if (fill_record(*fields_for_table[offset], *values_for_table[offset]))
	  DBUG_RETURN(1);
	if (compare_record(table, thd->query_id))
	{
	  updated++;
	  table->file->extra(HA_EXTRA_NO_CACHE);
	  int error;
	  if (!updated++)
	  {
	    /*
	      Inform the main table that we are going to update the table even
	      while we may be scanning it.  This will flush the read cache
	      if it's used.
	    */
	    main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
	  }
	  else
	  if ((error=table->file->update_row(table->record[1],
					     table->record[0])))
	  {
	    table->file->print_error(error,MYF(0));
	    if (!error)
	      error=1;
	    return 1;
	    updated--;
	    DBUG_RETURN(1);
	  }
	}
      }
      else
      {
	// Here we insert into each temporary table
	values_by_table.push_front(new Item_string((char*) table->file->ref,
						   table->file->ref_length));
	fill_record(tmp_tables[secure_counter]->field, values_by_table);
	error= write_record(tmp_tables[secure_counter],
			    &(infos[secure_counter]));
	if (error)
	int error;
	TABLE *tmp_table= tmp_tables[offset];
	fill_record(tmp_table->field+1, *values_for_table[offset]);
	/* Store pointer to row */
	memcpy((char*) tmp_table->field[0]->ptr,
	       (char*) table->file->ref, table->file->ref_length);
	/* Write row, ignoring duplicated updates to a row */
	if ((error= tmp_table->file->write_row(tmp_table->record[0])) &&
	    (error != HA_ERR_FOUND_DUPP_KEY &&
	     error != HA_ERR_FOUND_DUPP_UNIQUE))
	{
	  error=-1;
	  return 1;
	  if (create_myisam_from_heap(table, tmp_table_param + offset,
				      error, 1))
	  {
	    do_update=0;
	    DBUG_RETURN(1);		// Not a table_is_full error
	  }
	}
      }
    }
  }
  return 0;
  DBUG_RETURN(0);
}
void multi_update::send_error(uint errcode,const char *err)
{
  /* First send error what ever it is ... */
  ::send_error(&thd->net,errcode,err);

  /* reset used flags */
  //  update_tables->table->no_keyread=0;

  /* If nothing updated return */
  if (!updated)
    return;
...
...
@@ -676,97 +686,124 @@ void multi_update::send_error(uint errcode,const char *err)
  /* Something already updated so we have to invalidate cache */
  query_cache_invalidate3(thd, update_tables, 1);

  /* Below can happen when thread is killed early ... */
  if (!table_being_updated)
    table_being_updated=update_tables;

  /*
    If rows from the first table only has been updated and it is transactional,
    just do rollback.
    The same if all tables are transactional, regardless of where we are.
    In all other cases do attempt updates ...
    If all tables that has been updated are trans safe then just do rollback.
    If not attempt to do remaining updates.
  */

  if ((table_being_updated->table->file->has_transactions() &&
       table_being_updated == update_tables) || !not_trans_safe)
  if (trans_safe)
    ha_rollback_stmt(thd);
  else if (do_update && num_updated)
    VOID(do_updates(true));
  else if (do_update && table_count > 1)
  {
    /* Add warning here */
    VOID(do_updates(0));
  }
}
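send_error() has to choose between two recovery paths: if everything changed so far is transaction-safe, the statement is simply rolled back; otherwise the remaining buffered updates are carried out so the tables are not left half done. A toy sketch of that decision rule follows; the names and types are illustrative only and not taken from the server:

// Illustrative sketch (not MySQL code) of the recovery rule described above.
#include <cstdio>

enum Recovery { ROLLBACK_STMT, FINISH_UPDATES, NOTHING };

Recovery on_error(bool trans_safe, bool do_update, int table_count)
{
  if (trans_safe)                      // all updated tables support rollback
    return ROLLBACK_STMT;
  if (do_update && table_count > 1)    // buffered updates still outstanding
    return FINISH_UPDATES;
  return NOTHING;
}

int main()
{
  std::printf("%d\n", on_error(true,  true, 3));  // 0: roll the statement back
  std::printf("%d\n", on_error(false, true, 3));  // 1: finish remaining updates
  return 0;
}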
int multi_update::do_updates (bool from_send_error)
int multi_update::do_updates(bool from_send_error)
{
  int local_error= 0, counter= 0;
  TABLE_LIST *cur_table;
  int local_error;
  ha_rows org_updated;
  TABLE *table;
  DBUG_ENTER("do_updates");

  if (from_send_error)
  do_update= 0;				// Don't retry this function
  for (cur_table= update_tables; cur_table ; cur_table= cur_table->next)
  {
    /* Found out table number for 'table_being_updated' */
    for (TABLE_LIST *aux=update_tables;
	 aux != table_being_updated;
	 aux=aux->next)
      counter++;
  }
  else
    table_being_updated = update_tables;
    table = cur_table->table;
    if (table == main_table)
      continue;				// Already updated
    org_updated= updated;
    byte *ref_pos;
    TABLE *tmp_table= tmp_tables[cur_table->shared];
    tmp_table->file->extra(HA_EXTRA_CACHE);	// Change to read cache
    table->file->extra(HA_EXTRA_NO_CACHE);

  do_update = false;
  for (table_being_updated=table_being_updated->next;
       table_being_updated;
       table_being_updated=table_being_updated->next, counter++)
    /*
      Setup copy functions to copy fields from temporary table
    */
    List_iterator_fast<Item> field_it(*fields_for_table[cur_table->shared]);
    Field **field= tmp_table->field+1;	// Skip row pointer
    Copy_field *copy_field_ptr= copy_field, *copy_field_end;
    for ( ; *field ; field++)
    {
      if (!table_being_updated->shared)
	continue;
      Item_field *item= (Item_field* ) field_it++;
      (copy_field_ptr++)->set(item->field, *field, 0);
    }
    copy_field_end=copy_field_ptr;

    TABLE *table = table_being_updated->table;
    TABLE *tmp_table = tmp_tables[counter];
    if (tmp_table->file->extra(HA_EXTRA_NO_CACHE))
    if ((local_error= tmp_table->file->rnd_init(1)))
      goto err;

    ref_pos= (byte*) tmp_table->field[0]->ptr;
    for (;;)
    {
      local_error= 1;
      if (thd->killed && trans_safe)
	goto err;
      if ((local_error=tmp_table->file->rnd_next(tmp_table->record[0])))
      {
	if (local_error == HA_ERR_END_OF_FILE)
	  break;
	if (local_error == HA_ERR_RECORD_DELETED)
	  continue;			// May happen on dup key
	goto err;
      }
      List<Item> list;
      Field **ptr=tmp_table->field, *field;
      // This is supposed to be something like insert_fields
      thd->used_tables|=tmp_table->map;
      while ((field = *ptr++))
      {
	list.push_back((Item *)new Item_field(field));
	if (field->query_id == thd->query_id)
	  thd->dupp_field=field;
	field->query_id=thd->query_id;
	tmp_table->used_keys&=field->part_of_key;
      }
      tmp_table->used_fields=tmp_table->fields;
      local_error=0;
      list.pop();			// we get position some other way ...
      local_error = tmp_table->file->rnd_init(1);
      if (local_error)
	return local_error;
      while (!(local_error=tmp_table->file->rnd_next(tmp_table->record[0])) &&
	     (!thd->killed || from_send_error || not_trans_safe))
      {
	found++;
	local_error= table->file->rnd_pos(table->record[0],
					  (byte*) (*(tmp_table->field))->ptr);
	if (local_error)
	  return local_error;
	if ((local_error= table->file->rnd_pos(table->record[0], ref_pos)))
	  goto err;
	table->status|= STATUS_UPDATED;
	store_record(table,1);
	local_error= (fill_record(*fields_by_tables[counter + 1],list) ||
		      /* compare_record(table, query_id) || */
		      table->file->update_row(table->record[1],
					      table->record[0]));
	if (local_error)

	/* Copy data from temporary table to current table */
	for (copy_field_ptr=copy_field;
	     copy_field_ptr != copy_field_end;
	     copy_field_ptr++)
	  (*copy_field_ptr->do_copy)(copy_field_ptr);

	if (compare_record(table, thd->query_id))
	{
	  table->file->print_error(local_error,MYF(0));
	  break;
	  if ((local_error=table->file->update_row(table->record[1],
						   table->record[0])))
	  {
	    if (local_error != HA_ERR_FOUND_DUPP_KEY ||
		handle_duplicates != DUP_IGNORE)
	      goto err;
	  }
	  else
	    updated++;
	  if (table->tmp_table != NO_TMP_TABLE)
	    log_delayed= 1;
	}
      }
      if (local_error == HA_ERR_END_OF_FILE)
	local_error = 0;
    }
    return local_error;

    if (updated != org_updated)
    {
      if (table->tmp_table != NO_TMP_TABLE)
	log_delayed= 1;			// Tmp tables forces delay log
      if (table->file->has_transactions())
	log_delayed= transactional_tables= 1;
      else
	trans_safe= 0;			// Can't do safe rollback
    }
  }
  DBUG_RETURN(0);

err:
  if (!from_send_error)
    table->file->print_error(local_error,MYF(0));

  if (updated != org_updated)
  {
    if (table->tmp_table != NO_TMP_TABLE)
      log_delayed= 1;
    if (table->file->has_transactions())
      log_delayed= transactional_tables= 1;
    else
      trans_safe= 0;
  }
  DBUG_RETURN(1);
}
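do_updates() replays the buffered changes: it scans each temporary table, repositions on the original row via the stored reference, copies the new values over and writes the row back, skipping rows that would not change. A condensed stand-in for that second pass, using plain containers instead of handler calls (purely illustrative, all names are hypothetical):

// Illustrative sketch (not MySQL code) of the replay pass over buffered
// (row reference -> new value) entries, counting found vs. actually changed.
#include <cstdio>
#include <map>
#include <string>
#include <vector>

int main()
{
  std::vector<std::string> table= {"a", "b", "c", "d"};      // stands in for the data file
  std::map<size_t, std::string> pending= {{1, "B"}, {3, "d"}};

  long found= 0, updated= 0;
  for (std::map<size_t, std::string>::const_iterator it= pending.begin();
       it != pending.end(); ++it)
  {
    found++;
    std::string &row= table[it->first]; // "rnd_pos()" on the stored reference
    if (row != it->second)              // "compare_record()": skip no-op updates
    {
      row= it->second;
      updated++;
    }
  }
  std::printf("found %ld, updated %ld\n", found, updated);   // found 2, updated 1
  return 0;
}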
...
...
@@ -774,50 +811,49 @@ int multi_update::do_updates (bool from_send_error)
bool multi_update::send_eof()
{
  thd->proc_info="updating the reference tables";
  char buff[80];
  thd->proc_info="updating reference tables";

  /* Does updates for the last n - 1 tables, returns 0 if ok */
  int local_error = (num_updated) ? do_updates(false) : 0;

  /* reset used flags */
#ifndef NOT_USED
  update_tables->table->no_keyread=0;
#endif
  if (local_error == -1)
    local_error= 0;
  int local_error= (table_count) ? do_updates(0) : 0;
  thd->proc_info= "end";

  if (local_error)
    send_error(local_error, "An error occured in multi-table update");

  /*
    Write the SQL statement to the binlog if we updated
    rows and we succeeded, or also in an error case when there
    was a non-transaction-safe table involved, since
    modifications in it cannot be rolled back.
    rows and we succeeded or if we updated some non
    transacational tables
  */
  if (updated || not_trans_safe)
  if (updated && (local_error <= 0 || !trans_safe))
  {
    mysql_update_log.write(thd,thd->query,thd->query_length);
    Query_log_event qinfo(thd, thd->query, thd->query_length, 0);

    /*
      mysql_bin_log is not open if binlogging or replication
      is not used
    */
    if (mysql_bin_log.is_open() && mysql_bin_log.write(&qinfo) &&
	!not_trans_safe)
      local_error=1;
    /* Log write failed: roll back the SQL statement */
    if (mysql_bin_log.is_open())
    {
      Query_log_event qinfo(thd, thd->query, thd->query_length,
			    log_delayed);
      if (mysql_bin_log.write(&qinfo) && trans_safe)
	local_error=1;			// Rollback update
    }
    if (!log_delayed)
      thd->options|=OPTION_STATUS_NO_TRANS_UPDATE;
  }

  /* Commit or rollback the current SQL statement */
  VOID(ha_autocommit_or_rollback(thd, local_error > 0));
  if (transactional_tables)
  {
    if (ha_autocommit_or_rollback(thd, local_error >= 0))
      local_error=1;
  }
  else
    local_error= 0;			// this can happen only if it is end of file error
  if (!local_error)			// if the above log write did not fail ...
  if (local_error > 0)			// if the above log write did not fail ...
  {
    char buff[80];
    /* Safety: If we haven't got an error before (should not happen) */
    my_message(ER_UNKNOWN_ERROR, "An error occured in multi-table update",
	       MYF(0));
    ::send_error(&thd->net);
    return 1;
  }

  sprintf(buff, ER(ER_UPDATE_INFO), (long) found, (long) updated,
	  (long) thd->cuted_fields);
  if (updated)
...
...
@@ -827,7 +863,5 @@ bool multi_update::send_eof()
  ::send_ok(&thd->net,
	    (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated,
	    thd->insert_id_used ? thd->insert_id() : 0L, buff);
}
  thd->count_cuted_fields=0;
  return 0;
}
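send_eof() writes the statement to the binary log only when rows were changed and either the whole update succeeded or a non-transactional table was touched (those modifications cannot be rolled back, so the statement must still be replicated). The rule, reduced to a small hedged sketch with illustrative names, not the server's code:

// Illustrative sketch (not MySQL code) of the binlog-write condition above.
#include <cstdio>

bool should_binlog(long updated, int local_error, bool trans_safe)
{
  // Log when rows changed and either there was no error, or at least one
  // updated table is non-transactional and cannot be rolled back anyway.
  return updated && (local_error <= 0 || !trans_safe);
}

int main()
{
  std::printf("%d\n", should_binlog(5, 0, true));  // 1: success, rows changed
  std::printf("%d\n", should_binlog(5, 1, false)); // 1: error, but not rollback-able
  std::printf("%d\n", should_binlog(5, 1, true));  // 0: error, safe to roll back
  return 0;
}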
sql/sql_yacc.yy
View file @
bd53a308
...
...
@@ -1451,9 +1451,9 @@ select:
select_init { Lex->sql_command=SQLCOM_SELECT; };
select_init:
	SELECT_SYM select_part2 { Select->braces=false; } opt_union
	SELECT_SYM select_part2 { Select->braces= 0; } opt_union
	|
	'(' SELECT_SYM select_part2 ')' { Select->braces=true;} union_opt;
	'(' SELECT_SYM select_part2 ')' { Select->braces= 1;} union_opt;
select_part2:
...
...
@@ -2366,7 +2366,7 @@ procedure_clause:
lex->proc_list.elements=0;
lex->proc_list.first=0;
lex->proc_list.next= (byte**) &lex->proc_list.first;
if (add_proc_to_list(new Item_field(NULL,NULL,$2.str)))
	  if (add_proc_to_list(lex->thd, new Item_field(NULL,NULL,$2.str)))
YYABORT;
current_thd->safe_to_cache_query=0;
}
...
...
@@ -2384,10 +2384,11 @@ procedure_list2:
procedure_item:
remember_name expr
{
if (add_proc_to_list($2))
LEX *lex= Lex;
if (add_proc_to_list(lex->thd, $2))
YYABORT;
if (!$2->name)
	      $2->set_name($1,(uint) ((char*) Lex->tok_end - $1));
	      $2->set_name($1,(uint) ((char*) lex->tok_end - $1));
};
opt_into:
...
...
sql/table.h
View file @
bd53a308
...
...
@@ -126,7 +126,11 @@ struct st_table {
  key_part_map const_key_parts[MAX_KEY];
  ulong		query_id;
  uint		temp_pool_slot;
  union					/* Temporary variables */
  {
    uint	temp_pool_slot;		/* Used by intern temp tables */
    struct st_table_list *pos_in_table_list;
  };
  THD		*in_use;		/* Which thread uses this */
  struct st_table *next,*prev;
...
...
@@ -148,10 +152,10 @@ typedef struct st_table_list
  GRANT_INFO	grant;
  thr_lock_type	lock_type;
  uint		outer_join;		/* Which join type */
  uint		shared;			/* Used in union or in multi-upd */
  uint32	db_length, real_name_length;
  bool		straight;		/* optimize with prev table */
  bool		updating;		/* for replicate-do/ignore table */
  bool		shared;			/* Used twice in union */
  bool		do_redirect;		/* To get the struct in UNION's */
} TABLE_LIST;
...
...
sql/uniques.cc
View file @
bd53a308
...
...
@@ -53,7 +53,8 @@ Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg,
  :max_in_memory_size(max_in_memory_size_arg), elements(0)
{
  my_b_clear(&file);
  init_tree(&tree, max_in_memory_size / 16, 0, size, comp_func, 0, NULL,
	    comp_func_fixed_arg);
  init_tree(&tree, max_in_memory_size / 16, 0, size, comp_func, 0, NULL,
	    comp_func_fixed_arg);
  /* If the following fail's the next add will also fail */
  my_init_dynamic_array(&file_ptrs, sizeof(BUFFPEK), 16, 16);
  max_elements= max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+size);
...
...