Commit 95b8b028 authored by tnurnberg@sin.intern.azundris.com

Merge tnurnberg@bk-internal.mysql.com:/home/bk/mysql-5.1-maint
into sin.intern.azundris.com:/home/tnurnberg/22540/51-22540
parents ef80d45d 5b98b71d
@@ -5,7 +5,6 @@
 -- source include/have_log_bin.inc
 -- source include/not_embedded.inc
 -- source include/have_innodb.inc
--- source include/have_log_bin.inc
 -- source include/have_debug.inc
 
 --disable_warnings
@@ -139,6 +138,36 @@ show binlog events from 0;
 set session autocommit = @ac;
 
+# now show that nothing breaks if we need to read from the cache more
+# than once, resulting in split event-headers
+set @bcs = @@binlog_cache_size;
+set @ac = @@autocommit;
+set global binlog_cache_size=4096;
+set autocommit= 0;
+reset master;
+
+create table t1 (a int) engine=innodb;
+
+let $1=400;
+disable_query_log;
+begin;
+while ($1)
+{
+  eval insert into t1 values( $1 );
+  dec $1;
+}
+commit;
+enable_query_log;
+
+show binlog events from 0;
+
+drop table t1;
+
+set global binlog_cache_size=@bcs;
+set session autocommit = @ac;
+
 --echo End of 5.0 tests
 
 # Test of a too big SET INSERT_ID: see if the truncated value goes
......
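The test added above forces MYSQL_BIN_LOG::write_cache() to read the transaction cache back in several chunks within a single commit: the cache is capped at 4096 bytes while 400 single-row INSERTs are binlogged, so some of the 19-byte event headers (LOG_EVENT_HEADER_LEN) can end up straddling a read-buffer boundary. A rough back-of-envelope sketch of that effect in C, assuming roughly 70 bytes per INSERT event (an illustrative guess, not a figure taken from the patch):

/* Illustrative only: count how many event headers would cross a
   4096-byte read boundary, for an assumed per-event size of 70 bytes. */
#include <stdio.h>

int main(void)
{
  const unsigned chunk= 4096, hdr= 19, ev= 70;   /* ev is an assumption */
  unsigned split= 0;
  for (unsigned i= 0; i < 400; i++)
  {
    unsigned start= i * ev;            /* header occupies [start, start + hdr) */
    if (start / chunk != (start + hdr - 1) / chunk)
      split++;                         /* header crosses a read-buffer border  */
  }
  printf("%u of 400 headers would be split across cache reads\n", split);
  return 0;
}

The exact count depends on the real event sizes; the point is only that with 400 events and reads of this size, split headers typically do occur, which is what the reworked while/carry logic in the next hunk has to survive.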
@@ -3995,58 +3995,61 @@ int MYSQL_BIN_LOG::write_cache(IO_CACHE *cache, bool lock_log, bool sync_log)
       memcpy((char *)cache->read_pos, &header[carry], LOG_EVENT_HEADER_LEN - carry);
 
       /* next event header at ... */
-      hdr_offs = LOG_EVENT_HEADER_LEN - carry +
-                 uint4korr(&header[EVENT_LEN_OFFSET]);
+      hdr_offs = uint4korr(&header[EVENT_LEN_OFFSET]) - carry;
 
       carry= 0;
     }
 
     /* if there is anything to write, process it. */
 
-    if(likely(bytes > 0))
+    if (likely(bytes > 0))
     {
       /*
-        next header beyond current read-buffer? we'll get it later
-        (though not necessarily in the very next iteration).
+        process all event-headers in this (partial) cache.
+        if next header is beyond current read-buffer,
+        we'll get it later (though not necessarily in the
+        very next iteration, just "eventually").
       */
 
-      if (hdr_offs >= bytes)
-        hdr_offs -= bytes;
-      else
+      while (hdr_offs < bytes)
       {
-        /* process all event-headers in this (partial) cache. */
-
-        do {
-          /*
-            partial header only? save what we can get, process once
-            we get the rest.
-          */
-
-          if (hdr_offs + LOG_EVENT_HEADER_LEN > bytes)
-          {
-            carry= bytes - hdr_offs;
-            memcpy(header, (char *)cache->read_pos + hdr_offs, carry);
-            bytes= hdr_offs;
-          }
-          else
-          {
-            /* we've got a full event-header, and it came in one piece */
-
-            uchar *log_pos= (uchar *)cache->read_pos + hdr_offs + LOG_POS_OFFSET;
-
-            /* fix end_log_pos */
-            val= uint4korr(log_pos) + group;
-            int4store(log_pos, val);
-
-            /* next event header at ... */
-            log_pos= (uchar *)cache->read_pos + hdr_offs + EVENT_LEN_OFFSET;
-            hdr_offs += uint4korr(log_pos);
-
-          }
-        } while (hdr_offs < bytes);
-      }
+        /*
+          partial header only? save what we can get, process once
+          we get the rest.
+        */
+
+        if (hdr_offs + LOG_EVENT_HEADER_LEN > bytes)
+        {
+          carry= bytes - hdr_offs;
+          memcpy(header, (char *)cache->read_pos + hdr_offs, carry);
+          bytes= hdr_offs;
+        }
+        else
+        {
+          /* we've got a full event-header, and it came in one piece */
+
+          uchar *log_pos= (uchar *)cache->read_pos + hdr_offs + LOG_POS_OFFSET;
+
+          /* fix end_log_pos */
+          val= uint4korr(log_pos) + group;
+          int4store(log_pos, val);
+
+          /* next event header at ... */
+          log_pos= (uchar *)cache->read_pos + hdr_offs + EVENT_LEN_OFFSET;
+          hdr_offs += uint4korr(log_pos);
+
+        }
+      }
 
       /*
         Adjust hdr_offs. Note that this doesn't mean it will necessarily
         be valid in the next iteration; if the current event is very long,
         it may take a couple of read-iterations (and subsequent fixings
        of hdr_offs) for it to become valid again.
+
+        if we had a split header, hdr_offs was already fixed above.
       */
+
+      if (carry == 0)
        hdr_offs -= bytes;
     }
 
     /* Write data to the binary log file */
......
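Taken together, the change replaces the old if/else-plus-do/while structure with a single while loop, recomputes hdr_offs relative to the new read buffer when a header was split (uint4korr(&header[EVENT_LEN_OFFSET]) - carry), and only performs the end-of-buffer hdr_offs adjustment when no header was carried over (if (carry == 0)). The standalone C sketch below mimics that technique on a plain in-memory byte stream: length-prefixed events are visited one fixed-size chunk at a time, the end-position field in every header is shifted by a group offset, and a header straddling a chunk boundary is saved and completed in the next chunk. Sizes, field offsets and names here are simplified stand-ins for LOG_EVENT_HEADER_LEN, EVENT_LEN_OFFSET and LOG_POS_OFFSET; this is not MySQL code.

/*
  Standalone sketch (not MySQL source) of the carry/hdr_offs technique:
  patch an end-position field in every event header of a stream that is
  visited in fixed-size chunks, even when a header straddles a chunk
  boundary.  Assumes a well-formed stream that ends on an event boundary;
  byte order is the host's.
*/
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { HDR_LEN = 19, LEN_OFFS = 9, POS_OFFS = 13, CHUNK = 64 };

static uint32_t get4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return v; }
static void     put4(uint8_t *p, uint32_t v) { memcpy(p, &v, 4); }

/* Add `group` to the end-position field of every event header in buf[0..total). */
static void patch_stream(uint8_t *buf, size_t total, uint32_t group)
{
  uint8_t header[HDR_LEN];
  size_t  carry= 0;     /* bytes of a split header saved from the previous chunk */
  size_t  hdr_offs= 0;  /* offset of the next header within the current chunk    */

  for (size_t pos= 0; pos < total; pos+= CHUNK)
  {
    uint8_t *chunk= buf + pos;
    size_t   bytes= (total - pos < CHUNK) ? total - pos : CHUNK;

    if (carry > 0)
    {
      /* finish the split header, patch it, and write it back in place */
      memcpy(header + carry, chunk, HDR_LEN - carry);
      put4(header + POS_OFFS, get4(header + POS_OFFS) + group);
      memcpy(buf + pos - carry, header, HDR_LEN);
      /* next header: event length minus the part that sat in the old chunk */
      hdr_offs= get4(header + LEN_OFFS) - carry;
      carry= 0;
    }

    while (hdr_offs < bytes)
    {
      if (hdr_offs + HDR_LEN > bytes)
      {
        /* only part of a header fits; save it and finish it next time around */
        carry= bytes - hdr_offs;
        memcpy(header, chunk + hdr_offs, carry);
        bytes= hdr_offs;
      }
      else
      {
        /* whole header in this chunk: patch end position, hop to next event */
        put4(chunk + hdr_offs + POS_OFFS,
             get4(chunk + hdr_offs + POS_OFFS) + group);
        hdr_offs+= get4(chunk + hdr_offs + LEN_OFFS);
      }
    }

    if (carry == 0)       /* for split headers, hdr_offs is recomputed above */
      hdr_offs-= bytes;
  }
}

int main(void)
{
  /* 40 events of 25 bytes each: several headers straddle the 64-byte chunks */
  enum { N = 40, EV = 25 };
  static uint8_t buf[N * EV];
  for (int i= 0; i < N; i++)
  {
    uint8_t *h= buf + i * EV;
    put4(h + LEN_OFFS, EV);                       /* event length           */
    put4(h + POS_OFFS, (uint32_t)((i + 1) * EV)); /* relative end position  */
  }
  patch_stream(buf, sizeof buf, 1000);
  for (int i= 0; i < N; i++)
    assert(get4(buf + i * EV + POS_OFFS) == 1000u + (uint32_t)((i + 1) * EV));
  puts("all end positions shifted by the group offset");
  return 0;
}

The sketch patches the stream in place, so it skips one detail of the real function: write_cache() flushes each read buffer to the log file as it goes, which is why the patch also writes the first half of a split header out of header[] separately and copies the fixed second half back into cache->read_pos before that buffer is written.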