Commit c13bf0e4 authored by brian@zim.(none)

Merge baker@bk-internal.mysql.com:/home/bk/mysql-5.1-arch

into  zim.(none):/home/brian/mysql/unmerge-5.1
parents 10dc8025 56491954
......@@ -13820,4 +13820,31 @@ i v
2 abc
4 3r4f
5 lmn
DROP TABLE t5;
CREATE TABLE `t5` (
`a` int(11) NOT NULL auto_increment,
b varchar(250),
c varchar(800),
KEY (`a`)
) ENGINE=ARCHIVE DEFAULT CHARSET=latin1;
INSERT INTO t5 VALUES (NULL, "foo", "grok this!");
INSERT INTO t5 VALUES (NULL, "We the people", NULL);
INSERT INTO t5 VALUES (NULL, "in order to form a more peefect union", "secure the blessing of liberty");
INSERT INTO t5 VALUES (NULL, "establish justice", "to ourselves and");
INSERT INTO t5 VALUES (32, "ensure domestic tranquility", NULL);
INSERT INTO t5 VALUES (23, "provide for the common defense", "posterity");
INSERT INTO t5 VALUES (NULL, "promote the general welfare", "do ordain");
INSERT INTO t5 VALUES (NULL, "abcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabc", "do ordain");
Warnings:
Warning 1265 Data truncated for column 'b' at row 1
SELECT * FROM t5;
a b c
1 foo grok this!
2 We the people NULL
3 in order to form a more peefect union secure the blessing of liberty
4 establish justice to ourselves and
32 ensure domestic tranquility NULL
23 provide for the common defense posterity
33 promote the general welfare do ordain
34 abcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyz do ordain
drop table t1, t2, t4, t5;
......@@ -1492,6 +1492,27 @@ select * from t1 where i between 2 and 4 and v in ('def','3r4f','lmn');
alter table t1 data directory="$MYSQLTEST_VARDIR/tmp";
select * from t1;
# Testing cleared row key
DROP TABLE t5;
CREATE TABLE `t5` (
`a` int(11) NOT NULL auto_increment,
b varchar(250),
c varchar(800),
KEY (`a`)
) ENGINE=ARCHIVE DEFAULT CHARSET=latin1;
INSERT INTO t5 VALUES (NULL, "foo", "grok this!");
INSERT INTO t5 VALUES (NULL, "We the people", NULL);
INSERT INTO t5 VALUES (NULL, "in order to form a more peefect union", "secure the blessing of liberty");
INSERT INTO t5 VALUES (NULL, "establish justice", "to ourselves and");
INSERT INTO t5 VALUES (32, "ensure domestic tranquility", NULL);
INSERT INTO t5 VALUES (23, "provide for the common defense", "posterity");
INSERT INTO t5 VALUES (NULL, "promote the general welfare", "do ordain");
INSERT INTO t5 VALUES (NULL, "abcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabcdeghijklmnopqrstuvwxyzabc", "do ordain");
SELECT * FROM t5;
#
# Cleanup, test is over
#
......
......@@ -6578,9 +6578,9 @@ void Field_varstring::sql_type(String &res) const
}
uint32 Field_varstring::data_length(const char *from)
uint32 Field_varstring::data_length()
{
return length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr);
return length_bytes == 1 ? (uint32) (uchar) *ptr : uint2korr(ptr);
}
/*
......
......@@ -153,7 +153,7 @@ public:
/*
data_length() return the "real size" of the data in memory.
*/
virtual uint32 data_length(const char *from) { return pack_length(); }
virtual uint32 data_length() { return pack_length(); }
virtual uint32 sort_length() const { return pack_length(); }
virtual void reset(void) { bzero(ptr,pack_length()); }
virtual void reset_fields() {}
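For context on the signature change above: the base Field::data_length() simply falls back to pack_length(), while Field_varstring overrides it to decode the one- or two-byte length prefix stored in front of the payload (one byte when the declared maximum fits in 255 bytes). Below is a minimal standalone sketch of that decode, not the server code itself; it assumes uint2korr() is an ordinary little-endian two-byte read, and varstring_data_length() is a name made up for the illustration.

#include <cstdint>
#include <cstdio>

// Illustration only: decode the length prefix of a VARCHAR record slot.
// length_bytes is 1 when the column's maximum length fits in 255 bytes,
// otherwise 2; the two-byte case is a little-endian read (uint2korr).
static uint32_t varstring_data_length(const unsigned char *ptr,
                                      unsigned length_bytes)
{
  return length_bytes == 1
           ? (uint32_t) ptr[0]
           : (uint32_t) (ptr[0] | (ptr[1] << 8));
}

int main()
{
  unsigned char prefix[2] = { 0x2A, 0x01 };   // little-endian 0x012A
  printf("%u\n", (unsigned) varstring_data_length(prefix, 2));  // prints 298
  return 0;
}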
......@@ -239,7 +239,7 @@ public:
*/
my_size_t last_null_byte() const {
my_size_t bytes= do_last_null_byte();
DBUG_PRINT("debug", ("last_null_byte() ==> %d", bytes));
DBUG_PRINT("debug", ("last_null_byte() ==> %d", (uint32)bytes));
DBUG_ASSERT(bytes <= table->s->null_bytes);
return bytes;
}
......@@ -1167,7 +1167,7 @@ public:
int key_cmp(const byte *str, uint length);
uint packed_col_length(const char *to, uint length);
uint max_packed_col_length(uint max_length);
uint32 data_length(const char *from);
uint32 data_length();
uint size_of() const { return sizeof(*this); }
enum_field_types real_type() const { return MYSQL_TYPE_VARCHAR; }
bool has_charset(void) const
......
......@@ -329,10 +329,14 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0]));
DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1]));
DBUG_PRINT("ha_archive::read_meta_file", ("Rows %llu", *rows));
DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %llu", check_point));
DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %llu", *auto_increment));
DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %llu", *forced_flushes));
DBUG_PRINT("ha_archive::read_meta_file", ("Rows %llu",
(long long unsigned)*rows));
DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %llu",
(long long unsigned) check_point));
DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %llu",
(long long unsigned)*auto_increment));
DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %llu",
(long long unsigned)*forced_flushes));
DBUG_PRINT("ha_archive::read_meta_file", ("Real Path %s", real_path));
DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr)));
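The casts added to these DBUG_PRINT calls look redundant but are presumably there for printf-format portability: ha_rows and ulonglong are typedefs whose underlying type can differ by platform, so passing them straight to a %llu conversion can trigger format warnings or a format/argument mismatch. A tiny sketch of the pattern, with a purely illustrative typedef and function name:

#include <cstdio>

// Illustrative only: on some 64-bit platforms a "row count" typedef may
// resolve to unsigned long rather than unsigned long long.
typedef unsigned long my_row_count_t;

static void print_rows(my_row_count_t rows)
{
  // The explicit cast guarantees the argument really is an
  // unsigned long long, matching the %llu conversion everywhere.
  printf("Rows %llu\n", (unsigned long long) rows);
}

int main()
{
  print_rows(42);
  return 0;
}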
......@@ -385,12 +389,14 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows,
(uint)ARCHIVE_CHECK_HEADER));
DBUG_PRINT("ha_archive::write_meta_file", ("Version %d",
(uint)ARCHIVE_VERSION));
DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", (ulonglong)rows));
DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", check_point));
DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu",
(unsigned long long)rows));
DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu",
(unsigned long long)check_point));
DBUG_PRINT("ha_archive::write_meta_file", ("Auto Increment %llu",
auto_increment));
(unsigned long long)auto_increment));
DBUG_PRINT("ha_archive::write_meta_file", ("Forced Flushes %llu",
forced_flushes));
(unsigned long long)forced_flushes));
DBUG_PRINT("ha_archive::write_meta_file", ("Real path %s",
real_path));
DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));
......@@ -774,7 +780,8 @@ int ha_archive::real_write_row(byte *buf, azio_stream *writer)
written= azwrite(writer, buf, table->s->reclength);
DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %d",
written, table->s->reclength));
(uint32)written,
(uint32)table->s->reclength));
if (!delayed_insert || !bulk_insert)
share->dirty= TRUE;
......@@ -913,18 +920,25 @@ int ha_archive::write_row(byte *buf)
*/
for (Field **field=table->field ; *field ; field++)
{
DBUG_PRINT("archive",("Pack is %d\n", (*field)->pack_length()));
DBUG_PRINT("archive",("MyPack is %d\n", (*field)->data_length((char*) buf + (*field)->offset())));
if ((*field)->real_type() == MYSQL_TYPE_VARCHAR)
{
uint actual_length= (*field)->data_length((char*) buf + (*field)->offset());
uint offset= (*field)->offset() + actual_length +
(actual_length > 255 ? 2 : 1);
DBUG_PRINT("archive",("Offset is %d -> %d\n", actual_length, offset));
/*
if ((*field)->pack_length() + (*field)->offset() != offset)
bzero(buf + offset, (size_t)((*field)->pack_length() + (actual_length > 255 ? 2 : 1) - (*field)->data_length));
Pack length will report 256 when you have 255 bytes
of data plus the single byte for length.
Probably could have added a method to say the number
of bytes taken up by field for the length data.
*/
uint32 actual_length= (*field)->data_length() +
((*field)->pack_length() > 256 ? 2 : 1);
if ((*field)->real_type() == MYSQL_TYPE_VARCHAR)
{
char *ptr= (*field)->ptr + actual_length;
DBUG_ASSERT(actual_length <= (*field)->pack_length());
uint32 to_free= (*field)->pack_length() - actual_length;
if (to_free > 0)
bzero(ptr, to_free);
}
}
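The net effect of the rewritten loop above: for every VARCHAR column, everything between the end of the actual value (length prefix plus payload) and the end of the slot reserved for the column is zeroed before the fixed-size record is handed to azwrite(), so stale buffer bytes are not written into the compressed archive and identical values produce identical row images. A rough standalone sketch of that idea follows; VarcharSlot and zero_varchar_tail() are hypothetical stand-ins for the Field accessors used in write_row().

#include <cassert>
#include <cstdint>
#include <cstring>

// Hypothetical stand-in for the Field_varstring accessors: ptr points
// into the record buffer, data_len is the payload size, length_bytes is
// 1 or 2, pack_len is the full slot size reserved for the column.
struct VarcharSlot
{
  unsigned char *ptr;
  uint32_t data_len;
  uint32_t length_bytes;
  uint32_t pack_len;
};

// Zero the unused tail of the slot so no stale bytes end up in the
// archived row.
static void zero_varchar_tail(VarcharSlot &f)
{
  uint32_t used = f.data_len + f.length_bytes;
  assert(used <= f.pack_len);
  if (f.pack_len > used)
    memset(f.ptr + used, 0, f.pack_len - used);
}

int main()
{
  unsigned char slot[251] = {0};
  slot[0] = 3;                              // length prefix: 3 bytes used
  memcpy(slot + 1, "foo", 3);
  VarcharSlot f = { slot, 3, 1, sizeof(slot) };
  zero_varchar_tail(f);                     // clears bytes 4..250
  return 0;
}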
......@@ -1054,7 +1068,8 @@ int ha_archive::rnd_init(bool scan)
if (scan)
{
scan_rows= share->rows_recorded;
DBUG_PRINT("info", ("archive will retrieve %llu rows", scan_rows));
DBUG_PRINT("info", ("archive will retrieve %llu rows",
(unsigned long long)scan_rows));
stats.records= 0;
/*
......@@ -1096,8 +1111,8 @@ int ha_archive::get_row(azio_stream *file_to_read, byte *buf)
DBUG_ENTER("ha_archive::get_row");
read= azread(file_to_read, buf, table->s->reclength);
DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %d", read,
table->s->reclength));
DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %lu", read,
(unsigned long)table->s->reclength));
if (read == Z_STREAM_ERROR)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
......@@ -1314,7 +1329,8 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
share->rows_recorded++;
}
}
DBUG_PRINT("info", ("recovered %llu archive rows", share->rows_recorded));
DBUG_PRINT("info", ("recovered %llu archive rows",
(unsigned long long)share->rows_recorded));
my_free((char*)buf, MYF(0));
if (rc && rc != HA_ERR_END_OF_FILE)
......