Commit 3b2f5c05 authored by marko

branches/zip: When creating an index in innodb_strict_mode, check that
the maximum record size will never exceed the B-tree page size limit.
For uncompressed tables, there should always be enough space for two
records in an empty B-tree page.  For compressed tables, there should
be enough space for storing two node pointer records or one data
record in an empty page in uncompressed format.
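
For orientation, here is a rough stand-alone sketch of the arithmetic (not
part of the patch; the empty-page free space of 16252 bytes, the 5-byte
COMPACT record header, the 2-byte length fields, and the 4-byte node
pointer are assumptions, chosen to be consistent with the 8126-byte
"Row size too large" limit and the KEY_BLOCK_SIZE=1/2/4 outcomes in the
tests below):

	#include <stdio.h>

	int main(void)
	{
		/* ASSUMED: free space of an empty uncompressed 16KiB
		page; the patch obtains the real figure from
		page_get_free_space_of_empty(). */
		unsigned free_space = 16252;
		/* Worst-case node pointer record for the test case
		PRIMARY KEY (c(767), d(767)) in ROW_FORMAT=COMPACT:
		header + (length bytes + prefix) per column + child
		page number.  The 5/2/4 sizes are assumptions. */
		unsigned node_ptr = 5 + (2 + 767) + (2 + 767) + 4;

		/* Uncompressed: two records must fit on an empty page,
		so a record may use at most half the free space;
		16252 / 2 = 8126, the limit quoted in the error
		messages in the tests below. */
		printf("uncompressed leaf record limit: %u\n",
		       free_space / 2);

		/* Compressed: a node pointer record must fit in half
		of an empty compressed page (page_zip_empty_size()).
		1547 bytes is too big for a 1KiB or 2KiB page but fits
		in half of a 4KiB one, matching the KEY_BLOCK_SIZE
		outcomes in the tests. */
		printf("node pointer record: %u bytes\n", node_ptr);
		return 0;
	}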

dict_build_table_def_step(): Remove the inaccurate check for table row
size.

dict_index_too_big_for_tree(): New function: check if the index
records would be too big for a B-tree page.

dict_index_add_to_cache(): Add the parameter "strict".  Invoke
dict_index_too_big_for_tree() if it is set.

trx_is_strict(), thd_is_strict(): New functions, for determining if
innodb_strict_mode is enabled for the current transaction.

dict_create_index_step(): Pass the new parameter strict of
dict_index_add_to_cache() as trx_is_strict(trx).  All other callers
pass it as FALSE.
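
To make the calling convention concrete, here is a minimal compilable
sketch (stub types only, not InnoDB code; the return values standing in
for DB_SUCCESS and DB_TOO_BIG_RECORD are illustrative assumptions):

	#include <stdio.h>
	#include <stddef.h>

	typedef int ibool;
	typedef struct trx_t trx_t;	/* opaque stand-in */
	enum { FALSE = 0, TRUE = 1 };

	/* stub: pretend innodb_strict_mode is enabled */
	static ibool trx_is_strict(trx_t* trx)
	{
		(void) trx;
		return(TRUE);
	}

	/* stub: the real dict_index_add_to_cache() runs
	dict_index_too_big_for_tree() only when strict is set */
	static int dict_index_add_to_cache_stub(ibool strict)
	{
		return(strict
		       ? 1	/* DB_TOO_BIG_RECORD */
		       : 0);	/* DB_SUCCESS */
	}

	int main(void)
	{
		/* DDL path (dict_create_index_step): honors the
		session setting */
		printf("%d\n",
		       dict_index_add_to_cache_stub(trx_is_strict(NULL)));
		/* boot/load/ibuf paths: always FALSE, never rejected */
		printf("%d\n", dict_index_add_to_cache_stub(FALSE));
		return 0;
	}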

innodb.test: Enable innodb_strict_mode before attempting to create a
table with a too big record size.

innodb-zip.test: Remove the test of inserting random data.  Add tests
for checking that the maximum record lengths are enforced at table
creation time.
parent 8a484d98
ChangeLog:
2008-09-17	The InnoDB Team

	* mysql-test/innodb.result, mysql-test/innodb-zip.result,
	mysql-test/innodb-zip.test, mysql-test/innodb.test,
	ibuf/ibuf0ibuf.c, dict/dict0crea.c, dict/dict0load.c, dict/dict0boot.c,
	include/dict0dict.h, include/trx0trx.h,
	dict/dict0dict.c, trx/trx0trx.c,
	include/ha_prototypes.h, handler/ha_innodb.cc:
	When creating an index in innodb_strict_mode, check that the
	maximum record size will never exceed the B-tree page size limit.
	For uncompressed tables, there should always be enough space for
	two records in an empty B-tree page. For compressed tables, there
	should be enough space for storing two node pointer records or one
	data record in an empty page in uncompressed format.
	The purpose of this check is to guarantee that INSERT or UPDATE
	will never fail due to too big record size.

2008-09-17	The InnoDB Team

	* btr/btr0cur.c, data/data0data.c, include/page0zip.h,
	......
dict/dict0boot.c:
@@ -279,7 +279,8 @@ dict_boot(void)
 	error = dict_index_add_to_cache(table, index,
 					mtr_read_ulint(dict_hdr
						       + DICT_HDR_TABLES,
-						       MLOG_4BYTES, &mtr));
+						       MLOG_4BYTES, &mtr),
+					FALSE);
 	ut_a(error == DB_SUCCESS);
 	/*-------------------------*/
@@ -291,7 +292,8 @@ dict_boot(void)
 	error = dict_index_add_to_cache(table, index,
 					mtr_read_ulint(dict_hdr
						       + DICT_HDR_TABLE_IDS,
-						       MLOG_4BYTES, &mtr));
+						       MLOG_4BYTES, &mtr),
+					FALSE);
 	ut_a(error == DB_SUCCESS);
 	/*-------------------------*/
@@ -322,7 +324,8 @@ dict_boot(void)
 	error = dict_index_add_to_cache(table, index,
 					mtr_read_ulint(dict_hdr
						       + DICT_HDR_COLUMNS,
-						       MLOG_4BYTES, &mtr));
+						       MLOG_4BYTES, &mtr),
+					FALSE);
 	ut_a(error == DB_SUCCESS);
 	/*-------------------------*/
@@ -363,7 +366,8 @@ dict_boot(void)
 	error = dict_index_add_to_cache(table, index,
 					mtr_read_ulint(dict_hdr
						       + DICT_HDR_INDEXES,
-						       MLOG_4BYTES, &mtr));
+						       MLOG_4BYTES, &mtr),
+					FALSE);
 	ut_a(error == DB_SUCCESS);
 	/*-------------------------*/
@@ -389,7 +393,8 @@ dict_boot(void)
 	error = dict_index_add_to_cache(table, index,
 					mtr_read_ulint(dict_hdr
						       + DICT_HDR_FIELDS,
-						       MLOG_4BYTES, &mtr));
+						       MLOG_4BYTES, &mtr),
+					FALSE);
 	ut_a(error == DB_SUCCESS);
 	mtr_commit(&mtr);
dict/dict0crea.c:
@@ -216,8 +216,6 @@ dict_build_table_def_step(
 	const char*	path_or_name;
 	ibool		is_path;
 	mtr_t		mtr;
-	ulint		i;
-	ulint		row_len;
 	ut_ad(mutex_own(&(dict_sys->mutex)));
@@ -227,14 +225,6 @@ dict_build_table_def_step(
 	thr_get_trx(thr)->table_id = table->id;
-	row_len = 0;
-	for (i = 0; i < table->n_def; i++) {
-		row_len += dict_col_get_min_size(&table->cols[i]);
-	}
-	if (row_len > BTR_PAGE_MAX_REC_SIZE) {
-		return(DB_TOO_BIG_RECORD);
-	}
 	if (srv_file_per_table) {
 		/* We create a new single-table tablespace for the table.
		We initially let it be 4 pages:
@@ -1089,7 +1079,7 @@ dict_create_index_step(
 		dulint	index_id = node->index->id;
 		err = dict_index_add_to_cache(node->table, node->index,
-					      FIL_NULL);
+					      FIL_NULL, trx_is_strict(trx));
 		node->index = dict_index_get_if_in_cache_low(index_id);
 		ut_a(!node->index == (err != DB_SUCCESS));
dict/dict0dict.c:
@@ -22,6 +22,8 @@ Created 1/8/1996 Heikki Tuuri
 #include "btr0btr.h"
 #include "btr0cur.h"
 #include "btr0sea.h"
+#include "page0zip.h"
+#include "page0page.h"
 #include "pars0pars.h"
 #include "pars0sym.h"
 #include "que0que.h"
@@ -1262,6 +1264,156 @@ is_ord_part:
 	return(undo_page_len >= UNIV_PAGE_SIZE);
 }
+
+/********************************************************************
+If a record of this index might not fit on a single B-tree page,
+return TRUE. */
+static
+ibool
+dict_index_too_big_for_tree(
+/*========================*/
+					/* out: TRUE if the index
+					record could become too big */
+	const dict_table_t*	table,		/* in: table */
+	const dict_index_t*	new_index)	/* in: index */
+{
+	ulint	zip_size;
+	ulint	comp;
+	ulint	i;
+	/* maximum possible storage size of a record */
+	ulint	rec_max_size;
+	/* maximum allowed size of a record on a leaf page */
+	ulint	page_rec_max;
+	/* maximum allowed size of a node pointer record */
+	ulint	page_ptr_max;
+
+	comp = dict_table_is_comp(table);
+	zip_size = dict_table_zip_size(table);
+
+	if (zip_size && zip_size < UNIV_PAGE_SIZE) {
+		/* On a compressed page, two records must fit in the
+		uncompressed page modification log.  On compressed
+		pages with zip_size == UNIV_PAGE_SIZE, this limit will
+		never be reached. */
+		ut_ad(comp);
+		/* The maximum allowed record size is the size of
+		an empty page, minus a byte for recoding the heap
+		number in the page modification log.  The maximum
+		allowed node pointer size is half that. */
+		page_rec_max = page_zip_empty_size(new_index->n_fields,
+						   zip_size) - 1;
+		page_ptr_max = page_rec_max / 2;
+		/* On a compressed page, there is a two-byte entry in
+		the dense page directory for every record.  But there
+		is no record header. */
+		rec_max_size = 2;
+	} else {
+		/* The maximum allowed record size is half a B-tree
+		page.  No additional sparse page directory entry will
+		be generated for the first few user records. */
+		page_rec_max = page_get_free_space_of_empty(comp) / 2;
+		page_ptr_max = page_rec_max;
+		/* Each record has a header. */
+		rec_max_size = comp
+			? REC_N_NEW_EXTRA_BYTES
+			: REC_N_OLD_EXTRA_BYTES;
+	}
+
+	if (comp) {
+		/* Include the "null" flags in the
+		maximum possible record size. */
+		rec_max_size += UT_BITS_IN_BYTES(new_index->n_nullable);
+	} else {
+		/* For each column, include a 2-byte offset and a
+		"null" flag.  The 1-byte format is only used in short
+		records that do not contain externally stored columns.
+		Such records could never exceed the page limit, even
+		when using the 2-byte format. */
+		rec_max_size += 2 * new_index->n_fields;
+	}
+
+	/* Compute the maximum possible record size. */
+	for (i = 0; i < new_index->n_fields; i++) {
+		const dict_field_t*	field
+			= dict_index_get_nth_field(new_index, i);
+		const dict_col_t*	col
+			= dict_field_get_col(field);
+		ulint			field_max_size;
+		ulint			field_ext_max_size;
+
+		/* In dtuple_convert_big_rec(), variable-length columns
+		that are longer than BTR_EXTERN_FIELD_REF_SIZE * 2
+		may be chosen for external storage.
+		Fixed-length columns, and all columns of secondary
+		index records are always stored inline. */
+
+		/* Determine the maximum length of the index field.
+		The field_ext_max_size should be computed as the worst
+		case in rec_get_converted_size_comp() for
+		REC_STATUS_ORDINARY records. */
+		field_max_size = dict_col_get_fixed_size(col);
+		if (field_max_size) {
+			/* dict_index_add_col() should guarantee this */
+			ut_ad(!field->prefix_len
+			      || field->fixed_len == field->prefix_len);
+			/* Fixed lengths are not encoded
+			in ROW_FORMAT=COMPACT. */
+			field_ext_max_size = 0;
+			goto add_field_size;
+		}
+
+		field_max_size = dict_col_get_max_size(col);
+		field_ext_max_size = field_max_size < 256 ? 1 : 2;
+
+		if (field->prefix_len) {
+			if (field->prefix_len < field_max_size) {
+				field_max_size = field->prefix_len;
+			}
+		} else if (field_max_size > BTR_EXTERN_FIELD_REF_SIZE * 2
+			   && dict_index_is_clust(new_index)) {
+			/* In the worst case, we have a locally stored
+			column of BTR_EXTERN_FIELD_REF_SIZE * 2 bytes.
+			The length can be stored in one byte.  If the
+			column were stored externally, the lengths in
+			the clustered index page would be
+			BTR_EXTERN_FIELD_REF_SIZE and 2. */
+			field_max_size = BTR_EXTERN_FIELD_REF_SIZE * 2;
+			field_ext_max_size = 1;
+		}
+
+		if (comp) {
+			/* Add the extra size for ROW_FORMAT=COMPACT.
+			For ROW_FORMAT=REDUNDANT, these bytes were
+			added to rec_max_size before this loop. */
+			rec_max_size += field_ext_max_size;
+		}
+add_field_size:
+		rec_max_size += field_max_size;
+
+		/* Check the size limit on leaf pages. */
+		if (UNIV_UNLIKELY(rec_max_size >= page_rec_max)) {
+			return(TRUE);
+		}
+
+		/* Check the size limit on non-leaf pages.  Records
+		stored in non-leaf B-tree pages consist of the unique
+		columns of the record (the key columns of the B-tree)
+		and a node pointer field.  When we have processed the
+		unique columns, rec_max_size equals the size of the
+		node pointer record minus the node pointer column. */
+		if (i + 1 == dict_index_get_n_unique_in_tree(new_index)
+		    && rec_max_size + REC_NODE_PTR_SIZE >= page_ptr_max) {
+			return(TRUE);
+		}
+	}
+
+	return(FALSE);
+}
 /**************************************************************************
 Adds an index to the dictionary cache. */
 UNIV_INTERN
@@ -1272,7 +1424,10 @@ dict_index_add_to_cache(
 	dict_table_t*	table,	/* in: table on which the index is */
 	dict_index_t*	index,	/* in, own: index; NOTE! The index memory
				object is freed in this function! */
-	ulint		page_no)/* in: root page number of the index */
+	ulint		page_no,/* in: root page number of the index */
+	ibool		strict)	/* in: TRUE=refuse to create the index
+				if records could be too big to fit in
+				a B-tree page */
 {
 	dict_index_t*	new_index;
 	ulint		n_ord;
@@ -1303,6 +1458,13 @@ dict_index_add_to_cache(
 	new_index->n_fields = new_index->n_def;
+	if (strict && dict_index_too_big_for_tree(table, new_index)) {
+too_big:
+		dict_mem_index_free(new_index);
+		dict_mem_index_free(index);
+		return(DB_TOO_BIG_RECORD);
+	}
+
 	if (UNIV_UNLIKELY(index->type & DICT_UNIVERSAL)) {
 		n_ord = new_index->n_fields;
 	} else {
@@ -1334,9 +1496,8 @@ dict_index_add_to_cache(
 		if (dict_index_too_big_for_undo(table, new_index)) {
 			/* An undo log record might not fit in
			a single page.  Refuse to create this index. */
-			dict_mem_index_free(new_index);
-			dict_mem_index_free(index);
-			return(DB_TOO_BIG_RECORD);
+			goto too_big;
 		}
 		break;
dict/dict0load.c:
@@ -765,7 +765,8 @@ dict_load_indexes(
 		index->id = id;
 		dict_load_fields(index, heap);
-		error = dict_index_add_to_cache(table, index, page_no);
+		error = dict_index_add_to_cache(table, index, page_no,
+						FALSE);
 		/* The data dictionary tables should never contain
		invalid index definitions.  If we ignored this error
		and simply did not load this index definition, the
handler/ha_innodb.cc:
@@ -652,6 +652,18 @@ thd_is_select(
 	return(thd_sql_command((const THD*) thd) == SQLCOM_SELECT);
 }
+
+/**********************************************************************
+Returns true if the thread is executing in innodb_strict_mode. */
+ibool
+thd_is_strict(
+/*==========*/
+			/* out: true if thd is in strict mode */
+	void*	thd)	/* in: thread handle (THD*) */
+{
+	return(THDVAR((THD*) thd, strict_mode));
+}
+
 /************************************************************************
 Obtain the InnoDB transaction of a MySQL thread. */
 inline
ibuf/ibuf0ibuf.c:
@@ -549,7 +549,7 @@ ibuf_data_init_for_space(
 	index->id = ut_dulint_add(DICT_IBUF_ID_MIN, space);
 	error = dict_index_add_to_cache(table, index,
-					FSP_IBUF_TREE_ROOT_PAGE_NO);
+					FSP_IBUF_TREE_ROOT_PAGE_NO, FALSE);
 	ut_a(error == DB_SUCCESS);
 	data->index = dict_table_get_first_index(table);
include/dict0dict.h:
@@ -716,7 +716,10 @@ dict_index_add_to_cache(
 	dict_table_t*	table,	/* in: table on which the index is */
 	dict_index_t*	index,	/* in, own: index; NOTE! The index memory
				object is freed in this function! */
-	ulint		page_no);/* in: root page number of the index */
+	ulint		page_no,/* in: root page number of the index */
+	ibool		strict);/* in: TRUE=refuse to create the index
+				if records could be too big to fit in
+				a B-tree page */
 /**************************************************************************
 Removes an index from the dictionary cache. */
 UNIV_INTERN
include/ha_prototypes.h:
@@ -206,5 +206,14 @@ innobase_get_charset(
				/* out: connection character set */
 	void*	mysql_thd);	/* in: MySQL thread handle */
+
+/**********************************************************************
+Returns true if the thread is executing in innodb_strict_mode. */
+ibool
+thd_is_strict(
+/*==========*/
+			/* out: true if thd is in strict mode */
+	void*	thd);	/* in: thread handle (THD*) */
+
 #endif
 #endif
include/trx0trx.h:
@@ -406,6 +406,15 @@ trx_is_interrupted(
 #define trx_is_interrupted(trx) FALSE
 #endif /* !UNIV_HOTBACKUP */
+
+/**************************************************************************
+Determines if the currently running transaction is in innodb_strict_mode. */
+UNIV_INTERN
+ibool
+trx_is_strict(
+/*==========*/
+			/* out: TRUE if strict */
+	trx_t*	trx);	/* in: transaction */
+
 /***********************************************************************
 Calculates the "weight" of a transaction. The weight of one transaction
 is estimated as the number of altered rows + the number of locked rows.
mysql-test/innodb-zip.result:
@@ -122,15 +122,29 @@ table_schema table_name row_format
 test t1 Compressed
 test t2 Compact
 drop table t1,t2;
+SET SESSION innodb_strict_mode = on;
 CREATE TABLE t1(
 c TEXT NOT NULL, d TEXT NOT NULL,
 PRIMARY KEY (c(767),d(767)))
 ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII;
-INSERT INTO t1 VALUES(
-'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~(*,.02468:<>@BDFHJLNPRTVXZ\^`bdfhjlnprtvxz|~)+-/13579;=?ACEGIKMOQSUWY[]_acegikmoqsuwy{}(+.147:=@CFILORUX[^adgjmpsvy|(+.147:=@CFILORUX[^adgjmpsvy|(+.147:=@CFILORUX[^adgjmpsvy|(,048<@DHLPTX\`dhlptx|)-159=AEIMQUY]aeimquy}*.26:>BFJNRVZ^bfjnrvz~+/37;?CGKOSW[_cgkosw{(-27<AFKPUZ_dinsx}+05:?DINSX]bglqv{).38=BGLQV[`ejoty~,16;@EJOTY^chmrw|*/49>CHMRW\afkpuz(.4:@FLRX^djpv|+17=CIOU[agmsy(.4:@FLRX^djpv|+17=CIOU[agmsy(.4:@FLRX^djpv|+17=CIOU[agmsy(/6=DKRY`gnu|,3:AHOV]dkry)07>ELSZahov}-4;BIPW^elsz*18?FMT[bipw~.5<CJQX_fmt{+29@GNU\cjqx(08@HPX`hpx)19AIQYaiqy*2:BJRZbjrz+3;CKS[cks{,4<DLT\dlt|-5=EMU]emu}.6>FNV^fnv~/7?GOW_gow(1:CLU^gpy+4=FOXajs|.7@IR[dmv(1:CLU^gpy+4=FOXajs|.7@IR[dmv(1:CLU^gpy+4=',
-'FOXajs|.7@IR[dmv(2<FPZdnx+5?IS]gq{.8BLV`jt~1;EOYcmw*4>HR\fpz-7AKU_is}0:DNXblv)3=GQ[eoy,6@JT^hr|/9CMWaku(3>IT_ju)4?JU`kv*5@KValw+6ALWbmx,7BMXcny-8CNYdoz.9DOZep{/:EP[fq|0;FQ\gr}1<GR]hs~2=HS^it(4@LXdp|1=IUamy.:FR^jv+7CO[gs(4@LXdp|1=IUamy.:FR^jv+7CO[gs(4@LXdp|1=IUamy.:FR^jv+7CO[gs(5BO\iv,9FS`mz0=JWdq~4AN[hu+8ER_ly/<IVcp}3@MZgt*7DQ^kx.;HUbo|2?LYfs)6CP]jw-:GTan{1>KXer(6DR`n|3AO]ky0>LZhv-;IWes*8FTbp~5CQ_m{2@N\jx/=KYgu,:HVdr)7ESao}4BP^lz1?M[iw.<JXft+9GUcq(7FUds+:IXgv.=L[jy1@O^m|4CRap(7FUds+:IXgv.=L[jy1@O^m|4CRap(7FUds+:IXgv.=L[jy1@O^m|4CRap(8HXhx1AQaq*:JZjz3CScs,<L\l|5EUeu.>N^n~7GWgw0@P`p)9IYiy2BRbr+;K[k{4DTdt-=M]m}6FVfv/?O_o(9J[l}7HYj{5FWhy3DUfw1BSdu/@Qbs->O`q+<M^o):K\m~8IZk|6GXiz4EVgx2CTev0ARct.?Par,=N_p*;L]n(:L^p+=Oas.@Rdv1CUgy4FXj|7I[m(:L^p+=Oas.@Rdv1CUgy4FXj|7');
 ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. You have to change some columns to TEXT or BLOBs
+CREATE TABLE t1(
+c TEXT NOT NULL, d TEXT NOT NULL,
+PRIMARY KEY (c(767),d(767)))
+ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 CHARSET=ASCII;
+ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. You have to change some columns to TEXT or BLOBs
+CREATE TABLE t1(
+c TEXT NOT NULL, d TEXT NOT NULL,
+PRIMARY KEY (c(767),d(767)))
+ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4 CHARSET=ASCII;
 drop table t1;
+CREATE TABLE t1(c TEXT, PRIMARY KEY (c(440)))
+ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII;
+ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. You have to change some columns to TEXT or BLOBs
+CREATE TABLE t1(c TEXT, PRIMARY KEY (c(439)))
+ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII;
+INSERT INTO t1 VALUES(REPEAT('A',512)),(REPEAT('B',512));
+DROP TABLE t1;
 create table t1( c1 int not null, c2 blob, c3 blob, c4 blob,
 primary key(c1, c2(22), c3(22)))
 engine = innodb row_format = dynamic;
mysql-test/innodb-zip.test:
@@ -84,20 +84,29 @@ SELECT table_schema, table_name, row_format
 FROM information_schema.tables WHERE engine='innodb';
 drop table t1,t2;
-# data generated with
+SET SESSION innodb_strict_mode = on;
+--error ER_TOO_BIG_ROWSIZE
 CREATE TABLE t1(
 c TEXT NOT NULL, d TEXT NOT NULL,
 PRIMARY KEY (c(767),d(767)))
 ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII;
+# random data generated with
 # perl -e 'my $i,$j,$k;for($j=1;$j<19;$j++){for($i=$k=0;$k<87;$k++,$i+=$j,$i%=87){printf "%c",$i+40}}'
-# truncated to 2*767 bytes and split to two 767-byte columns
 --error ER_TOO_BIG_ROWSIZE
-INSERT INTO t1 VALUES(
-'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~(*,.02468:<>@BDFHJLNPRTVXZ\^`bdfhjlnprtvxz|~)+-/13579;=?ACEGIKMOQSUWY[]_acegikmoqsuwy{}(+.147:=@CFILORUX[^adgjmpsvy|(+.147:=@CFILORUX[^adgjmpsvy|(+.147:=@CFILORUX[^adgjmpsvy|(,048<@DHLPTX\`dhlptx|)-159=AEIMQUY]aeimquy}*.26:>BFJNRVZ^bfjnrvz~+/37;?CGKOSW[_cgkosw{(-27<AFKPUZ_dinsx}+05:?DINSX]bglqv{).38=BGLQV[`ejoty~,16;@EJOTY^chmrw|*/49>CHMRW\afkpuz(.4:@FLRX^djpv|+17=CIOU[agmsy(.4:@FLRX^djpv|+17=CIOU[agmsy(.4:@FLRX^djpv|+17=CIOU[agmsy(/6=DKRY`gnu|,3:AHOV]dkry)07>ELSZahov}-4;BIPW^elsz*18?FMT[bipw~.5<CJQX_fmt{+29@GNU\cjqx(08@HPX`hpx)19AIQYaiqy*2:BJRZbjrz+3;CKS[cks{,4<DLT\dlt|-5=EMU]emu}.6>FNV^fnv~/7?GOW_gow(1:CLU^gpy+4=FOXajs|.7@IR[dmv(1:CLU^gpy+4=FOXajs|.7@IR[dmv(1:CLU^gpy+4=',
-'FOXajs|.7@IR[dmv(2<FPZdnx+5?IS]gq{.8BLV`jt~1;EOYcmw*4>HR\fpz-7AKU_is}0:DNXblv)3=GQ[eoy,6@JT^hr|/9CMWaku(3>IT_ju)4?JU`kv*5@KValw+6ALWbmx,7BMXcny-8CNYdoz.9DOZep{/:EP[fq|0;FQ\gr}1<GR]hs~2=HS^it(4@LXdp|1=IUamy.:FR^jv+7CO[gs(4@LXdp|1=IUamy.:FR^jv+7CO[gs(4@LXdp|1=IUamy.:FR^jv+7CO[gs(5BO\iv,9FS`mz0=JWdq~4AN[hu+8ER_ly/<IVcp}3@MZgt*7DQ^kx.;HUbo|2?LYfs)6CP]jw-:GTan{1>KXer(6DR`n|3AO]ky0>LZhv-;IWes*8FTbp~5CQ_m{2@N\jx/=KYgu,:HVdr)7ESao}4BP^lz1?M[iw.<JXft+9GUcq(7FUds+:IXgv.=L[jy1@O^m|4CRap(7FUds+:IXgv.=L[jy1@O^m|4CRap(7FUds+:IXgv.=L[jy1@O^m|4CRap(8HXhx1AQaq*:JZjz3CScs,<L\l|5EUeu.>N^n~7GWgw0@P`p)9IYiy2BRbr+;K[k{4DTdt-=M]m}6FVfv/?O_o(9J[l}7HYj{5FWhy3DUfw1BSdu/@Qbs->O`q+<M^o):K\m~8IZk|6GXiz4EVgx2CTev0ARct.?Par,=N_p*;L]n(:L^p+=Oas.@Rdv1CUgy4FXj|7I[m(:L^p+=Oas.@Rdv1CUgy4FXj|7');
+CREATE TABLE t1(
+c TEXT NOT NULL, d TEXT NOT NULL,
+PRIMARY KEY (c(767),d(767)))
+ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 CHARSET=ASCII;
+CREATE TABLE t1(
+c TEXT NOT NULL, d TEXT NOT NULL,
+PRIMARY KEY (c(767),d(767)))
+ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4 CHARSET=ASCII;
 drop table t1;
+--error ER_TOO_BIG_ROWSIZE
+CREATE TABLE t1(c TEXT, PRIMARY KEY (c(440)))
+ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII;
+CREATE TABLE t1(c TEXT, PRIMARY KEY (c(439)))
+ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII;
+INSERT INTO t1 VALUES(REPEAT('A',512)),(REPEAT('B',512));
+DROP TABLE t1;
 #
 # Test blob column inheritance (mantis issue#36)
mysql-test/innodb.result:
@@ -3177,6 +3177,7 @@ t1 CREATE TABLE `t1` (
 CONSTRAINT `t1_t2` FOREIGN KEY (`id`) REFERENCES `t2` (`id`)
 ) ENGINE=InnoDB AUTO_INCREMENT=349 DEFAULT CHARSET=latin1
 DROP TABLE t1,t2;
+set innodb_strict_mode=on;
 CREATE TABLE t1 (
 c01 CHAR(255), c02 CHAR(255), c03 CHAR(255), c04 CHAR(255),
 c05 CHAR(255), c06 CHAR(255), c07 CHAR(255), c08 CHAR(255),
mysql-test/innodb.test:
@@ -2343,6 +2343,7 @@ DROP TABLE t1,t2;
 #
 # Bug #21101 (Prints wrong error message if max row size is too large)
 #
+set innodb_strict_mode=on;
 --error 1118
 CREATE TABLE t1 (
 c01 CHAR(255), c02 CHAR(255), c03 CHAR(255), c04 CHAR(255),
trx/trx0trx.c:
@@ -34,6 +34,22 @@ UNIV_INTERN sess_t* trx_dummy_sess = NULL;
 the kernel mutex */
 UNIV_INTERN ulint	trx_n_mysql_transactions = 0;
+
+/**************************************************************************
+Determines if the currently running transaction is in innodb_strict_mode. */
+UNIV_INTERN
+ibool
+trx_is_strict(
+/*==========*/
+			/* out: TRUE if strict */
+	trx_t*	trx)	/* in: transaction */
+{
+#ifndef UNIV_HOTBACKUP
+	return(trx && trx->mysql_thd && thd_is_strict(trx->mysql_thd));
+#else /* UNIV_HOTBACKUP */
+	return(FALSE);
+#endif /* UNIV_HOTBACKUP */
+}
+
 /*****************************************************************
 Set detailed error message for the transaction. */
 UNIV_INTERN