Commit ea9a89da authored by marko

branches/zip: page_zip_get_trailer_len(), page_zip_available():

Replace the parameter "dict_index_t* index" with "ibool is_clust".

dict_index_is_clust(): Add __attribute__((pure)).
parent df5a9376
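For orientation, here is a minimal, self-contained C sketch of the before/after call pattern. The types, constants, and function bodies are simplified stand-ins for illustration only (ibool, ulint, dict_index_t, page_zip_des_t and the stub logic are assumptions, not the real InnoDB definitions):

#include <stdio.h>

typedef int		ibool;
typedef unsigned long	ulint;
#define TRUE	1

typedef struct { ibool clustered; } dict_index_t;	/* stub */
typedef struct { ulint size; } page_zip_des_t;		/* stub */

static ibool
dict_index_is_clust(const dict_index_t* index)	/* stub predicate */
{
	return(index->clustered);
}

/* After this patch the page_zip_* functions take the boolean result
instead of the whole index object. */
static ibool
page_zip_available(const page_zip_des_t* page_zip, ibool is_clust,
		   ulint length, ulint create)
{
	(void) is_clust;
	(void) create;
	return(length < page_zip->size / 2);	/* stub heuristic */
}

int
main(void)
{
	dict_index_t	index = { TRUE };
	page_zip_des_t	page_zip = { 8192 };

	/* Old call site:  page_zip_available(&page_zip, &index, 100, 1);
	New call site evaluates the predicate at the caller: */
	if (page_zip_available(&page_zip, dict_index_is_clust(&index),
			       100, 1)) {
		puts("record fits");
	}

	return(0);
}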
@@ -515,7 +515,8 @@
/*================*/
/* out: nonzero for clustered index,
zero for other indexes */
- const dict_index_t* index); /* in: index */
+ const dict_index_t* index) /* in: index */
+ __attribute__((pure));
/************************************************************************
Gets the number of user-defined columns in a table in the dictionary
......
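A note on the __attribute__((pure)) added above: GCC treats a pure function as having no side effects, with a result that depends only on its arguments and on memory it reads, so repeated calls with the same argument may be merged by the optimizer. That matters now that call sites pass dict_index_is_clust(index) explicitly, sometimes more than once per function (see page_zip_alloc below). A stand-in illustration, not the real declaration:

typedef struct { unsigned type; } dict_index_t;	/* stub */
#define DICT_CLUSTERED	1	/* assumed flag value, for illustration */

__attribute__((pure))
static unsigned
dict_index_is_clust(const dict_index_t* index)
{
	return(index->type & DICT_CLUSTERED);
}

unsigned
calls_twice(const dict_index_t* index)
{
	/* With the pure attribute, the compiler may evaluate the
	predicate once and reuse the result for both operands. */
	return(dict_index_is_clust(index) + dict_index_is_clust(index));
}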
@@ -143,7 +143,7 @@ page_zip_available(
/* out: TRUE if page_zip_write_rec()
will succeed */
const page_zip_des_t* page_zip,/* in: compressed page */
- dict_index_t* index, /* in: index of the B-tree node */
+ ibool is_clust,/* in: TRUE if clustered index */
ulint length, /* in: combined size of the record */
ulint create) /* in: nonzero=add the record to
the heap */
......
@@ -208,7 +208,7 @@ page_zip_get_trailer_len(
in bytes, not including the terminating
zero byte of the modification log */
const page_zip_des_t* page_zip,/* in: compressed page */
- dict_index_t* index, /* in: index of the B-tree node */
+ ibool is_clust,/* in: TRUE if clustered index */
ulint* entry_size)/* out: size of the uncompressed
portion of a user record */
{
@@ -220,7 +220,7 @@ page_zip_get_trailer_len(
uncompressed_size = PAGE_ZIP_DIR_SLOT_SIZE
+ REC_NODE_PTR_SIZE;
ut_ad(!page_zip->n_blobs);
- } else if (dict_index_is_clust(index)) {
+ } else if (UNIV_UNLIKELY(is_clust)) {
uncompressed_size = PAGE_ZIP_DIR_SLOT_SIZE
+ DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN;
} else {
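For orientation, a standalone sketch of the per-record overhead computed in page_zip_get_trailer_len() above. The constant values and the secondary-index leaf branch (not visible in this hunk) are assumptions, and the real function also accounts for externally stored columns on clustered leaf pages:

#include <stdio.h>

#define PAGE_ZIP_DIR_SLOT_SIZE	2	/* dense page directory slot */
#define REC_NODE_PTR_SIZE	4	/* child page number (non-leaf) */
#define DATA_TRX_ID_LEN		6	/* DB_TRX_ID */
#define DATA_ROLL_PTR_LEN	7	/* DB_ROLL_PTR */

static unsigned long
uncompressed_size(int is_leaf, int is_clust)
{
	if (!is_leaf) {
		return(PAGE_ZIP_DIR_SLOT_SIZE + REC_NODE_PTR_SIZE);
	} else if (is_clust) {
		return(PAGE_ZIP_DIR_SLOT_SIZE
		       + DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN);
	} else {
		/* secondary index leaf page: directory slot only
		(assumed; this branch is not shown in the hunk) */
		return(PAGE_ZIP_DIR_SLOT_SIZE);
	}
}

int
main(void)
{
	printf("non-leaf: %lu, clustered leaf: %lu, secondary leaf: %lu\n",
	       uncompressed_size(0, 0),		/* 2 + 4 = 6 */
	       uncompressed_size(1, 1),		/* 2 + 6 + 7 = 15 */
	       uncompressed_size(1, 0));	/* 2 */
	return(0);
}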
@@ -246,7 +246,7 @@ page_zip_available(
/* out: TRUE if enough space
is available */
const page_zip_des_t* page_zip,/* in: compressed page */
- dict_index_t* index, /* in: index of the B-tree node */
+ ibool is_clust,/* in: TRUE if clustered index */
ulint length, /* in: combined size of the record */
ulint create) /* in: nonzero=add the record to
the heap */
@@ -256,7 +256,7 @@ page_zip_available(
ut_ad(length > REC_N_NEW_EXTRA_BYTES);
- trailer_len = page_zip_get_trailer_len(page_zip, index,
+ trailer_len = page_zip_get_trailer_len(page_zip, is_clust,
&uncompressed_size);
/* Subtract the fixed extra bytes and add the maximum
@@ -314,7 +314,8 @@ page_zip_alloc(
ut_a(page_zip_validate(page_zip, page));
#endif /* UNIV_ZIP_DEBUG */
- if (page_zip_available(page_zip, index, length, create)) {
+ if (page_zip_available(page_zip, dict_index_is_clust(index),
+ length, create)) {
return(TRUE);
}
@@ -330,7 +331,8 @@ page_zip_alloc(
}
/* Check if there is enough space available after compression. */
- return(page_zip_available(page_zip, index, length, create));
+ return(page_zip_available(page_zip, dict_index_is_clust(index),
+ length, create));
}
/**************************************************************************
......
@@ -1175,7 +1175,8 @@ page_cur_insert_rec_zip(
rec_size = rec_offs_size(offsets);
/* 2. Try to find suitable space from page memory management */
- if (!page_zip_available(page_zip, index, rec_size, 1)) {
+ if (!page_zip_available(page_zip, dict_index_is_clust(index),
+ rec_size, 1)) {
/* Try compressing the whole page afterwards. */
insert_rec = page_cur_insert_rec_low(*current_rec,
......
@@ -1928,7 +1928,8 @@ zlib_done:
}
page_zip->m_end = mod_log_ptr - page_zip->data;
page_zip->m_nonempty = mod_log_ptr != d_stream->next_in;
- ut_a(page_zip_get_trailer_len(page_zip, index, NULL)
+ ut_a(page_zip_get_trailer_len(page_zip,
+ dict_index_is_clust(index), NULL)
+ page_zip->m_end < page_zip_get_size(page_zip));
}
@@ -1973,6 +1974,8 @@ page_zip_decompress_sec(
| PAGE_HEAP_NO_USER_LOW << REC_HEAP_NO_SHIFT;
ulint slot;
+ ut_a(!dict_index_is_clust(index));
/* Subtract the space reserved for uncompressed data. */
d_stream->avail_in -= n_dense * PAGE_ZIP_DIR_SLOT_SIZE;
@@ -2065,7 +2068,7 @@ zlib_done:
}
page_zip->m_end = mod_log_ptr - page_zip->data;
page_zip->m_nonempty = mod_log_ptr != d_stream->next_in;
- ut_a(page_zip_get_trailer_len(page_zip, index, NULL)
+ ut_a(page_zip_get_trailer_len(page_zip, FALSE, NULL)
+ page_zip->m_end < page_zip_get_size(page_zip));
}
@@ -2201,6 +2204,8 @@ page_zip_decompress_clust(
const byte* storage;
const byte* externs;
+ ut_a(dict_index_is_clust(index));
/* Subtract the space reserved for uncompressed data. */
d_stream->avail_in -= n_dense * (PAGE_ZIP_DIR_SLOT_SIZE
+ DATA_TRX_ID_LEN
@@ -2356,7 +2361,7 @@ zlib_done:
}
page_zip->m_end = mod_log_ptr - page_zip->data;
page_zip->m_nonempty = mod_log_ptr != d_stream->next_in;
- ut_a(page_zip_get_trailer_len(page_zip, index, NULL)
+ ut_a(page_zip_get_trailer_len(page_zip, TRUE, NULL)
+ page_zip->m_end < page_zip_get_size(page_zip));
}
@@ -3370,7 +3375,8 @@ page_zip_clear_rec(
#endif /* UNIV_ZIP_DEBUG */
page_zip->m_end
+ 1 + ((heap_no - 1) >= 64)/* size of the log entry */
- + page_zip_get_trailer_len(page_zip, index, NULL)
+ + page_zip_get_trailer_len(page_zip,
+ dict_index_is_clust(index), NULL)
< page_zip_get_size(page_zip)) {
byte* data;
@@ -3909,7 +3915,8 @@ page_zip_copy(
memcpy(page_zip, src_zip, sizeof *page_zip);
page_zip->data = data;
}
- ut_ad(page_zip_get_trailer_len(page_zip, index, NULL)
+ ut_ad(page_zip_get_trailer_len(page_zip,
+ dict_index_is_clust(index), NULL)
+ page_zip->m_end < page_zip_get_size(page_zip));
if (!page_is_leaf(src)
......