Commit 3cd665ba authored by serg@serg.mylan

Merge bk-internal:/home/bk/mysql-4.1/

into serg.mylan:/usr/home/serg/Abk/mysql-4.1
parents 18b4e9c9 7652c904
#!/bin/sh
# BitKeeper pre-commit trigger: refuse the checkin if the file being
# committed ($BK_FILE, supplied by BitKeeper) does not end with a newline.
if [ "`tail -c1 $BK_FILE`" ]
then
  echo "File $BK_FILE does not end with a new-line character!"
  echo ""
  echo "Checkin FAILED!"
  echo "Fix the problem and retry."
  exit 1
fi
<html>
<head>
<title>Place holder for manual_toc.html</title>
</head>
<body>
This is just a place holder for the autogenerated manual_toc.html
to make "make dist" happy.
</body>
</html>
@c FIX AGL 20011108 Extracted from manual.texi.
@c Should only be on website with new submits by webform.
@node MySQL Testimonials, Contrib, Users, Top
@appendix MySQL Testimonials
@cindex MySQL Testimonials
The section 'MySQL Users' contains many links to MySQL users but doesn't
provide much information about how they use MySQL. @xref{Users}. This
section gives you an idea of how other MySQL users use MySQL to solve
their problems.
Please note that all new stories are added to the MySQL website,
@uref{http://www.mysql.com/}.
Do let us know about @emph{your} success story too!
@itemize @bullet
@item
@strong{Peter Zaitsev of Spylog.ru} writes:
I think you might be interested in my database size. The whole database
is currently spread over 15 servers and holds about 60,000 tables
containing about 5,000,000,000 rows. My most heavily loaded server
currently holds about 10,000 tables with 1,000,000,000 rows in them.
The largest tables have about 50,000,000 rows each, and that figure will
grow as soon as I move to a 2.4 kernel with large file support. Currently
I have to delete a lot of the logs for large sites to keep table sizes
under 2GB.
@item
@@ -3966,10 +3966,11 @@ static my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows)
void mi_disable_non_unique_index(MI_INFO *info, ha_rows rows)
{
MYISAM_SHARE *share=info->s;
uint i;
if (!info->state->records) /* Don't do this if old rows */
{
MI_KEYDEF *key=share->keyinfo;
uint i;
DBUG_ASSERT(info->state->records == 0 &&
(!rows || rows >= MI_MIN_ROWS_TO_DISABLE_INDEXES));
for (i=0 ; i < share->base.keys ; i++,key++)
{
if (!(key->flag & (HA_NOSAME | HA_SPATIAL | HA_AUTO_KEY)) &&
@@ -3979,7 +3980,6 @@ void mi_disable_non_unique_index(MI_INFO *info, ha_rows rows)
info->update|= HA_STATE_CHANGED;
}
}
}
}
@@ -916,8 +916,8 @@ int mi_init_bulk_insert(MI_INFO *info, ulong cache_size, ha_rows rows)
DBUG_ENTER("_mi_init_bulk_insert");
DBUG_PRINT("enter",("cache_size: %lu", cache_size));
if (info->bulk_insert || (rows && rows < MI_MIN_ROWS_TO_USE_BULK_INSERT))
DBUG_RETURN(0);
DBUG_ASSERT(!info->bulk_insert &&
(!rows || rows >= MI_MIN_ROWS_TO_USE_BULK_INSERT));
for (i=total_keylength=num_keys=0 ; i < share->base.keys ; i++)
{
@@ -416,6 +416,7 @@ typedef struct st_mi_sort_param
#define MI_MIN_SIZE_BULK_INSERT_TREE 16384 /* this is per key */
#define MI_MIN_ROWS_TO_USE_BULK_INSERT 100
#define MI_MIN_ROWS_TO_DISABLE_INDEXES 100
/* The UNIQUE check is done with a hashed long key */
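The hunks above appear to turn the silent early returns in mi_disable_non_unique_index() and mi_init_bulk_insert() into DBUG_ASSERTs against the MI_MIN_ROWS_* thresholds, so callers are expected to perform those checks themselves (as the ha_myisam hunk below does). A minimal sketch of that caller-side contract; start_bulk_load() and cache_size are made-up names, the thresholds and callees come from the hunks:

/*
  Minimal sketch, not part of this patch: the checks the new DBUG_ASSERTs
  appear to expect the caller to make before calling into MyISAM.
*/
#include "myisamdef.h"                       /* MI_INFO, MI_MIN_ROWS_* */

static void start_bulk_load(MI_INFO *file, ha_rows rows, ulong cache_size)
{
  if (file->state->records == 0 &&
      (!rows || rows >= MI_MIN_ROWS_TO_DISABLE_INDEXES))
    mi_disable_non_unique_index(file, rows);      /* empty table: skip keys now */
  else if (!file->bulk_insert &&
           (!rows || rows >= MI_MIN_ROWS_TO_USE_BULK_INSERT))
    mi_init_bulk_insert(file, cache_size, rows);  /* buffer keys in a tree cache */
}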
@@ -817,26 +817,19 @@ int ha_myisam::preload_keys(THD* thd, HA_CHECK_OPT *check_opt)
deactivate_non_unique_index()
rows Rows to be inserted
0 if we don't know
HA_POS_ERROR if we want to disable all keys
HA_POS_ERROR if we want to force disabling
and make it permanent (save on disk)
*/
void ha_myisam::deactivate_non_unique_index(ha_rows rows)
{
MYISAM_SHARE* share = file->s;
bool do_warning=0;
if (share->state.key_map == ((ulonglong) 1L << share->base.keys)-1)
{
if (!(specialflag & SPECIAL_SAFE_MODE))
{
if (rows == HA_POS_ERROR)
{
uint orig_update= file->update;
file->update ^= HA_STATE_CHANGED;
uint check_update= file->update;
if (rows == HA_POS_ERROR) // force disable and save it on disk!
mi_extra(file, HA_EXTRA_NO_KEYS, 0);
do_warning= (file->update == check_update) && file->state->records;
file->update= orig_update;
}
else
{
/*
@@ -846,14 +839,15 @@ void ha_myisam::deactivate_non_unique_index(ha_rows rows)
we don't want to update the key statistics based of only a few rows.
*/
if (file->state->records == 0 &&
(!rows || rows >= MI_MIN_ROWS_TO_USE_BULK_INSERT))
(!rows || rows >= MI_MIN_ROWS_TO_DISABLE_INDEXES))
mi_disable_non_unique_index(file,rows);
else
if (!file->bulk_insert &&
(!rows || rows >= MI_MIN_ROWS_TO_USE_BULK_INSERT))
{
mi_init_bulk_insert(file,
current_thd->variables.bulk_insert_buff_size,
rows);
table->bulk_insert= 1;
}
}
}
@@ -862,10 +856,6 @@ void ha_myisam::deactivate_non_unique_index(ha_rows rows)
}
else
enable_activate_all_index=0;
if (do_warning)
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_ILLEGAL_HA,
ER(ER_ILLEGAL_HA), table->table_name);
}
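For the 'rows' argument documented at the top of this function (0 when the caller doesn't know the row count, HA_POS_ERROR to force disabling and save it on disk), a hypothetical caller might look like the sketch below; prepare_handler(), expected_rows and force_permanent are made-up names used only for illustration:

/* Hypothetical caller illustrating the 'rows' conventions above; only
   deactivate_non_unique_index() and HA_POS_ERROR come from the patch. */
void prepare_handler(ha_myisam *h, ha_rows expected_rows, bool force_permanent)
{
  if (force_permanent)
    h->deactivate_non_unique_index(HA_POS_ERROR);  /* disable and persist on disk */
  else
    h->deactivate_non_unique_index(expected_rows); /* pass 0 if the count is unknown */
}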
@@ -877,7 +867,6 @@ bool ha_myisam::activate_all_index(THD *thd)
DBUG_ENTER("activate_all_index");
mi_end_bulk_insert(file);
table->bulk_insert= 0;
if (enable_activate_all_index &&
share->state.key_map != set_bits(ulonglong, share->base.keys))
{
@@ -1392,7 +1381,7 @@ longlong ha_myisam::get_auto_increment()
return auto_increment_value;
}
if (table->bulk_insert)
/* it's safe to call the following if bulk_insert isn't on */
mi_flush_bulk_insert(file, table->next_number_index);
longlong nr;
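In the get_auto_increment() hunk above, the table->bulk_insert guard is dropped and the new comment notes that mi_flush_bulk_insert() is safe to call even when no bulk insert is active. A hedged sketch of that check-inside-the-callee pattern; this is illustrative only, not the real mi_flush_bulk_insert():

/* Illustrative only, assuming the callee guards on its own bulk_insert
   state so callers need no separate flag; not the real implementation. */
int flush_bulk_insert_sketch(MI_INFO *info, uint inx)
{
  if (!info->bulk_insert)            /* nothing buffered: harmless no-op */
    return 0;
  /* ... flush the keys buffered for index 'inx' into the index file ... */
  return 0;
}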
@@ -280,7 +280,6 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
table->next_number_field=table->found_next_number_field;
VOID(table->file->extra_opt(HA_EXTRA_WRITE_CACHE,
thd->variables.read_buff_size));
table->bulk_insert= 1;
if (handle_duplicates == DUP_IGNORE ||
handle_duplicates == DUP_REPLACE)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
@@ -121,7 +121,7 @@ struct st_table {
my_bool maybe_null,outer_join; /* Used with OUTER JOIN */
my_bool force_index;
my_bool distinct,const_table,no_rows;
my_bool key_read, bulk_insert;
my_bool key_read;
my_bool crypted;
my_bool db_low_byte_first; /* Portable row format */
my_bool locked_by_flush;