Commit 9a4dd4ad authored by Marko Mäkelä's avatar Marko Mäkelä

Remove trailing white space.

parent aacf9a80
...@@ -1233,7 +1233,7 @@ buf_pool_init_instance( ...@@ -1233,7 +1233,7 @@ buf_pool_init_instance(
buf_pool->page_hash = hash_create(2 * buf_pool->curr_size); buf_pool->page_hash = hash_create(2 * buf_pool->curr_size);
buf_pool->zip_hash = hash_create(2 * buf_pool->curr_size); buf_pool->zip_hash = hash_create(2 * buf_pool->curr_size);
buf_pool->last_printout_time = ut_time(); buf_pool->last_printout_time = ut_time();
} }
/* 2. Initialize flushing fields /* 2. Initialize flushing fields
...@@ -1365,11 +1365,11 @@ buf_pool_drop_hash_index_instance( ...@@ -1365,11 +1365,11 @@ buf_pool_drop_hash_index_instance(
/* block->is_hashed cannot be modified /* block->is_hashed cannot be modified
when we have an x-latch on btr_search_latch; when we have an x-latch on btr_search_latch;
see the comment in buf0buf.h */ see the comment in buf0buf.h */
if (!block->is_hashed) { if (!block->is_hashed) {
continue; continue;
} }
/* To follow the latching order, we /* To follow the latching order, we
have to release btr_search_latch have to release btr_search_latch
before acquiring block->latch. */ before acquiring block->latch. */
...@@ -1378,14 +1378,14 @@ buf_pool_drop_hash_index_instance( ...@@ -1378,14 +1378,14 @@ buf_pool_drop_hash_index_instance(
we must rescan all blocks, because we must rescan all blocks, because
some may become hashed again. */ some may become hashed again. */
*released_search_latch = TRUE; *released_search_latch = TRUE;
rw_lock_x_lock(&block->lock); rw_lock_x_lock(&block->lock);
/* This should be guaranteed by the /* This should be guaranteed by the
callers, which will be holding callers, which will be holding
btr_search_enabled_mutex. */ btr_search_enabled_mutex. */
ut_ad(!btr_search_enabled); ut_ad(!btr_search_enabled);
/* Because we did not buffer-fix the /* Because we did not buffer-fix the
block by calling buf_block_get_gen(), block by calling buf_block_get_gen(),
it is possible that the block has been it is possible that the block has been
...@@ -1395,7 +1395,7 @@ buf_pool_drop_hash_index_instance( ...@@ -1395,7 +1395,7 @@ buf_pool_drop_hash_index_instance(
block is mapped to. All we want to do block is mapped to. All we want to do
is to drop any hash entries referring is to drop any hash entries referring
to the page. */ to the page. */
/* It is possible that /* It is possible that
block->page.state != BUF_FILE_PAGE. block->page.state != BUF_FILE_PAGE.
Even that does not matter, because Even that does not matter, because
...@@ -1403,18 +1403,18 @@ buf_pool_drop_hash_index_instance( ...@@ -1403,18 +1403,18 @@ buf_pool_drop_hash_index_instance(
check block->is_hashed before doing check block->is_hashed before doing
anything. block->is_hashed can only anything. block->is_hashed can only
be set on uncompressed file pages. */ be set on uncompressed file pages. */
btr_search_drop_page_hash_index(block); btr_search_drop_page_hash_index(block);
rw_lock_x_unlock(&block->lock); rw_lock_x_unlock(&block->lock);
rw_lock_x_lock(&btr_search_latch); rw_lock_x_lock(&btr_search_latch);
ut_ad(!btr_search_enabled); ut_ad(!btr_search_enabled);
} }
} }
} }
/********************************************************************//** /********************************************************************//**
Drops the adaptive hash index. To prevent a livelock, this function Drops the adaptive hash index. To prevent a livelock, this function
is only to be called while holding btr_search_latch and while is only to be called while holding btr_search_latch and while
...@@ -1990,30 +1990,30 @@ buf_pool_resize(void) ...@@ -1990,30 +1990,30 @@ buf_pool_resize(void)
ulint min_change_size = 1048576 * srv_buf_pool_instances; ulint min_change_size = 1048576 * srv_buf_pool_instances;
buf_pool_mutex_enter_all(); buf_pool_mutex_enter_all();
if (srv_buf_pool_old_size == srv_buf_pool_size) { if (srv_buf_pool_old_size == srv_buf_pool_size) {
buf_pool_mutex_exit_all(); buf_pool_mutex_exit_all();
return; return;
} else if (srv_buf_pool_curr_size + min_change_size } else if (srv_buf_pool_curr_size + min_change_size
> srv_buf_pool_size) { > srv_buf_pool_size) {
change_size = (srv_buf_pool_curr_size - srv_buf_pool_size) change_size = (srv_buf_pool_curr_size - srv_buf_pool_size)
/ UNIV_PAGE_SIZE; / UNIV_PAGE_SIZE;
buf_pool_mutex_exit_all(); buf_pool_mutex_exit_all();
/* Disable adaptive hash indexes and empty the index /* Disable adaptive hash indexes and empty the index
in order to free up memory in the buffer pool chunks. */ in order to free up memory in the buffer pool chunks. */
buf_pool_shrink(change_size); buf_pool_shrink(change_size);
} else if (srv_buf_pool_curr_size + min_change_size } else if (srv_buf_pool_curr_size + min_change_size
< srv_buf_pool_size) { < srv_buf_pool_size) {
/* Enlarge the buffer pool by at least one megabyte */ /* Enlarge the buffer pool by at least one megabyte */
change_size = srv_buf_pool_size - srv_buf_pool_curr_size; change_size = srv_buf_pool_size - srv_buf_pool_curr_size;
buf_pool_mutex_exit_all(); buf_pool_mutex_exit_all();
...@@ -2026,10 +2026,10 @@ buf_pool_resize(void) ...@@ -2026,10 +2026,10 @@ buf_pool_resize(void)
return; return;
} }
buf_pool_page_hash_rebuild(); buf_pool_page_hash_rebuild();
} }
/****************************************************************//** /****************************************************************//**
Remove the sentinel block for the watch before replacing it with a real block. Remove the sentinel block for the watch before replacing it with a real block.
buf_page_watch_clear() or buf_page_watch_occurred() will notice that buf_page_watch_clear() or buf_page_watch_occurred() will notice that
...@@ -2890,7 +2890,7 @@ buf_page_get_gen( ...@@ -2890,7 +2890,7 @@ buf_page_get_gen(
Try again later. */ Try again later. */
buf_pool_mutex_exit(buf_pool); buf_pool_mutex_exit(buf_pool);
os_thread_sleep(WAIT_FOR_READ); os_thread_sleep(WAIT_FOR_READ);
goto loop; goto loop;
} }
...@@ -4806,7 +4806,7 @@ buf_get_modified_ratio_pct(void) ...@@ -4806,7 +4806,7 @@ buf_get_modified_ratio_pct(void)
buf_get_total_list_len(&lru_len, &free_len, &flush_list_len); buf_get_total_list_len(&lru_len, &free_len, &flush_list_len);
ratio = (100 * flush_list_len) / (1 + lru_len + free_len); ratio = (100 * flush_list_len) / (1 + lru_len + free_len);
/* 1 + is there to avoid division by zero */ /* 1 + is there to avoid division by zero */
return(ratio); return(ratio);
...@@ -5189,7 +5189,7 @@ buf_all_freed(void) ...@@ -5189,7 +5189,7 @@ buf_all_freed(void)
return(TRUE); return(TRUE);
} }
/*********************************************************************//** /*********************************************************************//**
Checks that there currently are no pending i/o-operations for the buffer Checks that there currently are no pending i/o-operations for the buffer
pool. pool.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment