/*****************************************************************************

Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved.
Copyright (c) 2008, Google Inc.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA

*****************************************************************************/

/**************************************************//**
@file buf/buf0buf.c
The database buffer buf_pool

Created 11/5/1995 Heikki Tuuri
*******************************************************/

#include "buf0buf.h"

#ifdef UNIV_NONINL
#include "buf0buf.ic"
#endif

#include "mem0mem.h"
#include "btr0btr.h"
#include "fil0fil.h"
#ifndef UNIV_HOTBACKUP
#include "buf0buddy.h"
#include "lock0lock.h"
#include "btr0sea.h"
#include "ibuf0ibuf.h"
#include "trx0undo.h"
#include "log0log.h"
#endif /* !UNIV_HOTBACKUP */
#include "srv0srv.h"
#include "dict0dict.h"
#include "log0recv.h"
#include "page0zip.h"
#include "trx0trx.h"
#include "srv0start.h"

/* prototypes for new functions added to ha_innodb.cc */
trx_t* innobase_get_trx();

inline void _increment_page_get_statistics(buf_block_t* block, trx_t* trx)
{
	ulint           block_hash;
	ulint           block_hash_byte;
	byte            block_hash_offset;

	ut_ad(block);

	if (!innobase_get_slow_log() || !trx || !trx->take_stats)
		return;

	if (!trx->distinct_page_access_hash) {
		trx->distinct_page_access_hash = mem_alloc(DPAH_SIZE);
		memset(trx->distinct_page_access_hash, 0, DPAH_SIZE);
	}

	block_hash = ut_hash_ulint((block->page.space << 20) + block->page.space +
					block->page.offset, DPAH_SIZE << 3);
	block_hash_byte = block_hash >> 3;
	block_hash_offset = (byte) block_hash & 0x07;
	if (block_hash_byte >= DPAH_SIZE)
		fprintf(stderr, "!!! block_hash_byte = %lu  block_hash_offset = %d !!!\n", block_hash_byte, block_hash_offset);
	if (block_hash_offset > 7)
		fprintf(stderr, "!!! block_hash_byte = %lu  block_hash_offset = %d !!!\n", block_hash_byte, block_hash_offset);
	if ((trx->distinct_page_access_hash[block_hash_byte] & ((byte) 0x01 << block_hash_offset)) == 0)
		trx->distinct_page_access++;
	trx->distinct_page_access_hash[block_hash_byte] |= (byte) 0x01 << block_hash_offset;
	return;
}
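
/* The bookkeeping above treats trx->distinct_page_access_hash as a
bitmap of DPAH_SIZE * 8 bits: every page maps to a single bit, and a
page is counted as distinct only the first time its bit gets set.
A minimal, self-contained sketch of the same technique, with
illustrative names only (this sketch is not part of the build):

	byte	bitmap[DPAH_SIZE];	memset to zero beforehand
	ulint	hash	= ut_hash_ulint(key, DPAH_SIZE << 3);
	ulint	byte_i	= hash >> 3;
	byte	bit	= (byte) 0x01 << (hash & 0x07);

	if (!(bitmap[byte_i] & bit)) {
		distinct_count++;
	}
	bitmap[byte_i] |= bit;
*/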

/*
		IMPLEMENTATION OF THE BUFFER POOL
		=================================

Performance improvement:
------------------------
Thread scheduling in NT may be so slow that the OS wait mechanism should
not be used even in waiting for disk reads to complete.
Rather, we should put waiting query threads to the queue of
waiting jobs, and let the OS thread do something useful while the i/o
is processed. In this way we could remove most OS thread switches in
an i/o-intensive benchmark like TPC-C.

A possibility is to put a user space thread library between the database
and NT. User space thread libraries might be very fast.

SQL Server 7.0 can be configured to use 'fibers' which are lightweight
threads in NT. These should be studied.

		Buffer frames and blocks
		------------------------
Following the terminology of Gray and Reuter, we call the memory
blocks where file pages are loaded buffer frames. For each buffer
frame there is a control block, or shortly, a block, in the buffer
control array. The control info which does not need to be stored
in the file along with the file page, resides in the control block.

		Buffer pool struct
		------------------
The buffer buf_pool contains a single mutex which protects all the
control data structures of the buf_pool. The content of a buffer frame is
protected by a separate read-write lock in its control block, though.
These locks can be locked and unlocked without owning the buf_pool mutex.
The OS events in the buf_pool struct can be waited for without owning the
buf_pool mutex.

The buf_pool mutex is a hot-spot in main memory, causing a lot of
memory bus traffic on multiprocessor systems when processors
alternately access the mutex. On our Pentium, the mutex is accessed
maybe every 10 microseconds. We gave up the solution to have mutexes
for each control block, for instance, because it seemed to be
complicated.

A solution to reduce mutex contention of the buf_pool mutex is to
create a separate mutex for the page hash table. On Pentium,
accessing the hash table takes 2 microseconds, about half
of the total buf_pool mutex hold time.
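
(Note: in this tree the duties of the buf_pool mutex are additionally
split across LRU_list_mutex, flush_list_mutex, page_hash_latch,
free_list_mutex, zip_free_mutex and zip_hash_mutex; see the
declarations further below in this file.)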

		Control blocks
		--------------

The control block contains, for instance, the bufferfix count
which is incremented when a thread wants a file page to be fixed
in a buffer frame. The bufferfix operation does not lock the
contents of the frame, however. For this purpose, the control
block contains a read-write lock.

The buffer frames have to be aligned so that the start memory
address of a frame is divisible by the universal page size, which
is a power of two.

We intend to make the buffer buf_pool size on-line reconfigurable,
that is, the buf_pool size can be changed without closing the database.
Then the database administrator may adjust it to be bigger
at night, for example. The control block array must
contain enough control blocks for the maximum buffer buf_pool size
which is used in the particular database.
If the buf_pool size is cut, we exploit the virtual memory mechanism of
the OS, and just refrain from using frames at high addresses. Then the OS
can swap them to disk.

The control blocks containing file pages are put to a hash table
according to the file address of the page.
We could speed up the access to an individual page by using
"pointer swizzling": we could replace the page references on
non-leaf index pages by direct pointers to the page, if it exists
in the buf_pool. We could make a separate hash table where we could
chain all the page references in non-leaf pages residing in the buf_pool,
using the page reference as the hash key,
and at the time of reading of a page update the pointers accordingly.
Drawbacks of this solution are added complexity and,
possibly, extra space required on non-leaf pages for memory pointers.
A simpler solution is just to speed up the hash table mechanism
in the database, using tables whose size is a power of 2.

		Lists of blocks
		---------------

There are several lists of control blocks.

The free list (buf_pool->free) contains blocks which are currently not
used.

The common LRU list contains all the blocks holding a file page
except those for which the bufferfix count is non-zero.
The pages are in the LRU list roughly in the order of the last
access to the page, so that the oldest pages are at the end of the
list. We also keep a pointer to near the end of the LRU list,
which we can use when we want to artificially age a page in the
buf_pool. This is used if we know that some page is not needed
again for some time: we insert the block right after the pointer,
causing it to be replaced sooner than would normally be the case.
Currently this aging mechanism is used by the read-ahead mechanism
for pages, and it can also be used when there is a scan of a full
table which cannot fit in memory. By putting the pages near the end
of the LRU list, we make sure that most of the buf_pool stays in the
main memory, undisturbed.

The unzip_LRU list contains a subset of the common LRU list.  The
blocks on the unzip_LRU list hold a compressed file page and the
corresponding uncompressed page frame.  A block is in unzip_LRU if and
only if the predicate buf_page_belongs_to_unzip_LRU(&block->page)
holds.  The blocks in unzip_LRU will be in the same order as they are in
the common LRU list.  That is, each manipulation of the common LRU
list will result in the same manipulation of the unzip_LRU list.

The chain of modified blocks (buf_pool->flush_list) contains the blocks
holding file pages that have been modified in the memory
but not written to disk yet. The block with the oldest modification
which has not yet been written to disk is at the end of the chain.

The chain of unmodified compressed blocks (buf_pool->zip_clean)
contains the control blocks (buf_page_t) of those compressed pages
that are not in buf_pool->flush_list and for which no uncompressed
page has been allocated in the buffer pool.  The control blocks for
uncompressed pages are accessible via buf_block_t objects that are
reachable via buf_pool->chunks[].

The chains of free memory blocks (buf_pool->zip_free[]) are used by
the buddy allocator (buf0buddy.c) to keep track of currently unused
memory blocks of size sizeof(buf_page_t)..UNIV_PAGE_SIZE / 2.  These
blocks are inside the UNIV_PAGE_SIZE-sized memory blocks of type
BUF_BLOCK_MEMORY that the buddy allocator requests from the buffer
pool.  The buddy allocator is solely used for allocating control
blocks for compressed pages (buf_page_t) and compressed page frames.

		Loading a file page
		-------------------

First, a victim block for replacement has to be found in the
buf_pool. It is taken from the free list or searched for from the
end of the LRU-list. An exclusive lock is reserved for the frame,
the io_fix field is set in the block fixing the block in buf_pool,
and the io-operation for loading the page is queued. The io-handler thread
releases the X-lock on the frame and resets the io_fix field
when the io operation completes.

A thread may request the above operation using the function
buf_page_get(). It may then continue to request a lock on the frame.
The lock is granted when the io-handler releases the x-lock.
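
Schematically, the load path described above looks as follows (a
sketch of the flow only, not the actual function bodies):

	take a block from buf_pool->free, or evict a replaceable block
	from the end of the LRU list;
	x-lock block->lock and set block->page.io_fix;
	queue an asynchronous read of the file page into the frame;
	... the i/o-handler thread later completes the read, resets
	io_fix and releases the x-lock;
	the thread that called buf_page_get() can then acquire the
	latch it requested on the frame.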

		Read-ahead
		----------

The read-ahead mechanism is intended to be intelligent and
isolated from the semantically higher levels of the database
index management. From the higher level we only need the
information whether a file page has a natural successor or
predecessor page. On the leaf level of a B-tree index,
these are the next and previous pages in the natural
order of the pages.

Let us first explain the read-ahead mechanism when the leaves
of a B-tree are scanned in an ascending or descending order.
When a page is referenced in the buf_pool for the first time,
the buffer manager checks if it is at the border of a so-called
linear read-ahead area. The tablespace is divided into these
areas of size 64 blocks, for example. So if the page is at the
border of such an area, the read-ahead mechanism checks if
all the other blocks in the area have been accessed in an
ascending or descending order. If this is the case, the system
looks at the natural successor or predecessor of the page,
checks if that is at the border of another area, and in this case
issues read-requests for all the pages in that area. Maybe
we could relax the condition that all the pages in the area
have to be accessed: if data is deleted from a table, there may
appear holes of unused pages in the area.
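
To make the area arithmetic concrete, using the example size of 64
pages from above: page number N belongs to the area covering pages
(N / 64) * 64 .. (N / 64) * 64 + 63, so the border check applies only
when N % 64 == 0 or N % 64 == 63, that is, when the accessed page is
the first or the last page of its area.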

A different read-ahead mechanism is used when there appears
to be a random access pattern to a file.
If a new page is referenced in the buf_pool, and several pages
of its random access area (for instance, 32 consecutive pages
in a tablespace) have recently been referenced, we may predict
that the whole area may be needed in the near future, and issue
the read requests for the whole area.
*/

#ifndef UNIV_HOTBACKUP
/** Value in microseconds */
static const int WAIT_FOR_READ	= 5000;
/** Number of attempts made to read in a page in the buffer pool */
static const ulint BUF_PAGE_READ_MAX_RETRIES = 100;

/** The buffer buf_pool of the database */
UNIV_INTERN buf_pool_t*	buf_pool = NULL;

/** mutex protecting the buffer pool struct and control blocks, except the
read-write lock in them */
UNIV_INTERN mutex_t		buf_pool_mutex;
UNIV_INTERN mutex_t		LRU_list_mutex;
UNIV_INTERN mutex_t		flush_list_mutex;
UNIV_INTERN rw_lock_t		page_hash_latch;
UNIV_INTERN mutex_t		free_list_mutex;
UNIV_INTERN mutex_t		zip_free_mutex;
UNIV_INTERN mutex_t		zip_hash_mutex;
/** mutex protecting the control blocks of compressed-only pages
(of type buf_page_t, not buf_block_t) */
UNIV_INTERN mutex_t		buf_pool_zip_mutex;

#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
static ulint	buf_dbg_counter	= 0; /*!< This is used to insert validation
					operations in execution in the
					debug version */
/** Flag to forbid the release of the buffer pool mutex.
Protected by buf_pool_mutex. */
UNIV_INTERN ulint		buf_pool_mutex_exit_forbidden = 0;
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
#ifdef UNIV_DEBUG
/** If this is set TRUE, the program prints info whenever
read-ahead or flush occurs */
UNIV_INTERN ibool		buf_debug_prints = FALSE;
#endif /* UNIV_DEBUG */

#endif /* !UNIV_HOTBACKUP */

/********************************************************************//**
Calculates a page checksum which is stored to the page when it is written
to a file. Note that we must be careful to calculate the same value on
32-bit and 64-bit architectures.
@return	checksum */
UNIV_INTERN
ulint
buf_calc_page_new_checksum(
/*=======================*/
	const byte*	page)	/*!< in: buffer page */
{
	ulint checksum;

	/* Since the field FIL_PAGE_FILE_FLUSH_LSN, and in versions <= 4.1.x
	..._ARCH_LOG_NO, are written outside the buffer pool to the first
	pages of data files, we have to skip them in the page checksum
	calculation.
	We must also skip the field FIL_PAGE_SPACE_OR_CHKSUM where the
	checksum is stored, and also the last 8 bytes of page because
	there we store the old formula checksum. */

	checksum = ut_fold_binary(page + FIL_PAGE_OFFSET,
				  FIL_PAGE_FILE_FLUSH_LSN - FIL_PAGE_OFFSET)
		+ ut_fold_binary(page + FIL_PAGE_DATA,
				 UNIV_PAGE_SIZE - FIL_PAGE_DATA
				 - FIL_PAGE_END_LSN_OLD_CHKSUM);
	checksum = checksum & 0xFFFFFFFFUL;

	return(checksum);
}

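/********************************************************************//**
Calculates a page checksum like buf_calc_page_new_checksum() above, but
folds the bulk of the page with the 32-bit-aligned ut_fold_binary_32().
This is the variant that buf_page_is_corrupted() accepts when
srv_fast_checksum is set.
@return	checksum */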
UNIV_INTERN
ulint
buf_calc_page_new_checksum_32(
/*==========================*/
	const byte*	page)	/*!< in: buffer page */
{
	ulint checksum;

	checksum = ut_fold_binary(page + FIL_PAGE_OFFSET,
				  FIL_PAGE_FILE_FLUSH_LSN - FIL_PAGE_OFFSET)
		+ ut_fold_binary(page + FIL_PAGE_DATA,
				 FIL_PAGE_DATA_ALIGN_32 - FIL_PAGE_DATA)
		+ ut_fold_binary_32(page + FIL_PAGE_DATA_ALIGN_32,
				    UNIV_PAGE_SIZE - FIL_PAGE_DATA_ALIGN_32
				    - FIL_PAGE_END_LSN_OLD_CHKSUM);

	checksum = checksum & 0xFFFFFFFFUL;

	return(checksum);
}

/********************************************************************//**
In versions < 4.0.14 and < 4.1.1 there was a bug that the checksum only
looked at the first few bytes of the page. This calculates that old
checksum.
NOTE: we must first store the new formula checksum to
FIL_PAGE_SPACE_OR_CHKSUM before calculating and storing this old checksum
because this takes that field as an input!
@return	checksum */
UNIV_INTERN
ulint
buf_calc_page_old_checksum(
/*=======================*/
	const byte*	page)	/*!< in: buffer page */
{
	ulint checksum;

	checksum = ut_fold_binary(page, FIL_PAGE_FILE_FLUSH_LSN);

	checksum = checksum & 0xFFFFFFFFUL;

	return(checksum);
}

/********************************************************************//**
Checks if a page is corrupt.
@return	TRUE if corrupted */
UNIV_INTERN
ibool
buf_page_is_corrupted(
/*==================*/
	const byte*	read_buf,	/*!< in: a database page */
	ulint		zip_size)	/*!< in: size of compressed page;
					0 for uncompressed pages */
{
	ulint		checksum_field;
	ulint		old_checksum_field;

	if (UNIV_LIKELY(!zip_size)
	    && memcmp(read_buf + FIL_PAGE_LSN + 4,
		      read_buf + UNIV_PAGE_SIZE
		      - FIL_PAGE_END_LSN_OLD_CHKSUM + 4, 4)) {

		/* Stored log sequence numbers at the start and the end
		of page do not match */

		return(TRUE);
	}

#ifndef UNIV_HOTBACKUP
	if (recv_lsn_checks_on) {
		ib_uint64_t	current_lsn;

		if (log_peek_lsn(&current_lsn)
		    && current_lsn < mach_read_ull(read_buf + FIL_PAGE_LSN)) {
			ut_print_timestamp(stderr);

			fprintf(stderr,
				"  InnoDB: Error: page %lu log sequence number"
				" %llu\n"
				"InnoDB: is in the future! Current system "
				"log sequence number %llu.\n"
				"InnoDB: Your database may be corrupt or "
				"you may have copied the InnoDB\n"
				"InnoDB: tablespace but not the InnoDB "
				"log files. See\n"
				"InnoDB: " REFMAN "forcing-recovery.html\n"
				"InnoDB: for more information.\n",
				(ulong) mach_read_from_4(read_buf
							 + FIL_PAGE_OFFSET),
				mach_read_ull(read_buf + FIL_PAGE_LSN),
				current_lsn);
		}
	}
#endif

	/* If we use checksums validation, make additional check before
	returning TRUE to ensure that the checksum is not equal to
	BUF_NO_CHECKSUM_MAGIC which might be stored by InnoDB with checksums
	disabled. Otherwise, skip checksum calculation and return FALSE */

	if (UNIV_LIKELY(srv_use_checksums)) {
		checksum_field = mach_read_from_4(read_buf
						  + FIL_PAGE_SPACE_OR_CHKSUM);

		if (UNIV_UNLIKELY(zip_size)) {
			return(checksum_field != BUF_NO_CHECKSUM_MAGIC
			       && checksum_field
			       != page_zip_calc_checksum(read_buf, zip_size));
		}

		old_checksum_field = mach_read_from_4(
			read_buf + UNIV_PAGE_SIZE
			- FIL_PAGE_END_LSN_OLD_CHKSUM);

		/* There are 2 valid formulas for old_checksum_field:

		1. Very old versions of InnoDB only stored 8 byte lsn to the
		start and the end of the page.

		2. Newer InnoDB versions store the old formula checksum
		there. */

		if (old_checksum_field != mach_read_from_4(read_buf
							   + FIL_PAGE_LSN)
		    && old_checksum_field != BUF_NO_CHECKSUM_MAGIC
		    && old_checksum_field
		    != buf_calc_page_old_checksum(read_buf)) {

			return(TRUE);
		}

		/* InnoDB versions < 4.0.14 and < 4.1.1 stored the space id
		(always equal to 0), to FIL_PAGE_SPACE_OR_CHKSUM */

		if (!srv_fast_checksum
		    && checksum_field != 0
		    && checksum_field != BUF_NO_CHECKSUM_MAGIC
		    && checksum_field
		    != buf_calc_page_new_checksum(read_buf)) {

			return(TRUE);
		}

		if (srv_fast_checksum
		    && checksum_field != 0
		    && checksum_field != BUF_NO_CHECKSUM_MAGIC
		    && checksum_field
		    != buf_calc_page_new_checksum_32(read_buf)
		    && checksum_field
		    != buf_calc_page_new_checksum(read_buf)) {

			return(TRUE);
		}
	}

	return(FALSE);
}

/********************************************************************//**
Prints a page to stderr. */
UNIV_INTERN
void
buf_page_print(
/*===========*/
	const byte*	read_buf,	/*!< in: a database page */
	ulint		zip_size)	/*!< in: compressed page size, or
				0 for uncompressed pages */
{
#ifndef UNIV_HOTBACKUP
	dict_index_t*	index;
#endif /* !UNIV_HOTBACKUP */
	ulint		checksum;
	ulint		checksum_32;
	ulint		old_checksum;
	ulint		size	= zip_size;

	if (!size) {
		size = UNIV_PAGE_SIZE;
	}

	ut_print_timestamp(stderr);
	fprintf(stderr, "  InnoDB: Page dump in ascii and hex (%lu bytes):\n",
		(ulong) size);
	ut_print_buf(stderr, read_buf, size);
	fputs("\nInnoDB: End of page dump\n", stderr);

	if (zip_size) {
		/* Print compressed page. */

		switch (fil_page_get_type(read_buf)) {
		case FIL_PAGE_TYPE_ZBLOB:
		case FIL_PAGE_TYPE_ZBLOB2:
			checksum = srv_use_checksums
				? page_zip_calc_checksum(read_buf, zip_size)
				: BUF_NO_CHECKSUM_MAGIC;
			ut_print_timestamp(stderr);
			fprintf(stderr,
				"  InnoDB: Compressed BLOB page"
				" checksum %lu, stored %lu\n"
				"InnoDB: Page lsn %lu %lu\n"
				"InnoDB: Page number (if stored"
				" to page already) %lu,\n"
				"InnoDB: space id (if stored"
				" to page already) %lu\n",
				(ulong) checksum,
				(ulong) mach_read_from_4(
					read_buf + FIL_PAGE_SPACE_OR_CHKSUM),
				(ulong) mach_read_from_4(
					read_buf + FIL_PAGE_LSN),
				(ulong) mach_read_from_4(
					read_buf + (FIL_PAGE_LSN + 4)),
				(ulong) mach_read_from_4(
					read_buf + FIL_PAGE_OFFSET),
				(ulong) mach_read_from_4(
					read_buf
					+ FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID));
			return;
		default:
			ut_print_timestamp(stderr);
			fprintf(stderr,
				"  InnoDB: unknown page type %lu,"
				" assuming FIL_PAGE_INDEX\n",
				fil_page_get_type(read_buf));
			/* fall through */
		case FIL_PAGE_INDEX:
			checksum = srv_use_checksums
				? page_zip_calc_checksum(read_buf, zip_size)
				: BUF_NO_CHECKSUM_MAGIC;

			ut_print_timestamp(stderr);
			fprintf(stderr,
				"  InnoDB: Compressed page checksum %lu,"
				" stored %lu\n"
				"InnoDB: Page lsn %lu %lu\n"
				"InnoDB: Page number (if stored"
				" to page already) %lu,\n"
				"InnoDB: space id (if stored"
				" to page already) %lu\n",
				(ulong) checksum,
				(ulong) mach_read_from_4(
					read_buf + FIL_PAGE_SPACE_OR_CHKSUM),
				(ulong) mach_read_from_4(
					read_buf + FIL_PAGE_LSN),
				(ulong) mach_read_from_4(
					read_buf + (FIL_PAGE_LSN + 4)),
				(ulong) mach_read_from_4(
					read_buf + FIL_PAGE_OFFSET),
				(ulong) mach_read_from_4(
					read_buf
					+ FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID));
			return;
		case FIL_PAGE_TYPE_XDES:
			/* This is an uncompressed page. */
			break;
		}
	}

	checksum = srv_use_checksums
		? buf_calc_page_new_checksum(read_buf) : BUF_NO_CHECKSUM_MAGIC;
	checksum_32 = srv_use_checksums
		? buf_calc_page_new_checksum_32(read_buf) : BUF_NO_CHECKSUM_MAGIC;
	old_checksum = srv_use_checksums
		? buf_calc_page_old_checksum(read_buf) : BUF_NO_CHECKSUM_MAGIC;

	ut_print_timestamp(stderr);
	fprintf(stderr,
		"  InnoDB: Page checksum %lu (32bit_calc: %lu), prior-to-4.0.14-form"
		" checksum %lu\n"
		"InnoDB: stored checksum %lu, prior-to-4.0.14-form"
		" stored checksum %lu\n"
		"InnoDB: Page lsn %lu %lu, low 4 bytes of lsn"
		" at page end %lu\n"
		"InnoDB: Page number (if stored to page already) %lu,\n"
		"InnoDB: space id (if created with >= MySQL-4.1.1"
		" and stored already) %lu\n",
		(ulong) checksum, (ulong) checksum_32, (ulong) old_checksum,
		(ulong) mach_read_from_4(read_buf + FIL_PAGE_SPACE_OR_CHKSUM),
		(ulong) mach_read_from_4(read_buf + UNIV_PAGE_SIZE
					 - FIL_PAGE_END_LSN_OLD_CHKSUM),
		(ulong) mach_read_from_4(read_buf + FIL_PAGE_LSN),
		(ulong) mach_read_from_4(read_buf + FIL_PAGE_LSN + 4),
		(ulong) mach_read_from_4(read_buf + UNIV_PAGE_SIZE
					 - FIL_PAGE_END_LSN_OLD_CHKSUM + 4),
		(ulong) mach_read_from_4(read_buf + FIL_PAGE_OFFSET),
		(ulong) mach_read_from_4(read_buf
					 + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID));

#ifndef UNIV_HOTBACKUP
	if (mach_read_from_2(read_buf + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE)
	    == TRX_UNDO_INSERT) {
		fprintf(stderr,
			"InnoDB: Page may be an insert undo log page\n");
	} else if (mach_read_from_2(read_buf + TRX_UNDO_PAGE_HDR
				    + TRX_UNDO_PAGE_TYPE)
		   == TRX_UNDO_UPDATE) {
		fprintf(stderr,
			"InnoDB: Page may be an update undo log page\n");
	}
#endif /* !UNIV_HOTBACKUP */

	switch (fil_page_get_type(read_buf)) {
	case FIL_PAGE_INDEX:
		fprintf(stderr,
			"InnoDB: Page may be an index page where"
			" index id is %lu %lu\n",
			(ulong) ut_dulint_get_high(
				btr_page_get_index_id(read_buf)),
			(ulong) ut_dulint_get_low(
				btr_page_get_index_id(read_buf)));
#ifndef UNIV_HOTBACKUP
		index = dict_index_find_on_id_low(
			btr_page_get_index_id(read_buf));
		if (index) {
			fputs("InnoDB: (", stderr);
			dict_index_name_print(stderr, NULL, index);
			fputs(")\n", stderr);
		}
#endif /* !UNIV_HOTBACKUP */
		break;
	case FIL_PAGE_INODE:
		fputs("InnoDB: Page may be an 'inode' page\n", stderr);
		break;
	case FIL_PAGE_IBUF_FREE_LIST:
		fputs("InnoDB: Page may be an insert buffer free list page\n",
		      stderr);
		break;
	case FIL_PAGE_TYPE_ALLOCATED:
		fputs("InnoDB: Page may be a freshly allocated page\n",
		      stderr);
		break;
	case FIL_PAGE_IBUF_BITMAP:
		fputs("InnoDB: Page may be an insert buffer bitmap page\n",
		      stderr);
		break;
	case FIL_PAGE_TYPE_SYS:
		fputs("InnoDB: Page may be a system page\n",
		      stderr);
		break;
	case FIL_PAGE_TYPE_TRX_SYS:
		fputs("InnoDB: Page may be a transaction system page\n",
		      stderr);
		break;
	case FIL_PAGE_TYPE_FSP_HDR:
		fputs("InnoDB: Page may be a file space header page\n",
		      stderr);
		break;
	case FIL_PAGE_TYPE_XDES:
		fputs("InnoDB: Page may be an extent descriptor page\n",
		      stderr);
		break;
	case FIL_PAGE_TYPE_BLOB:
		fputs("InnoDB: Page may be a BLOB page\n",
		      stderr);
		break;
	case FIL_PAGE_TYPE_ZBLOB:
	case FIL_PAGE_TYPE_ZBLOB2:
		fputs("InnoDB: Page may be a compressed BLOB page\n",
		      stderr);
		break;
	}
}

#ifndef UNIV_HOTBACKUP
/********************************************************************//**
Initializes a buffer control block when the buf_pool is created. */
static
void
buf_block_init(
/*===========*/
	buf_block_t*	block,	/*!< in: pointer to control block */
	byte*		frame)	/*!< in: pointer to buffer frame */
{
	UNIV_MEM_DESC(frame, UNIV_PAGE_SIZE, block);

	block->frame = frame;

	block->page.state = BUF_BLOCK_NOT_USED;
	block->page.buf_fix_count = 0;
	block->page.io_fix = BUF_IO_NONE;

	block->modify_clock = 0;

#ifdef UNIV_DEBUG_FILE_ACCESSES
	block->page.file_page_was_freed = FALSE;
#endif /* UNIV_DEBUG_FILE_ACCESSES */

	block->check_index_page_at_flush = FALSE;
	block->index = NULL;

#ifdef UNIV_DEBUG
	block->page.in_page_hash = FALSE;
	block->page.in_zip_hash = FALSE;
	block->page.in_flush_list = FALSE;
	block->page.in_free_list = FALSE;
#endif /* UNIV_DEBUG */
	block->page.in_LRU_list = FALSE;
	block->in_unzip_LRU_list = FALSE;
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	block->n_pointers = 0;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
	page_zip_des_init(&block->page.zip);

	mutex_create(&block->mutex, SYNC_BUF_BLOCK);

	rw_lock_create(&block->lock, SYNC_LEVEL_VARYING);
	ut_ad(rw_lock_validate(&(block->lock)));

#ifdef UNIV_SYNC_DEBUG
	rw_lock_create(&block->debug_latch, SYNC_NO_ORDER_CHECK);
#endif /* UNIV_SYNC_DEBUG */
}

/********************************************************************//**
Allocates a chunk of buffer frames.
@return	chunk, or NULL on failure */
static
buf_chunk_t*
buf_chunk_init(
/*===========*/
	buf_chunk_t*	chunk,		/*!< out: chunk of buffers */
	ulint		mem_size)	/*!< in: requested size in bytes */
{
	buf_block_t*	block;
	byte*		frame;
	ulint		i;

	/* Round down to a multiple of page size,
	although it already should be. */
	mem_size = ut_2pow_round(mem_size, UNIV_PAGE_SIZE);
	/* Reserve space for the block descriptors. */
	mem_size += ut_2pow_round((mem_size / UNIV_PAGE_SIZE) * (sizeof *block)
				  + (UNIV_PAGE_SIZE - 1), UNIV_PAGE_SIZE);
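
	/* Illustrative numbers only: with UNIV_PAGE_SIZE = 16384 and a
	requested mem_size of 128 MiB, mem_size / UNIV_PAGE_SIZE = 8192,
	so room for 8192 block descriptors (rounded up to whole pages)
	is added on top of the requested size; the usable number of
	frames is recomputed below after aligning the first frame. */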

	chunk->mem_size = mem_size;
	chunk->mem = os_mem_alloc_large(&chunk->mem_size);

	if (UNIV_UNLIKELY(chunk->mem == NULL)) {

		return(NULL);
	}

	/* Allocate the block descriptors from
	the start of the memory block. */
	chunk->blocks = chunk->mem;

	/* Align a pointer to the first frame.  Note that when
	os_large_page_size is smaller than UNIV_PAGE_SIZE,
	we may allocate one fewer block than requested.  When
	it is bigger, we may allocate more blocks than requested. */

	frame = ut_align(chunk->mem, UNIV_PAGE_SIZE);
	chunk->size = chunk->mem_size / UNIV_PAGE_SIZE
		- (frame != chunk->mem);

	/* Subtract the space needed for block descriptors. */
	{
		ulint	size = chunk->size;

		while (frame < (byte*) (chunk->blocks + size)) {
			frame += UNIV_PAGE_SIZE;
			size--;
		}

		chunk->size = size;
	}

	/* Init block structs and assign frames for them. Then we
	assign the frames to the first blocks (we already mapped the
	memory above). */

	block = chunk->blocks;

	for (i = chunk->size; i--; ) {

		buf_block_init(block, frame);

#ifdef HAVE_valgrind
		/* Wipe contents of frame to eliminate a Purify warning */
		memset(block->frame, '\0', UNIV_PAGE_SIZE);
#endif
		/* Add the block to the free list */
		mutex_enter(&free_list_mutex);
		UT_LIST_ADD_LAST(free, buf_pool->free, (&block->page));
		ut_d(block->page.in_free_list = TRUE);
		mutex_exit(&free_list_mutex);

		block++;
		frame += UNIV_PAGE_SIZE;
	}

	return(chunk);
}

#ifdef UNIV_DEBUG
/*********************************************************************//**
Finds a block in the given buffer chunk that points to a
given compressed page.
@return	buffer block pointing to the compressed page, or NULL */
static
buf_block_t*
buf_chunk_contains_zip(
/*===================*/
	buf_chunk_t*	chunk,	/*!< in: chunk being checked */
	const void*	data)	/*!< in: pointer to compressed page */
{
	buf_block_t*	block;
	ulint		i;

	ut_ad(buf_pool);
	//ut_ad(buf_pool_mutex_own());

	block = chunk->blocks;

	for (i = chunk->size; i--; block++) {
		if (block->page.zip.data == data) {

			return(block);
		}
	}

	return(NULL);
}

/*********************************************************************//**
Finds a block in the buffer pool that points to a
given compressed page.
@return	buffer block pointing to the compressed page, or NULL */
UNIV_INTERN
buf_block_t*
buf_pool_contains_zip(
/*==================*/
	const void*	data)	/*!< in: pointer to compressed page */
{
	ulint		n;
	buf_chunk_t*	chunk = buf_pool->chunks;

	for (n = buf_pool->n_chunks; n--; chunk++) {
		buf_block_t* block = buf_chunk_contains_zip(chunk, data);

		if (block) {
			return(block);
		}
	}

	return(NULL);
}
#endif /* UNIV_DEBUG */

/*********************************************************************//**
Checks that all file pages in the buffer chunk are in a replaceable state.
@return	address of a non-free block, or NULL if all freed */
static
const buf_block_t*
buf_chunk_not_freed(
/*================*/
	buf_chunk_t*	chunk)	/*!< in: chunk being checked */
{
	buf_block_t*	block;
	ulint		i;

	ut_ad(buf_pool);
	//ut_ad(buf_pool_mutex_own()); /*optimistic...*/

	block = chunk->blocks;

	for (i = chunk->size; i--; block++) {
		ibool	ready;

		switch (buf_block_get_state(block)) {
		case BUF_BLOCK_ZIP_FREE:
		case BUF_BLOCK_ZIP_PAGE:
		case BUF_BLOCK_ZIP_DIRTY:
			/* The uncompressed buffer pool should never
			contain compressed block descriptors. */
			ut_error;
			break;
		case BUF_BLOCK_NOT_USED:
		case BUF_BLOCK_READY_FOR_USE:
		case BUF_BLOCK_MEMORY:
		case BUF_BLOCK_REMOVE_HASH:
			/* Skip blocks that are not being used for
			file pages. */
			break;
		case BUF_BLOCK_FILE_PAGE:
			mutex_enter(&block->mutex);
			ready = buf_flush_ready_for_replace(&block->page);
933 934
			mutex_exit(&block->mutex);

935 936 937 938 939
			if (block->page.is_corrupt) {
				/* corrupt page may remain, it can be skipped */
				break;
			}

			if (!ready) {

				return(block);
			}

			break;
		}
	}

	return(NULL);
}

/*********************************************************************//**
Checks that all blocks in the buffer chunk are in BUF_BLOCK_NOT_USED state.
@return	TRUE if all freed */
static
ibool
buf_chunk_all_free(
/*===============*/
	const buf_chunk_t*	chunk)	/*!< in: chunk being checked */
{
	const buf_block_t*	block;
	ulint			i;

	ut_ad(buf_pool);
	ut_ad(buf_pool_mutex_own()); /* but we need all mutex here */

	block = chunk->blocks;

	for (i = chunk->size; i--; block++) {

		if (buf_block_get_state(block) != BUF_BLOCK_NOT_USED) {

			return(FALSE);
		}
	}

	return(TRUE);
}

/********************************************************************//**
Frees a chunk of buffer frames. */
static
void
buf_chunk_free(
/*===========*/
	buf_chunk_t*	chunk)		/*!< out: chunk of buffers */
{
	buf_block_t*		block;
	const buf_block_t*	block_end;

	ut_ad(buf_pool_mutex_own()); /* but we need all mutex here */

	block_end = chunk->blocks + chunk->size;

	for (block = chunk->blocks; block < block_end; block++) {
		ut_a(buf_block_get_state(block) == BUF_BLOCK_NOT_USED);
		ut_a(!block->page.zip.data);

		ut_ad(!block->page.in_LRU_list);
		ut_ad(!block->in_unzip_LRU_list);
		ut_ad(!block->page.in_flush_list);
		/* Remove the block from the free list. */
		mutex_enter(&free_list_mutex);
		ut_ad(block->page.in_free_list);
		UT_LIST_REMOVE(free, buf_pool->free, (&block->page));
		mutex_exit(&free_list_mutex);

		/* Free the latches. */
		mutex_free(&block->mutex);
		rw_lock_free(&block->lock);
#ifdef UNIV_SYNC_DEBUG
		rw_lock_free(&block->debug_latch);
#endif /* UNIV_SYNC_DEBUG */
		UNIV_MEM_UNDESC(block);
	}

	os_mem_free_large(chunk->mem, chunk->mem_size);
}

/********************************************************************//**
Creates the buffer pool.
@return	own: buf_pool object, NULL if not enough memory or error */
UNIV_INTERN
buf_pool_t*
buf_pool_init(void)
/*===============*/
{
	buf_chunk_t*	chunk;
	ulint		i;

	buf_pool = mem_zalloc(sizeof(buf_pool_t));

	/* 1. Initialize general fields
	------------------------------- */
	mutex_create(&buf_pool_mutex, SYNC_BUF_POOL);
	mutex_create(&LRU_list_mutex, SYNC_BUF_LRU_LIST);
	mutex_create(&flush_list_mutex, SYNC_BUF_FLUSH_LIST);
	rw_lock_create(&page_hash_latch, SYNC_BUF_PAGE_HASH);
	mutex_create(&free_list_mutex, SYNC_BUF_FREE_LIST);
	mutex_create(&zip_free_mutex, SYNC_BUF_ZIP_FREE);
	mutex_create(&zip_hash_mutex, SYNC_BUF_ZIP_HASH);

	mutex_create(&buf_pool_zip_mutex, SYNC_BUF_BLOCK);

	mutex_enter(&LRU_list_mutex);
	rw_lock_x_lock(&page_hash_latch);
	buf_pool_mutex_enter();

	buf_pool->n_chunks = 1;
	buf_pool->chunks = chunk = mem_alloc(sizeof *chunk);

	UT_LIST_INIT(buf_pool->free);

	if (!buf_chunk_init(chunk, srv_buf_pool_size)) {
		mem_free(chunk);
		mem_free(buf_pool);
		buf_pool = NULL;
		return(NULL);
	}

	srv_buf_pool_old_size = srv_buf_pool_size;
	buf_pool->curr_size = chunk->size;
	srv_buf_pool_curr_size = buf_pool->curr_size * UNIV_PAGE_SIZE;

	buf_pool->page_hash = hash_create(2 * buf_pool->curr_size);
	buf_pool->zip_hash = hash_create(2 * buf_pool->curr_size);

	buf_pool->last_printout_time = time(NULL);

	/* 2. Initialize flushing fields
	-------------------------------- */

	for (i = BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; i++) {
		buf_pool->no_flush[i] = os_event_create(NULL);
	}

	/* 3. Initialize LRU fields
	--------------------------- */
	/* All fields are initialized by mem_zalloc(). */

	mutex_exit(&LRU_list_mutex);
	rw_lock_x_unlock(&page_hash_latch);
	buf_pool_mutex_exit();

	btr_search_sys_create(buf_pool->curr_size
			      * UNIV_PAGE_SIZE / sizeof(void*) / 64);

	/* 4. Initialize the buddy allocator fields */
	/* All fields are initialized by mem_zalloc(). */

	return(buf_pool);
}

/********************************************************************//**
Frees the buffer pool at shutdown.  This must not be invoked before
freeing all mutexes. */
UNIV_INTERN
void
buf_pool_free(void)
/*===============*/
{
	buf_chunk_t*	chunk;
	buf_chunk_t*	chunks;

	chunks = buf_pool->chunks;
	chunk = chunks + buf_pool->n_chunks;

	while (--chunk >= chunks) {
		/* Bypass the checks of buf_chunk_free(), since they
		would fail at shutdown. */
		os_mem_free_large(chunk->mem, chunk->mem_size);
	}

	mem_free(buf_pool->chunks);
	hash_table_free(buf_pool->page_hash);
	hash_table_free(buf_pool->zip_hash);
	mem_free(buf_pool);
	buf_pool = NULL;
}

/********************************************************************//**
Drops the adaptive hash index.  To prevent a livelock, this function
is only to be called while holding btr_search_latch and while
btr_search_enabled == FALSE. */
UNIV_INTERN
void
buf_pool_drop_hash_index(void)
/*==========================*/
{
	ibool		released_search_latch;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
	ut_ad(!btr_search_enabled);

	do {
		buf_chunk_t*	chunks	= buf_pool->chunks;
		buf_chunk_t*	chunk	= chunks + buf_pool->n_chunks;

		released_search_latch = FALSE;

		while (--chunk >= chunks) {
			buf_block_t*	block	= chunk->blocks;
			ulint		i	= chunk->size;

			for (; i--; block++) {
				/* block->is_hashed cannot be modified
				when we have an x-latch on btr_search_latch;
				see the comment in buf0buf.h */

				if (buf_block_get_state(block)
				    != BUF_BLOCK_FILE_PAGE
				    || !block->is_hashed) {
					continue;
				}

				/* To follow the latching order, we
				have to release btr_search_latch
				before acquiring block->latch. */
				rw_lock_x_unlock(&btr_search_latch);
				/* When we release the search latch,
				we must rescan all blocks, because
				some may become hashed again. */
				released_search_latch = TRUE;

				rw_lock_x_lock(&block->lock);

				/* This should be guaranteed by the
				callers, which will be holding
				btr_search_enabled_mutex. */
				ut_ad(!btr_search_enabled);

				/* Because we did not buffer-fix the
				block by calling buf_block_get_gen(),
				it is possible that the block has been
				allocated for some other use after
				btr_search_latch was released above.
				We do not care which file page the
				block is mapped to.  All we want to do
				is to drop any hash entries referring
				to the page. */

				/* It is possible that
				block->page.state != BUF_FILE_PAGE.
				Even that does not matter, because
				btr_search_drop_page_hash_index() will
				check block->is_hashed before doing
				anything.  block->is_hashed can only
				be set on uncompressed file pages. */

				btr_search_drop_page_hash_index(block);

				rw_lock_x_unlock(&block->lock);

				rw_lock_x_lock(&btr_search_latch);

				ut_ad(!btr_search_enabled);
			}
		}
	} while (released_search_latch);
}

/********************************************************************//**
Relocate a buffer control block.  Relocates the block on the LRU list
and in buf_pool->page_hash.  Does not relocate bpage->list.
The caller must take care of relocating bpage->list. */
UNIV_INTERN
void
buf_relocate(
/*=========*/
	buf_page_t*	bpage,	/*!< in/out: control block being relocated;
				buf_page_get_state(bpage) must be
				BUF_BLOCK_ZIP_DIRTY or BUF_BLOCK_ZIP_PAGE */
	buf_page_t*	dpage)	/*!< in/out: destination control block */
{
	buf_page_t*	b;
	ulint		fold;

	//ut_ad(buf_pool_mutex_own());
	ut_ad(mutex_own(&LRU_list_mutex));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&page_hash_latch, RW_LOCK_EX));
#endif
	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
	ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
	ut_a(bpage->buf_fix_count == 0);
	ut_ad(bpage->in_LRU_list);
	ut_ad(!bpage->in_zip_hash);
	ut_ad(bpage->in_page_hash);
	ut_ad(bpage == buf_page_hash_get(bpage->space, bpage->offset));
#ifdef UNIV_DEBUG
	switch (buf_page_get_state(bpage)) {
	case BUF_BLOCK_ZIP_FREE:
	case BUF_BLOCK_NOT_USED:
	case BUF_BLOCK_READY_FOR_USE:
	case BUF_BLOCK_FILE_PAGE:
	case BUF_BLOCK_MEMORY:
	case BUF_BLOCK_REMOVE_HASH:
		ut_error;
	case BUF_BLOCK_ZIP_DIRTY:
	case BUF_BLOCK_ZIP_PAGE:
		break;
	}
#endif /* UNIV_DEBUG */

	memcpy(dpage, bpage, sizeof *dpage);

	bpage->in_LRU_list = FALSE;
	ut_d(bpage->in_page_hash = FALSE);

	/* relocate buf_pool->LRU */
	b = UT_LIST_GET_PREV(LRU, bpage);
	UT_LIST_REMOVE(LRU, buf_pool->LRU, bpage);

	if (b) {
		UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU, b, dpage);
	} else {
		UT_LIST_ADD_FIRST(LRU, buf_pool->LRU, dpage);
	}

	if (UNIV_UNLIKELY(buf_pool->LRU_old == bpage)) {
		buf_pool->LRU_old = dpage;
#ifdef UNIV_LRU_DEBUG
		/* buf_pool->LRU_old must be the first item in the LRU list
		whose "old" flag is set. */
		ut_a(buf_pool->LRU_old->old);
		ut_a(!UT_LIST_GET_PREV(LRU, buf_pool->LRU_old)
		     || !UT_LIST_GET_PREV(LRU, buf_pool->LRU_old)->old);
		ut_a(!UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)
		     || UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)->old);
	} else {
		/* Check that the "old" flag is consistent in
		the block and its neighbours. */
		buf_page_set_old(dpage, buf_page_is_old(dpage));
#endif /* UNIV_LRU_DEBUG */
	}

	ut_d(UT_LIST_VALIDATE(LRU, buf_page_t, buf_pool->LRU,
			      ut_ad(ut_list_node_313->in_LRU_list)));

	/* relocate buf_pool->page_hash */
	fold = buf_page_address_fold(bpage->space, bpage->offset);

	HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, fold, bpage);
	HASH_INSERT(buf_page_t, hash, buf_pool->page_hash, fold, dpage);
}

/********************************************************************//**
Shrinks the buffer pool. */
static
void
buf_pool_shrink(
/*============*/
	ulint	chunk_size)	/*!< in: number of pages to remove */
{
	buf_chunk_t*	chunks;
	buf_chunk_t*	chunk;
	ulint		max_size;
	ulint		max_free_size;
	buf_chunk_t*	max_chunk;
	buf_chunk_t*	max_free_chunk;

	ut_ad(!buf_pool_mutex_own());

try_again:
	btr_search_disable(); /* Empty the adaptive hash index again */
	//buf_pool_mutex_enter();
	mutex_enter(&LRU_list_mutex);

shrink_again:
	if (buf_pool->n_chunks <= 1) {

		/* Cannot shrink if there is only one chunk */
		goto func_done;
	}

	/* Search for the largest free chunk
	not larger than the size difference */
	chunks = buf_pool->chunks;
	chunk = chunks + buf_pool->n_chunks;
	max_size = max_free_size = 0;
	max_chunk = max_free_chunk = NULL;

	while (--chunk >= chunks) {
		if (chunk->size <= chunk_size
		    && chunk->size > max_free_size) {
			if (chunk->size > max_size) {
				max_size = chunk->size;
				max_chunk = chunk;
			}

			if (buf_chunk_all_free(chunk)) {
				max_free_size = chunk->size;
				max_free_chunk = chunk;
			}
		}
	}

	if (!max_free_size) {

		ulint		dirty	= 0;
		ulint		nonfree	= 0;
		buf_block_t*	block;
		buf_block_t*	bend;

		/* Cannot shrink: try again later
		(do not assign srv_buf_pool_old_size) */
		if (!max_chunk) {

			goto func_exit;
		}

		block = max_chunk->blocks;
		bend = block + max_chunk->size;

		/* Move the blocks of chunk to the end of the
		LRU list and try to flush them. */
		for (; block < bend; block++) {
			switch (buf_block_get_state(block)) {
			case BUF_BLOCK_NOT_USED:
				continue;
			case BUF_BLOCK_FILE_PAGE:
				break;
			default:
				nonfree++;
				continue;
			}

			mutex_enter(&block->mutex);
			/* The following calls will temporarily
			release block->mutex and buf_pool_mutex.
			Therefore, we have to always retry,
			even if !dirty && !nonfree. */

			if (!buf_flush_ready_for_replace(&block->page)) {

				buf_LRU_make_block_old(&block->page);
				dirty++;
			} else if (buf_LRU_free_block(&block->page, TRUE, NULL, FALSE)
				   != BUF_LRU_FREED) {
				nonfree++;
			}

			mutex_exit(&block->mutex);
		}

		//buf_pool_mutex_exit();
		mutex_exit(&LRU_list_mutex);

		/* Request for a flush of the chunk if it helps.
		Do not flush if there are non-free blocks, since
		flushing will not make the chunk freeable. */
		if (nonfree) {
			/* Avoid busy-waiting. */
			os_thread_sleep(100000);
		} else if (dirty
			   && buf_flush_batch(BUF_FLUSH_LRU, dirty, 0)
			   == ULINT_UNDEFINED) {

			buf_flush_wait_batch_end(BUF_FLUSH_LRU);
		}

		goto try_again;
	}

	max_size = max_free_size;
	max_chunk = max_free_chunk;

	srv_buf_pool_old_size = srv_buf_pool_size;

	/* Rewrite buf_pool->chunks.  Copy everything but max_chunk. */
	chunks = mem_alloc((buf_pool->n_chunks - 1) * sizeof *chunks);
	memcpy(chunks, buf_pool->chunks,
	       (max_chunk - buf_pool->chunks) * sizeof *chunks);
	memcpy(chunks + (max_chunk - buf_pool->chunks),
	       max_chunk + 1,
	       buf_pool->chunks + buf_pool->n_chunks
	       - (max_chunk + 1));
	ut_a(buf_pool->curr_size > max_chunk->size);
	buf_pool->curr_size -= max_chunk->size;
	srv_buf_pool_curr_size = buf_pool->curr_size * UNIV_PAGE_SIZE;
	chunk_size -= max_chunk->size;
	buf_chunk_free(max_chunk);
	mem_free(buf_pool->chunks);
	buf_pool->chunks = chunks;
	buf_pool->n_chunks--;

	/* Allow a slack of one megabyte. */
	if (chunk_size > 1048576 / UNIV_PAGE_SIZE) {

		goto shrink_again;
	}

func_done:
	srv_buf_pool_old_size = srv_buf_pool_size;
func_exit:
	//buf_pool_mutex_exit();
	mutex_exit(&LRU_list_mutex);
	btr_search_enable();
}

/********************************************************************//**
Rebuild buf_pool->page_hash. */
static
void
buf_pool_page_hash_rebuild(void)
/*============================*/
{
	ulint		i;
	ulint		n_chunks;
	buf_chunk_t*	chunk;
	hash_table_t*	page_hash;
	hash_table_t*	zip_hash;
	buf_page_t*	b;

	//buf_pool_mutex_enter();
	mutex_enter(&LRU_list_mutex);
	rw_lock_x_lock(&page_hash_latch);
	mutex_enter(&flush_list_mutex);
	

	/* Free, create, and populate the hash table. */
	hash_table_free(buf_pool->page_hash);
	buf_pool->page_hash = page_hash = hash_create(2 * buf_pool->curr_size);
	zip_hash = hash_create(2 * buf_pool->curr_size);

	HASH_MIGRATE(buf_pool->zip_hash, zip_hash, buf_page_t, hash,
		     BUF_POOL_ZIP_FOLD_BPAGE);

	hash_table_free(buf_pool->zip_hash);
	buf_pool->zip_hash = zip_hash;

	/* Insert the uncompressed file pages to buf_pool->page_hash. */

	chunk = buf_pool->chunks;
	n_chunks = buf_pool->n_chunks;

	for (i = 0; i < n_chunks; i++, chunk++) {
		ulint		j;
		buf_block_t*	block = chunk->blocks;

		for (j = 0; j < chunk->size; j++, block++) {
			if (buf_block_get_state(block)
			    == BUF_BLOCK_FILE_PAGE) {
				ut_ad(!block->page.in_zip_hash);
				ut_ad(block->page.in_page_hash);

				HASH_INSERT(buf_page_t, hash, page_hash,
					    buf_page_address_fold(
						    block->page.space,
						    block->page.offset),
					    &block->page);
			}
		}
	}

	/* Insert the compressed-only pages to buf_pool->page_hash.
	All such blocks are either in buf_pool->zip_clean or
	in buf_pool->flush_list. */

	for (b = UT_LIST_GET_FIRST(buf_pool->zip_clean); b;
	     b = UT_LIST_GET_NEXT(zip_list, b)) {
1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514
		ut_a(buf_page_get_state(b) == BUF_BLOCK_ZIP_PAGE);
		ut_ad(!b->in_flush_list);
		ut_ad(b->in_LRU_list);
		ut_ad(b->in_page_hash);
		ut_ad(!b->in_zip_hash);

		HASH_INSERT(buf_page_t, hash, page_hash,
			    buf_page_address_fold(b->space, b->offset), b);
	}

	for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b;
	     b = UT_LIST_GET_NEXT(flush_list, b)) {
		ut_ad(b->in_flush_list);
		ut_ad(b->in_LRU_list);
		ut_ad(b->in_page_hash);
		ut_ad(!b->in_zip_hash);

		switch (buf_page_get_state(b)) {
		case BUF_BLOCK_ZIP_DIRTY:
			HASH_INSERT(buf_page_t, hash, page_hash,
				    buf_page_address_fold(b->space,
							  b->offset), b);
			break;
		case BUF_BLOCK_FILE_PAGE:
			/* uncompressed page */
			break;
		case BUF_BLOCK_ZIP_FREE:
		case BUF_BLOCK_ZIP_PAGE:
		case BUF_BLOCK_NOT_USED:
		case BUF_BLOCK_READY_FOR_USE:
		case BUF_BLOCK_MEMORY:
		case BUF_BLOCK_REMOVE_HASH:
			ut_error;
			break;
		}
	}

	//buf_pool_mutex_exit();
	mutex_exit(&LRU_list_mutex);
	rw_lock_x_unlock(&page_hash_latch);
	mutex_exit(&flush_list_mutex);
}

/********************************************************************//**
Resizes the buffer pool. */
UNIV_INTERN
void
buf_pool_resize(void)
/*=================*/
{
	//buf_pool_mutex_enter();
	mutex_enter(&LRU_list_mutex);

	if (srv_buf_pool_old_size == srv_buf_pool_size) {

		//buf_pool_mutex_exit();
		mutex_exit(&LRU_list_mutex);
		return;
	}

	if (srv_buf_pool_curr_size + 1048576 > srv_buf_pool_size) {

		//buf_pool_mutex_exit();
		mutex_exit(&LRU_list_mutex);

		/* Disable adaptive hash indexes and empty the index
		in order to free up memory in the buffer pool chunks. */
		buf_pool_shrink((srv_buf_pool_curr_size - srv_buf_pool_size)
				/ UNIV_PAGE_SIZE);
	} else if (srv_buf_pool_curr_size + 1048576 < srv_buf_pool_size) {

		/* Enlarge the buffer pool by at least one megabyte */

		ulint		mem_size
			= srv_buf_pool_size - srv_buf_pool_curr_size;
		buf_chunk_t*	chunks;
		buf_chunk_t*	chunk;

		chunks = mem_alloc((buf_pool->n_chunks + 1) * sizeof *chunks);

		memcpy(chunks, buf_pool->chunks, buf_pool->n_chunks
		       * sizeof *chunks);

		chunk = &chunks[buf_pool->n_chunks];

		if (!buf_chunk_init(chunk, mem_size)) {
			mem_free(chunks);
		} else {
			buf_pool->curr_size += chunk->size;
			srv_buf_pool_curr_size = buf_pool->curr_size
				* UNIV_PAGE_SIZE;
			mem_free(buf_pool->chunks);
			buf_pool->chunks = chunks;
			buf_pool->n_chunks++;
		}

		srv_buf_pool_old_size = srv_buf_pool_size;
		//buf_pool_mutex_exit();
		mutex_exit(&LRU_list_mutex);
	}

	buf_pool_page_hash_rebuild();
}

/********************************************************************//**
Moves a page to the start of the buffer pool LRU list. This high-level
function can be used to prevent an important page from slipping out of
the buffer pool. */
UNIV_INTERN
void
buf_page_make_young(
/*================*/
	buf_page_t*	bpage)	/*!< in: buffer block of a file page */
{
	//buf_pool_mutex_enter();
	mutex_enter(&LRU_list_mutex);

	ut_a(buf_page_in_file(bpage));

	buf_LRU_make_block_young(bpage);

	//buf_pool_mutex_exit();
	mutex_exit(&LRU_list_mutex);
}

/********************************************************************//**
Sets the time of the first access of a page and moves a page to the
start of the buffer pool LRU list if it is too old.  This high-level
function can be used to prevent an important page from slipping
out of the buffer pool. */
static
void
buf_page_set_accessed_make_young(
/*=============================*/
	buf_page_t*	bpage,		/*!< in/out: buffer block of a
					file page */
	unsigned	access_time)	/*!< in: bpage->access_time
					read under mutex protection,
					or 0 if unknown */
{
	ut_ad(!buf_pool_mutex_own());
	ut_a(buf_page_in_file(bpage));

	if (buf_page_peek_if_too_old(bpage)) {
		//buf_pool_mutex_enter();
		mutex_enter(&LRU_list_mutex);
		buf_LRU_make_block_young(bpage);
		//buf_pool_mutex_exit();
		mutex_exit(&LRU_list_mutex);
	} else if (!access_time) {
		ulint	time_ms = ut_time_ms();
		mutex_t*	block_mutex = buf_page_get_mutex_enter(bpage);
		//buf_pool_mutex_enter();
		if (block_mutex) {
			buf_page_set_accessed(bpage, time_ms);
			mutex_exit(block_mutex);
		}
		//buf_pool_mutex_exit();
	}
}

/********************************************************************//**
Resets the check_index_page_at_flush field of a page if found in the buffer
pool. */
UNIV_INTERN
void
buf_reset_check_index_page_at_flush(
/*================================*/
	ulint	space,	/*!< in: space id */
	ulint	offset)	/*!< in: page number */
{
	buf_block_t*	block;

	//buf_pool_mutex_enter();
	rw_lock_s_lock(&page_hash_latch);

	block = (buf_block_t*) buf_page_hash_get(space, offset);

	if (block && buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE) {
		block->check_index_page_at_flush = FALSE;
	}

	//buf_pool_mutex_exit();
	rw_lock_s_unlock(&page_hash_latch);
}

/********************************************************************//**
Returns the current state of is_hashed of a page. FALSE if the page is
not in the pool. NOTE that this operation does not fix the page in the
pool if it is found there.
@return	TRUE if page hash index is built in search system */
UNIV_INTERN
ibool
buf_page_peek_if_search_hashed(
/*===========================*/
	ulint	space,	/*!< in: space id */
	ulint	offset)	/*!< in: page number */
{
	buf_block_t*	block;
	ibool		is_hashed;

	//buf_pool_mutex_enter();
	rw_lock_s_lock(&page_hash_latch);

	block = (buf_block_t*) buf_page_hash_get(space, offset);

	if (!block || buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
		is_hashed = FALSE;
	} else {
		is_hashed = block->is_hashed;
	}

	//buf_pool_mutex_exit();
	rw_lock_s_unlock(&page_hash_latch);

	return(is_hashed);
}

#ifdef UNIV_DEBUG_FILE_ACCESSES
/********************************************************************//**
Sets file_page_was_freed TRUE if the page is found in the buffer pool.
This function should be called when we free a file page and want the
debug version to check that it is not accessed any more unless
reallocated.
@return	control block if found in page hash table, otherwise NULL */
UNIV_INTERN
buf_page_t*
buf_page_set_file_page_was_freed(
/*=============================*/
	ulint	space,	/*!< in: space id */
	ulint	offset)	/*!< in: page number */
{
	buf_page_t*	bpage;

	//buf_pool_mutex_enter();
	rw_lock_s_lock(&page_hash_latch);

	bpage = buf_page_hash_get(space, offset);

	if (bpage) {
		bpage->file_page_was_freed = TRUE;
	}

	//buf_pool_mutex_exit();
	rw_lock_s_unlock(&page_hash_latch);

	return(bpage);
}

/********************************************************************//**
Sets file_page_was_freed FALSE if the page is found in the buffer pool.
This function should be called when we free a file page and want the
debug version to check that it is not accessed any more unless
reallocated.
@return	control block if found in page hash table, otherwise NULL */
UNIV_INTERN
buf_page_t*
buf_page_reset_file_page_was_freed(
/*===============================*/
	ulint	space,	/*!< in: space id */
	ulint	offset)	/*!< in: page number */
{
	buf_page_t*	bpage;

	//buf_pool_mutex_enter();
	rw_lock_s_lock(&page_hash_latch);

	bpage = buf_page_hash_get(space, offset);

	if (bpage) {
		bpage->file_page_was_freed = FALSE;
	}

	//buf_pool_mutex_exit();
	rw_lock_s_unlock(&page_hash_latch);

	return(bpage);
}
#endif /* UNIV_DEBUG_FILE_ACCESSES */

/********************************************************************//**
Get read access to a compressed page (usually of type
FIL_PAGE_TYPE_ZBLOB or FIL_PAGE_TYPE_ZBLOB2).
The page must be released with buf_page_release_zip().
NOTE: the page is not protected by any latch.  Mutual exclusion has to
be implemented at a higher level.  In other words, all possible
accesses to a given page through this function must be protected by
the same set of mutexes or latches.
@return	pointer to the block */
UNIV_INTERN
buf_page_t*
buf_page_get_zip(
/*=============*/
	ulint		space,	/*!< in: space id */
	ulint		zip_size,/*!< in: compressed page size */
	ulint		offset)	/*!< in: page number */
{
	buf_page_t*	bpage;
	mutex_t*	block_mutex;
	ibool		must_read;
	unsigned	access_time;
	trx_t*		trx = NULL;
	ulint		sec;
	ulint		ms;
	ib_uint64_t	start_time;
	ib_uint64_t	finish_time;

#ifndef UNIV_LOG_DEBUG
	ut_ad(!ibuf_inside());
#endif
	if (innobase_get_slow_log()) {
		trx = innobase_get_trx();
	}
	buf_pool->stat.n_page_gets++;

	for (;;) {
		//buf_pool_mutex_enter();
lookup:
		rw_lock_s_lock(&page_hash_latch);
		bpage = buf_page_hash_get(space, offset);
		if (bpage) {
			break;
		}

		/* Page not in buf_pool: needs to be read from file */

		//buf_pool_mutex_exit();
		rw_lock_s_unlock(&page_hash_latch);

		buf_read_page(space, zip_size, offset, trx);

#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
		ut_a(++buf_dbg_counter % 37 || buf_validate());
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
	}

	if (UNIV_UNLIKELY(!bpage->zip.data)) {
		/* There is no compressed page. */
err_exit:
		//buf_pool_mutex_exit();
		rw_lock_s_unlock(&page_hash_latch);
		return(NULL);
	}

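	/* If srv_pass_corrupt_table is set, a page that has been marked
	corrupt is reported back as missing (NULL) instead of hitting the
	assertion below. */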
	if (srv_pass_corrupt_table) {
		if (bpage->is_corrupt) {
			rw_lock_s_unlock(&page_hash_latch);
			return(NULL);
		}
	}
	ut_a(!(bpage->is_corrupt));

	block_mutex = buf_page_get_mutex_enter(bpage);

	rw_lock_s_unlock(&page_hash_latch);

	switch (buf_page_get_state(bpage)) {
	case BUF_BLOCK_NOT_USED:
	case BUF_BLOCK_READY_FOR_USE:
	case BUF_BLOCK_MEMORY:
	case BUF_BLOCK_REMOVE_HASH:
	case BUF_BLOCK_ZIP_FREE:
		if (block_mutex)
			mutex_exit(block_mutex);
		break;
	case BUF_BLOCK_ZIP_PAGE:
	case BUF_BLOCK_ZIP_DIRTY:
		ut_a(block_mutex == &buf_pool_zip_mutex);
		bpage->buf_fix_count++;
		goto got_block;
	case BUF_BLOCK_FILE_PAGE:
		ut_a(block_mutex == &((buf_block_t*) bpage)->mutex);

		/* Discard the uncompressed page frame if possible. */
		if (buf_LRU_free_block(bpage, FALSE, NULL, FALSE)
		    == BUF_LRU_FREED) {

			mutex_exit(block_mutex);
			goto lookup;
		}

		buf_block_buf_fix_inc((buf_block_t*) bpage,
				      __FILE__, __LINE__);
		goto got_block;
	}

	ut_error;
	goto err_exit;

got_block:
	must_read = buf_page_get_io_fix(bpage) == BUF_IO_READ;
	access_time = buf_page_is_accessed(bpage);

	//buf_pool_mutex_exit();

	mutex_exit(block_mutex);

	buf_page_set_accessed_make_young(bpage, access_time);

#ifdef UNIV_DEBUG_FILE_ACCESSES
	ut_a(!bpage->file_page_was_freed);
#endif

#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
	ut_a(++buf_dbg_counter % 5771 || buf_validate());
	ut_a(bpage->buf_fix_count > 0);
	ut_a(buf_page_in_file(bpage));
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */

	if (must_read) {
		/* Let us wait until the read operation
		completes */

		if (innobase_get_slow_log() && trx && trx->take_stats)
		{
			ut_usectime(&sec, &ms);
			start_time = (ib_uint64_t)sec * 1000000 + ms;
		} else {
			start_time = 0;
		}
		for (;;) {
			enum buf_io_fix	io_fix;

			mutex_enter(block_mutex);
			io_fix = buf_page_get_io_fix(bpage);
			mutex_exit(block_mutex);

			if (io_fix == BUF_IO_READ) {

				os_thread_sleep(WAIT_FOR_READ);
			} else {
				break;
			}
		}
		if (innobase_get_slow_log() && trx && trx->take_stats && start_time)
		{
			ut_usectime(&sec, &ms);
			finish_time = (ib_uint64_t)sec * 1000000 + ms;
			trx->io_reads_wait_timer += (ulint)(finish_time - start_time);
		}
	}

#ifdef UNIV_IBUF_COUNT_DEBUG
	ut_a(ibuf_count_get(buf_page_get_space(bpage),
			    buf_page_get_page_no(bpage)) == 0);
#endif
	return(bpage);
}

/********************************************************************//**
Initialize some fields of a control block. */
UNIV_INLINE
void
buf_block_init_low(
/*===============*/
	buf_block_t*	block)	/*!< in: block to init */
{
	block->check_index_page_at_flush = FALSE;
	block->index		= NULL;

	block->n_hash_helps	= 0;
	block->is_hashed	= FALSE;
	block->n_fields		= 1;
	block->n_bytes		= 0;
	block->left_side	= TRUE;
}
#endif /* !UNIV_HOTBACKUP */

/********************************************************************//**
Decompress a block.
@return	TRUE if successful */
UNIV_INTERN
ibool
buf_zip_decompress(
/*===============*/
	buf_block_t*	block,	/*!< in/out: block */
	ibool		check)	/*!< in: TRUE=verify the page checksum */
{
	const byte*	frame		= block->page.zip.data;
	ulint		stamp_checksum	= mach_read_from_4(
		frame + FIL_PAGE_SPACE_OR_CHKSUM);

	ut_ad(buf_block_get_zip_size(block));
	ut_a(buf_block_get_space(block) != 0);

	if (UNIV_LIKELY(check && stamp_checksum != BUF_NO_CHECKSUM_MAGIC)) {
		ulint	calc_checksum	= page_zip_calc_checksum(
			frame, page_zip_get_size(&block->page.zip));

		if (UNIV_UNLIKELY(stamp_checksum != calc_checksum)) {
			ut_print_timestamp(stderr);
			fprintf(stderr,
				"  InnoDB: compressed page checksum mismatch"
				" (space %u page %u): %lu != %lu\n",
				block->page.space, block->page.offset,
				stamp_checksum, calc_checksum);
			return(FALSE);
		}
	}

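	/* Only index (FIL_PAGE_INDEX) pages are compressed with zlib; the
	other page types handled below are stored as-is in the compressed
	frame and only need to be copied out. */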
	switch (fil_page_get_type(frame)) {
	case FIL_PAGE_INDEX:
		if (page_zip_decompress(&block->page.zip,
					block->frame, TRUE)) {
			return(TRUE);
		}

		fprintf(stderr,
			"InnoDB: unable to decompress space %lu page %lu\n",
			(ulong) block->page.space,
			(ulong) block->page.offset);
		return(FALSE);

	case FIL_PAGE_TYPE_ALLOCATED:
	case FIL_PAGE_INODE:
	case FIL_PAGE_IBUF_BITMAP:
	case FIL_PAGE_TYPE_FSP_HDR:
	case FIL_PAGE_TYPE_XDES:
	case FIL_PAGE_TYPE_ZBLOB:
	case FIL_PAGE_TYPE_ZBLOB2:
		/* Copy to uncompressed storage. */
		memcpy(block->frame, frame,
		       buf_block_get_zip_size(block));
		return(TRUE);
	}

	ut_print_timestamp(stderr);
	fprintf(stderr,
		"  InnoDB: unknown compressed page"
		" type %lu\n",
		fil_page_get_type(frame));
	return(FALSE);
}

#ifndef UNIV_HOTBACKUP
/*******************************************************************//**
Gets the block to whose frame the pointer is pointing to.
@return	pointer to block, never NULL */
UNIV_INTERN
buf_block_t*
buf_block_align(
/*============*/
	const byte*	ptr)	/*!< in: pointer to a frame */
{
	buf_chunk_t*	chunk;
	ulint		i;

	/* TODO: protect buf_pool->chunks with a mutex (it will
	currently remain constant after buf_pool_init()) */
	for (chunk = buf_pool->chunks, i = buf_pool->n_chunks; i--; chunk++) {
		lint	offs = ptr - chunk->blocks->frame;

		if (UNIV_UNLIKELY(offs < 0)) {

			continue;
		}

		offs >>= UNIV_PAGE_SIZE_SHIFT;

		if (UNIV_LIKELY((ulint) offs < chunk->size)) {
			buf_block_t*	block = &chunk->blocks[offs];

			/* The function buf_chunk_init() invokes
			buf_block_init() so that block[n].frame ==
			block->frame + n * UNIV_PAGE_SIZE.  Check it. */
			ut_ad(block->frame == page_align(ptr));
#ifdef UNIV_DEBUG
			/* A thread that updates these fields must
			hold buf_pool_mutex and block->mutex.  Acquire
			only the latter. */
			mutex_enter(&block->mutex);

			switch (buf_block_get_state(block)) {
			case BUF_BLOCK_ZIP_FREE:
			case BUF_BLOCK_ZIP_PAGE:
			case BUF_BLOCK_ZIP_DIRTY:
				/* These types should only be used in
				the compressed buffer pool, whose
				memory is allocated from
				buf_pool->chunks, in UNIV_PAGE_SIZE
				blocks flagged as BUF_BLOCK_MEMORY. */
				ut_error;
				break;
			case BUF_BLOCK_NOT_USED:
			case BUF_BLOCK_READY_FOR_USE:
			case BUF_BLOCK_MEMORY:
				/* Some data structures contain
				"guess" pointers to file pages.  The
				file pages may have been freed and
				reused.  Do not complain. */
				break;
			case BUF_BLOCK_REMOVE_HASH:
				/* buf_LRU_block_remove_hashed_page()
				will overwrite the FIL_PAGE_OFFSET and
				FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID with
				0xff and set the state to
				BUF_BLOCK_REMOVE_HASH. */
				ut_ad(page_get_space_id(page_align(ptr))
				      == 0xffffffff);
				ut_ad(page_get_page_no(page_align(ptr))
				      == 0xffffffff);
				break;
			case BUF_BLOCK_FILE_PAGE:
				ut_ad(block->page.space
				      == page_get_space_id(page_align(ptr)));
				ut_ad(block->page.offset
				      == page_get_page_no(page_align(ptr)));
				break;
			}

			mutex_exit(&block->mutex);
#endif /* UNIV_DEBUG */

			return(block);
		}
	}

	/* The block should always be found. */
	ut_error;
	return(NULL);
}

/********************************************************************//**
Find out if a pointer belongs to a buf_block_t. It can be a pointer to
the buf_block_t itself or a member of it
@return	TRUE if ptr belongs to a buf_block_t struct */
UNIV_INTERN
ibool
buf_pointer_is_block_field(
/*=======================*/
	const void*		ptr)	/*!< in: pointer not
					dereferenced */
{
	const buf_chunk_t*		chunk	= buf_pool->chunks;
	const buf_chunk_t* const	echunk	= chunk + buf_pool->n_chunks;

	/* TODO: protect buf_pool->chunks with a mutex (it will
	currently remain constant after buf_pool_init()) */
	while (chunk < echunk) {
		if (ptr >= (void *)chunk->blocks
		    && ptr < (void *)(chunk->blocks + chunk->size)) {

			return(TRUE);
		}

		chunk++;
	}

	return(FALSE);
}

/********************************************************************//**
Find out if a buffer block was created by buf_chunk_init().
@return	TRUE if "block" has been added to buf_pool->free by buf_chunk_init() */
static
ibool
buf_block_is_uncompressed(
/*======================*/
	const buf_block_t*	block)	/*!< in: pointer to block,
					not dereferenced */
{
	//ut_ad(buf_pool_mutex_own());

	if (UNIV_UNLIKELY((((ulint) block) % sizeof *block) != 0)) {
		/* The pointer should be aligned. */
		return(FALSE);
	}

	return(buf_pointer_is_block_field((void *)block));
}

/********************************************************************//**
This is the general function used to get access to a database page.
@return	pointer to the block or NULL */
UNIV_INTERN
buf_block_t*
buf_page_get_gen(
/*=============*/
	ulint		space,	/*!< in: space id */
	ulint		zip_size,/*!< in: compressed page size in bytes
				or 0 for uncompressed pages */
	ulint		offset,	/*!< in: page number */
	ulint		rw_latch,/*!< in: RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH */
	buf_block_t*	guess,	/*!< in: guessed block or NULL */
	ulint		mode,	/*!< in: BUF_GET, BUF_GET_IF_IN_POOL,
				BUF_GET_NO_LATCH */
	const char*	file,	/*!< in: file name */
	ulint		line,	/*!< in: line where called */
	mtr_t*		mtr)	/*!< in: mini-transaction */
{
	buf_block_t*	block;
	unsigned	access_time;
	ulint		fix_type;
	ibool		must_read;
	ulint		retries = 0;
	mutex_t*	block_mutex;
	trx_t*          trx = NULL;
	ulint           sec;
	ulint           ms;
	ib_uint64_t     start_time;
	ib_uint64_t     finish_time;

	ut_ad(mtr);
	ut_ad(mtr->state == MTR_ACTIVE);
	ut_ad((rw_latch == RW_S_LATCH)
	      || (rw_latch == RW_X_LATCH)
	      || (rw_latch == RW_NO_LATCH));
	ut_ad((mode != BUF_GET_NO_LATCH) || (rw_latch == RW_NO_LATCH));
	ut_ad((mode == BUF_GET) || (mode == BUF_GET_IF_IN_POOL)
	      || (mode == BUF_GET_NO_LATCH));
	ut_ad(zip_size == fil_space_get_zip_size(space));
	ut_ad(ut_is_2pow(zip_size));
#ifndef UNIV_LOG_DEBUG
	ut_ad(!ibuf_inside() || ibuf_page(space, zip_size, offset, NULL));
#endif
	if (innobase_get_slow_log()) {
		trx = innobase_get_trx();
	}
	buf_pool->stat.n_page_gets++;
loop:
	block = guess;
	//buf_pool_mutex_enter();

	if (block) {
		block_mutex = buf_page_get_mutex_enter((buf_page_t*)block);

		/* If the guess is a compressed page descriptor that
		has been allocated by buf_buddy_alloc(), it may have
		been invalidated by buf_buddy_relocate().  In that
		case, block could point to something that happens to
		contain the expected bits in block->page.  Similarly,
		the guess may be pointing to a buffer pool chunk that
		has been released when resizing the buffer pool. */

		if (!block_mutex) {
			block = guess = NULL;
		} else if (!buf_block_is_uncompressed(block)
		    || offset != block->page.offset
		    || space != block->page.space
		    || buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {

			mutex_exit(block_mutex);

			block = guess = NULL;
		} else {
			ut_ad(!block->page.in_zip_hash);
			ut_ad(block->page.in_page_hash);
		}
	}

	if (block == NULL) {
		rw_lock_s_lock(&page_hash_latch);
		block = (buf_block_t*) buf_page_hash_get(space, offset);
		if (block) {
			block_mutex = buf_page_get_mutex_enter((buf_page_t*)block);
			ut_a(block_mutex);
		}
		rw_lock_s_unlock(&page_hash_latch);
	}

loop2:
	if (block == NULL) {
		/* Page not in buf_pool: needs to be read from file */

		//buf_pool_mutex_exit();

		if (mode == BUF_GET_IF_IN_POOL) {

			return(NULL);
		}

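		/* Read the page from the data file; give up (and abort)
		only after BUF_PAGE_READ_MAX_RETRIES consecutive failed
		attempts. */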
		if (buf_read_page(space, zip_size, offset, trx)) {
			retries = 0;
		} else if (retries < BUF_PAGE_READ_MAX_RETRIES) {
			++retries;
		} else {
			fprintf(stderr, "InnoDB: Error: Unable"
				" to read tablespace %lu page no"
				" %lu into the buffer pool after"
				" %lu attempts\n"
				"InnoDB: The most probable cause"
				" of this error may be that the"
				" table has been corrupted.\n"
				"InnoDB: You can try to fix this"
				" problem by using"
				" innodb_force_recovery.\n"
				"InnoDB: Please see reference manual"
				" for more details.\n"
				"InnoDB: Aborting...\n",
				space, offset,
				BUF_PAGE_READ_MAX_RETRIES);

			ut_error;
		}

#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
		ut_a(++buf_dbg_counter % 37 || buf_validate());
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
		goto loop;
	}

	ut_ad(page_zip_get_size(&block->page.zip) == zip_size);

	must_read = buf_block_get_io_fix(block) == BUF_IO_READ;

	if (must_read && mode == BUF_GET_IF_IN_POOL) {
		/* The page is only being read to buffer */
		//buf_pool_mutex_exit();
		mutex_exit(block_mutex);

		return(NULL);
	}

	if (srv_pass_corrupt_table) {
		if (block->page.is_corrupt) {
			mutex_exit(block_mutex);
			return(NULL);
		}
	}
	ut_a(!(block->page.is_corrupt));

	switch (buf_block_get_state(block)) {
		buf_page_t*	bpage;
		ibool		success;

	case BUF_BLOCK_FILE_PAGE:
		if (block_mutex == &buf_pool_zip_mutex) {
			/* it is the wrong mutex; retry the lookup */
			mutex_exit(block_mutex);
			goto loop;
		}
		break;

	case BUF_BLOCK_ZIP_PAGE:
	case BUF_BLOCK_ZIP_DIRTY:
		ut_ad(block_mutex == &buf_pool_zip_mutex);
		bpage = &block->page;
		/* Protect bpage->buf_fix_count. */
		/* Already protected here. */
		//mutex_enter(&buf_pool_zip_mutex);

		if (bpage->buf_fix_count
		    || buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
			/* This condition often occurs when the buffer
			is not buffer-fixed, but I/O-fixed by
			buf_page_init_for_read(). */
			//mutex_exit(&buf_pool_zip_mutex);
wait_until_unfixed:
			/* The block is buffer-fixed or I/O-fixed.
			Try again later. */
			//buf_pool_mutex_exit();
			mutex_exit(block_mutex);
			os_thread_sleep(WAIT_FOR_READ);

			goto loop;
		}

		/* Allocate an uncompressed page. */
		//buf_pool_mutex_exit();
		//mutex_exit(&buf_pool_zip_mutex);
		mutex_exit(block_mutex);

		block = buf_LRU_get_free_block(0);
		ut_a(block);
		block_mutex = &block->mutex;

		//buf_pool_mutex_enter();
		mutex_enter(&LRU_list_mutex);
		rw_lock_x_lock(&page_hash_latch);
		mutex_enter(block_mutex);

		{
			buf_page_t*	hash_bpage
				= buf_page_hash_get(space, offset);

			if (UNIV_UNLIKELY(bpage != hash_bpage)) {
				/* The buf_pool->page_hash was modified
				while buf_pool_mutex was released.
				Free the block that was allocated. */

				buf_LRU_block_free_non_file_page(block, TRUE);
				mutex_exit(block_mutex);

				block = (buf_block_t*) hash_bpage;
				if (block) {
					block_mutex = buf_page_get_mutex_enter((buf_page_t*)block);
					ut_a(block_mutex);
				}
				rw_lock_x_unlock(&page_hash_latch);
				mutex_exit(&LRU_list_mutex);
				goto loop2;
			}
		}

		mutex_enter(&buf_pool_zip_mutex);

		if (UNIV_UNLIKELY
		    (bpage->buf_fix_count
		     || buf_page_get_io_fix(bpage) != BUF_IO_NONE)) {

			mutex_exit(&buf_pool_zip_mutex);
			/* The block was buffer-fixed or I/O-fixed
			while buf_pool_mutex was not held by this thread.
			Free the block that was allocated and try again.
			This should be extremely unlikely. */

			buf_LRU_block_free_non_file_page(block, TRUE);
			//mutex_exit(&block->mutex);

			rw_lock_x_unlock(&page_hash_latch);
			mutex_exit(&LRU_list_mutex);
			goto wait_until_unfixed;
		}

		/* Move the compressed page from bpage to block,
		and uncompress it. */

		mutex_enter(&flush_list_mutex);

		buf_relocate(bpage, &block->page);

		rw_lock_x_unlock(&page_hash_latch);

		buf_block_init_low(block);
		block->lock_hash_val = lock_rec_hash(space, offset);

		UNIV_MEM_DESC(&block->page.zip.data,
			      page_zip_get_size(&block->page.zip), block);

		if (buf_page_get_state(&block->page)
		    == BUF_BLOCK_ZIP_PAGE) {
			UT_LIST_REMOVE(zip_list, buf_pool->zip_clean,
				       &block->page);
			ut_ad(!block->page.in_flush_list);
		} else {
			/* Relocate buf_pool->flush_list. */
			buf_flush_relocate_on_flush_list(bpage,
							 &block->page);
		}

		mutex_exit(&flush_list_mutex);

		/* Buffer-fix, I/O-fix, and X-latch the block
		for the duration of the decompression.
		Also add the block to the unzip_LRU list. */
		block->page.state = BUF_BLOCK_FILE_PAGE;

		/* Insert at the front of unzip_LRU list */
		buf_unzip_LRU_add_block(block, FALSE);

		mutex_exit(&LRU_list_mutex);

		block->page.buf_fix_count = 1;
		buf_block_set_io_fix(block, BUF_IO_READ);
		rw_lock_x_lock_func(&block->lock, 0, file, line);

		UNIV_MEM_INVALID(bpage, sizeof *bpage);

		mutex_exit(block_mutex);
		mutex_exit(&buf_pool_zip_mutex);

		mutex_enter(&buf_pool_mutex);
		buf_pool->n_pend_unzip++;
		mutex_exit(&buf_pool_mutex);

		buf_buddy_free(bpage, sizeof *bpage, FALSE);

		//buf_pool_mutex_exit();

		/* Decompress the page and apply buffered operations
		while not holding buf_pool_mutex or block->mutex. */
		success = buf_zip_decompress(block, srv_use_checksums);
		ut_a(success);

		if (UNIV_LIKELY(!recv_no_ibuf_operations)) {
			ibuf_merge_or_delete_for_page(block, space, offset,
						      zip_size, TRUE);
		}

		/* Unfix and unlatch the block. */
		//buf_pool_mutex_enter();
		block_mutex = &block->mutex;
		mutex_enter(block_mutex);
		block->page.buf_fix_count--;
		buf_block_set_io_fix(block, BUF_IO_NONE);

		mutex_enter(&buf_pool_mutex);
		buf_pool->n_pend_unzip--;
		mutex_exit(&buf_pool_mutex);
		rw_lock_x_unlock(&block->lock);
		break;

	case BUF_BLOCK_ZIP_FREE:
	case BUF_BLOCK_NOT_USED:
	case BUF_BLOCK_READY_FOR_USE:
	case BUF_BLOCK_MEMORY:
	case BUF_BLOCK_REMOVE_HASH:
		ut_error;
		break;
	}

	ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);

	//mutex_enter(&block->mutex);
#if UNIV_WORD_SIZE == 4
	/* On 32-bit systems, there is no padding in buf_page_t.  On
	other systems, Valgrind could complain about uninitialized pad
	bytes. */
	UNIV_MEM_ASSERT_RW(&block->page, sizeof block->page);
#endif
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
	if (mode == BUF_GET_IF_IN_POOL && ibuf_debug) {
		/* Try to evict the block from the buffer pool, to use the
		insert buffer as much as possible. */

		if (buf_LRU_free_block(&block->page, TRUE, NULL)
		    == BUF_LRU_FREED) {
			buf_pool_mutex_exit();
			mutex_exit(&block->mutex);
			fprintf(stderr,
				"innodb_change_buffering_debug evict %u %u\n",
				(unsigned) space, (unsigned) offset);
			return(NULL);
		} else if (buf_flush_page_try(block)) {
			fprintf(stderr,
				"innodb_change_buffering_debug flush %u %u\n",
				(unsigned) space, (unsigned) offset);
			guess = block;
			goto loop;
		}

		/* Failed to evict the page; change it directly */
	}
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */

	buf_block_buf_fix_inc(block, file, line);

	//mutex_exit(&block->mutex);

	/* Check if this is the first access to the page */

	access_time = buf_page_is_accessed(&block->page);

	//buf_pool_mutex_exit();
	mutex_exit(block_mutex);

	buf_page_set_accessed_make_young(&block->page, access_time);

#ifdef UNIV_DEBUG_FILE_ACCESSES
	ut_a(!block->page.file_page_was_freed);
#endif

#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
	ut_a(++buf_dbg_counter % 5771 || buf_validate());
	ut_a(block->page.buf_fix_count > 0);
	ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */

	switch (rw_latch) {
	case RW_NO_LATCH:
		if (must_read) {
			/* Let us wait until the read operation
			completes */

			if (innobase_get_slow_log() && trx && trx->take_stats)
			{
				ut_usectime(&sec, &ms);
				start_time = (ib_uint64_t)sec * 1000000 + ms;
			} else {
				start_time = 0;
			}
			for (;;) {
				enum buf_io_fix	io_fix;

				mutex_enter(&block->mutex);
				io_fix = buf_block_get_io_fix(block);
				mutex_exit(&block->mutex);

				if (io_fix == BUF_IO_READ) {

					os_thread_sleep(WAIT_FOR_READ);
				} else {
					break;
				}
			}
			if (innobase_get_slow_log() && trx && trx->take_stats && start_time)
			{
				ut_usectime(&sec, &ms);
				finish_time = (ib_uint64_t)sec * 1000000 + ms;
				trx->io_reads_wait_timer += (ulint)(finish_time - start_time);
			}
		}

		fix_type = MTR_MEMO_BUF_FIX;
		break;

	case RW_S_LATCH:
		rw_lock_s_lock_func(&(block->lock), 0, file, line);

		fix_type = MTR_MEMO_PAGE_S_FIX;
		break;

	default:
		ut_ad(rw_latch == RW_X_LATCH);
		rw_lock_x_lock_func(&(block->lock), 0, file, line);

		fix_type = MTR_MEMO_PAGE_X_FIX;
		break;
	}

	mtr_memo_push(mtr, block, fix_type);

	if (!access_time) {
		/* In the case of a first access, try to apply linear
		read-ahead */

		buf_read_ahead_linear(space, zip_size, offset, trx);
	}

#ifdef UNIV_IBUF_COUNT_DEBUG
	ut_a(ibuf_count_get(buf_block_get_space(block),
			    buf_block_get_page_no(block)) == 0);
#endif
	if (innobase_get_slow_log()) {
		_increment_page_get_statistics(block, trx);
	}

	return(block);
}

/********************************************************************//**
This is the general function used to get optimistic access to a database
page.
@return	TRUE if success */
UNIV_INTERN
ibool
buf_page_optimistic_get(
/*====================*/
	ulint		rw_latch,/*!< in: RW_S_LATCH, RW_X_LATCH */
	buf_block_t*	block,	/*!< in: guessed buffer block */
	ib_uint64_t	modify_clock,/*!< in: modify clock value if mode is
				..._GUESS_ON_CLOCK */
	const char*	file,	/*!< in: file name */
	ulint		line,	/*!< in: line where called */
	mtr_t*	mtr)	/*!< in: mini-transaction */
{
	unsigned	access_time;
	ibool		success;
	ulint		fix_type;
	trx_t*		trx = NULL;

	ut_ad(block);
	ut_ad(mtr);
	ut_ad(mtr->state == MTR_ACTIVE);
	ut_ad((rw_latch == RW_S_LATCH) || (rw_latch == RW_X_LATCH));

	mutex_enter(&block->mutex);

	if (UNIV_UNLIKELY(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE)) {

		mutex_exit(&block->mutex);

		return(FALSE);
	}

	buf_block_buf_fix_inc(block, file, line);

	mutex_exit(&block->mutex);

	/* Check if this is the first access to the page.
	We do a dirty read on purpose, to avoid mutex contention.
	This field is only used for heuristic purposes; it does not
	affect correctness. */

	access_time = buf_page_is_accessed(&block->page);
	buf_page_set_accessed_make_young(&block->page, access_time);

	ut_ad(!ibuf_inside()
	      || ibuf_page(buf_block_get_space(block),
			   buf_block_get_zip_size(block),
			   buf_block_get_page_no(block), NULL));

	if (rw_latch == RW_S_LATCH) {
		success = rw_lock_s_lock_nowait(&(block->lock),
						file, line);
		fix_type = MTR_MEMO_PAGE_S_FIX;
	} else {
		success = rw_lock_x_lock_func_nowait(&(block->lock),
						     file, line);
		fix_type = MTR_MEMO_PAGE_X_FIX;
	}

	if (UNIV_UNLIKELY(!success)) {
		mutex_enter(&block->mutex);
		buf_block_buf_fix_dec(block);
		mutex_exit(&block->mutex);

		return(FALSE);
	}

	if (UNIV_UNLIKELY(modify_clock != block->modify_clock)) {
		buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);

		if (rw_latch == RW_S_LATCH) {
			rw_lock_s_unlock(&(block->lock));
		} else {
			rw_lock_x_unlock(&(block->lock));
		}

		mutex_enter(&block->mutex);
		buf_block_buf_fix_dec(block);
		mutex_exit(&block->mutex);

		return(FALSE);
	}

	mtr_memo_push(mtr, block, fix_type);

#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
	ut_a(++buf_dbg_counter % 5771 || buf_validate());
	ut_a(block->page.buf_fix_count > 0);
	ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */

#ifdef UNIV_DEBUG_FILE_ACCESSES
	ut_a(block->page.file_page_was_freed == FALSE);
#endif
	if (innobase_get_slow_log()) {
		trx = innobase_get_trx();
	}

	if (UNIV_UNLIKELY(!access_time)) {
		/* In the case of a first access, try to apply linear
		read-ahead */

		buf_read_ahead_linear(buf_block_get_space(block),
				      buf_block_get_zip_size(block),
				      buf_block_get_page_no(block), trx);
	}

#ifdef UNIV_IBUF_COUNT_DEBUG
	ut_a(ibuf_count_get(buf_block_get_space(block),
			    buf_block_get_page_no(block)) == 0);
#endif
	buf_pool->stat.n_page_gets++;

	if (innobase_get_slow_log()) {
		_increment_page_get_statistics(block, trx);
	}
	return(TRUE);
}

/********************************************************************//**
This is used to get access to a known database page, when no waiting can be
done. For example, if a search in an adaptive hash index leads us to this
frame.
@return	TRUE if success */
UNIV_INTERN
ibool
buf_page_get_known_nowait(
/*======================*/
	ulint		rw_latch,/*!< in: RW_S_LATCH, RW_X_LATCH */
	buf_block_t*	block,	/*!< in: the known page */
	ulint		mode,	/*!< in: BUF_MAKE_YOUNG or BUF_KEEP_OLD */
	const char*	file,	/*!< in: file name */
	ulint		line,	/*!< in: line where called */
	mtr_t*	mtr)	/*!< in: mini-transaction */
{
	ibool		success;
	ulint		fix_type;
	trx_t*		trx = NULL;

	ut_ad(mtr);
	ut_ad(mtr->state == MTR_ACTIVE);
	ut_ad((rw_latch == RW_S_LATCH) || (rw_latch == RW_X_LATCH));

	mutex_enter(&block->mutex);

	if (buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH) {
		/* Another thread is just freeing the block from the LRU list
		of the buffer pool: do not try to access this page; this
		attempt to access the page can only come through the hash
		index because when the buffer block state is ..._REMOVE_HASH,
		we have already removed it from the page address hash table
		of the buffer pool. */

		mutex_exit(&block->mutex);

		return(FALSE);
	}

	ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);

	buf_block_buf_fix_inc(block, file, line);

	mutex_exit(&block->mutex);

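	/* Depending on the mode, either make the block young in the LRU
	list or merely stamp its first access time. */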
	if (mode == BUF_MAKE_YOUNG && buf_page_peek_if_too_old(&block->page)) {
		//buf_pool_mutex_enter();
		mutex_enter(&LRU_list_mutex);
		buf_LRU_make_block_young(&block->page);
		//buf_pool_mutex_exit();
		mutex_exit(&LRU_list_mutex);
	} else if (!buf_page_is_accessed(&block->page)) {
		/* Above, we do a dirty read on purpose, to avoid
		mutex contention.  The field buf_page_t::access_time
		is only used for heuristic purposes.  Writes to the
		field must be protected by mutex, however. */
		ulint	time_ms = ut_time_ms();

		//buf_pool_mutex_enter();
		mutex_enter(&block->mutex);
		buf_page_set_accessed(&block->page, time_ms);
		//buf_pool_mutex_exit();
		mutex_exit(&block->mutex);
	}

	ut_ad(!ibuf_inside() || (mode == BUF_KEEP_OLD));

	if (rw_latch == RW_S_LATCH) {
		success = rw_lock_s_lock_nowait(&(block->lock),
						file, line);
		fix_type = MTR_MEMO_PAGE_S_FIX;
	} else {
		success = rw_lock_x_lock_func_nowait(&(block->lock),
						     file, line);
		fix_type = MTR_MEMO_PAGE_X_FIX;
	}

	if (!success) {
		mutex_enter(&block->mutex);
		buf_block_buf_fix_dec(block);
		mutex_exit(&block->mutex);

		return(FALSE);
	}

	mtr_memo_push(mtr, block, fix_type);

#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
	ut_a(++buf_dbg_counter % 5771 || buf_validate());
	ut_a(block->page.buf_fix_count > 0);
	ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
#ifdef UNIV_DEBUG_FILE_ACCESSES
	ut_a(block->page.file_page_was_freed == FALSE);
#endif

#ifdef UNIV_IBUF_COUNT_DEBUG
	ut_a((mode == BUF_KEEP_OLD)
	     || (ibuf_count_get(buf_block_get_space(block),
				buf_block_get_page_no(block)) == 0));
#endif
	buf_pool->stat.n_page_gets++;

	if (innobase_get_slow_log()) {
		trx = innobase_get_trx();
		_increment_page_get_statistics(block, trx);
	}

	return(TRUE);
}

/*******************************************************************//**
Given a tablespace id and page number tries to get that page. If the
page is not in the buffer pool it is not loaded and NULL is returned.
Suitable for using when holding the kernel mutex.
@return	pointer to a page or NULL */
UNIV_INTERN
const buf_block_t*
buf_page_try_get_func(
/*==================*/
	ulint		space_id,/*!< in: tablespace id */
	ulint		page_no,/*!< in: page number */
	const char*	file,	/*!< in: file name */
	ulint		line,	/*!< in: line where called */
	mtr_t*	mtr)	/*!< in: mini-transaction */
{
	buf_block_t*	block;
	ibool		success;
	ulint		fix_type;

	ut_ad(mtr);
	ut_ad(mtr->state == MTR_ACTIVE);

	//buf_pool_mutex_enter();
	rw_lock_s_lock(&page_hash_latch);
	block = buf_block_hash_get(space_id, page_no);

	if (!block) {
		//buf_pool_mutex_exit();
		rw_lock_s_unlock(&page_hash_latch);
		return(NULL);
	}

	mutex_enter(&block->mutex);
	//buf_pool_mutex_exit();
	rw_lock_s_unlock(&page_hash_latch);

#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
	ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
	ut_a(buf_block_get_space(block) == space_id);
	ut_a(buf_block_get_page_no(block) == page_no);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */

	buf_block_buf_fix_inc(block, file, line);
	mutex_exit(&block->mutex);

	fix_type = MTR_MEMO_PAGE_S_FIX;
	success = rw_lock_s_lock_nowait(&block->lock, file, line);

	if (!success) {
		/* Let us try to get an X-latch. If the current thread
		is holding an X-latch on the page, we cannot get an
		S-latch. */

		fix_type = MTR_MEMO_PAGE_X_FIX;
		success = rw_lock_x_lock_func_nowait(&block->lock,
						     file, line);
	}

	if (!success) {
		mutex_enter(&block->mutex);
		buf_block_buf_fix_dec(block);
		mutex_exit(&block->mutex);

		return(NULL);
	}

	mtr_memo_push(mtr, block, fix_type);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
	ut_a(++buf_dbg_counter % 5771 || buf_validate());
	ut_a(block->page.buf_fix_count > 0);
	ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
#ifdef UNIV_DEBUG_FILE_ACCESSES
	ut_a(block->page.file_page_was_freed == FALSE);
#endif /* UNIV_DEBUG_FILE_ACCESSES */
	buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);

	buf_pool->stat.n_page_gets++;

#ifdef UNIV_IBUF_COUNT_DEBUG
	ut_a(ibuf_count_get(buf_block_get_space(block),
			    buf_block_get_page_no(block)) == 0);
#endif

	return(block);
}

/********************************************************************//**
Initialize some fields of a control block. */
UNIV_INLINE
void
buf_page_init_low(
/*==============*/
	buf_page_t*	bpage)	/*!< in: block to init */
{
	bpage->flush_type = BUF_FLUSH_LRU;
	bpage->io_fix = BUF_IO_NONE;
	bpage->buf_fix_count = 0;
	bpage->freed_page_clock = 0;
	bpage->access_time = 0;
	bpage->newest_modification = 0;
	bpage->oldest_modification = 0;
	HASH_INVALIDATE(bpage, hash);
	bpage->is_corrupt = FALSE;
#ifdef UNIV_DEBUG_FILE_ACCESSES
	bpage->file_page_was_freed = FALSE;
#endif /* UNIV_DEBUG_FILE_ACCESSES */
}

/********************************************************************//**
Inits a page to the buffer buf_pool. */
static
void
buf_page_init(
/*==========*/
	ulint		space,	/*!< in: space id */
	ulint		offset,	/*!< in: offset of the page within space
				in units of a page */
	buf_block_t*	block)	/*!< in: block to init */
{
	buf_page_t*	hash_page;

	//ut_ad(buf_pool_mutex_own());
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&page_hash_latch, RW_LOCK_EX));
#endif
	ut_ad(mutex_own(&(block->mutex)));
	ut_a(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE);

	/* Set the state of the block */
	buf_block_set_file_page(block, space, offset);

#ifdef UNIV_DEBUG_VALGRIND
	if (!space) {
		/* Silence valid Valgrind warnings about uninitialized
		data being written to data files.  There are some unused
		bytes on some pages that InnoDB does not initialize. */
		UNIV_MEM_VALID(block->frame, UNIV_PAGE_SIZE);
	}
#endif /* UNIV_DEBUG_VALGRIND */

	buf_block_init_low(block);

	block->lock_hash_val	= lock_rec_hash(space, offset);

	/* Insert into the hash table of file pages */

	hash_page = buf_page_hash_get(space, offset);

	if (UNIV_LIKELY_NULL(hash_page)) {
		fprintf(stderr,
			"InnoDB: Error: page %lu %lu already found"
			" in the hash table: %p, %p\n",
			(ulong) space,
			(ulong) offset,
			(const void*) hash_page, (const void*) block);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
		mutex_exit(&block->mutex);
		//buf_pool_mutex_exit();
		rw_lock_x_unlock(&page_hash_latch);
		buf_print();
		buf_LRU_print();
		buf_validate();
		buf_LRU_validate();
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
		ut_error;
	}

	buf_page_init_low(&block->page);

	ut_ad(!block->page.in_zip_hash);
	ut_ad(!block->page.in_page_hash);
	ut_d(block->page.in_page_hash = TRUE);
	HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
		    buf_page_address_fold(space, offset), &block->page);
}
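
/* A minimal sketch of how buf_page_init() is called, following its callers
later in this file (buf_page_init_for_read() and buf_page_create()):

	mutex_enter(&LRU_list_mutex);
	rw_lock_x_lock(&page_hash_latch);
	mutex_enter(&block->mutex);

	buf_page_init(space, offset, block);

	rw_lock_x_unlock(&page_hash_latch);
	buf_LRU_add_block(&block->page, TRUE);	(FALSE in buf_page_create())

i.e. block->mutex and an x-latch on page_hash_latch must be held over the
call, and the block is linked into the LRU list right afterwards. */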

/********************************************************************//**
Initializes a page for a read into the buffer buf_pool. If
(1) the page is already in buf_pool, or
(2) we are asked to read only ibuf pages and the page is not an ibuf page, or
(3) the space is deleted or being deleted,
then this function does nothing.
Sets the io_fix flag to BUF_IO_READ and sets a non-recursive exclusive lock
on the buffer frame. The io-handler must take care that the flag is cleared
and the lock released later.
@return	pointer to the block or NULL */
UNIV_INTERN
buf_page_t*
buf_page_init_for_read(
/*===================*/
	ulint*		err,	/*!< out: DB_SUCCESS or DB_TABLESPACE_DELETED */
	ulint		mode,	/*!< in: BUF_READ_IBUF_PAGES_ONLY, ... */
	ulint		space,	/*!< in: space id */
	ulint		zip_size,/*!< in: compressed page size, or 0 */
	ibool		unzip,	/*!< in: TRUE=request uncompressed page */
	ib_int64_t	tablespace_version,/*!< in: prevents reading from a wrong
				version of the tablespace in case we have done
				DISCARD + IMPORT */
	ulint		offset)	/*!< in: page number */
{
	buf_block_t*	block;
	buf_page_t*	bpage;
	mtr_t		mtr;
	ibool		lru	= FALSE;
	void*		data;

	ut_ad(buf_pool);

	*err = DB_SUCCESS;

	if (mode == BUF_READ_IBUF_PAGES_ONLY) {
		/* It is a read-ahead within an ibuf routine */

		ut_ad(!ibuf_bitmap_page(zip_size, offset));
		ut_ad(ibuf_inside());

		mtr_start(&mtr);

		if (!recv_no_ibuf_operations
		    && !ibuf_page(space, zip_size, offset, &mtr)) {

			mtr_commit(&mtr);

			return(NULL);
		}
	} else {
		ut_ad(mode == BUF_READ_ANY_PAGE);
	}

	if (zip_size && UNIV_LIKELY(!unzip)
	    && UNIV_LIKELY(!recv_recovery_is_on())) {
		block = NULL;
	} else {
		block = buf_LRU_get_free_block(0);
		ut_ad(block);
	}

	//buf_pool_mutex_enter();
	mutex_enter(&LRU_list_mutex);
	rw_lock_x_lock(&page_hash_latch);

	if (buf_page_hash_get(space, offset)) {
		/* The page is already in the buffer pool. */
err_exit:
		if (block) {
			mutex_enter(&block->mutex);
			mutex_exit(&LRU_list_mutex);
			rw_lock_x_unlock(&page_hash_latch);
			buf_LRU_block_free_non_file_page(block, FALSE);
			mutex_exit(&block->mutex);
		}
		else {
			mutex_exit(&LRU_list_mutex);
			rw_lock_x_unlock(&page_hash_latch);
		}

		bpage = NULL;
		goto func_exit;
	}

	if (fil_tablespace_deleted_or_being_deleted_in_mem(
		    space, tablespace_version)) {
		/* The page belongs to a space which has been
		deleted or is being deleted. */
		*err = DB_TABLESPACE_DELETED;

		goto err_exit;
	}

	if (block) {
		bpage = &block->page;
		mutex_enter(&block->mutex);
		buf_page_init(space, offset, block);

		rw_lock_x_unlock(&page_hash_latch);

		/* The block must be put to the LRU list, to the old blocks */
		buf_LRU_add_block(bpage, TRUE/* to old blocks */);

		/* We set a pass-type x-lock on the frame because then
		the same thread which called for the read operation
		(and is running now at this point of code) can wait
		for the read to complete by waiting for the x-lock on
		the frame; if the x-lock were recursive, the same
		thread would illegally get the x-lock before the page
		read is completed.  The x-lock is cleared by the
		io-handler thread. */

		rw_lock_x_lock_gen(&block->lock, BUF_IO_READ);
		buf_page_set_io_fix(bpage, BUF_IO_READ);

		if (UNIV_UNLIKELY(zip_size)) {
			page_zip_set_size(&block->page.zip, zip_size);

			/* buf_pool_mutex may be released and
			reacquired by buf_buddy_alloc().  Thus, we
			must release block->mutex in order not to
			break the latching order in the reacquisition
			of buf_pool_mutex.  We also must defer this
			operation until after the block descriptor has
			been added to buf_pool->LRU and
			buf_pool->page_hash. */
			mutex_exit(&block->mutex);
			data = buf_buddy_alloc(zip_size, &lru, FALSE);
			mutex_enter(&block->mutex);
			block->page.zip.data = data;

			/* To maintain the invariant
			block->in_unzip_LRU_list
			== buf_page_belongs_to_unzip_LRU(&block->page)
			we have to add this block to unzip_LRU
			after block->page.zip.data is set. */
			ut_ad(buf_page_belongs_to_unzip_LRU(&block->page));
			buf_unzip_LRU_add_block(block, TRUE);
		}

		mutex_exit(&LRU_list_mutex);
		mutex_exit(&block->mutex);
	} else {
		/* Defer buf_buddy_alloc() until after the block has
		been found not to exist.  The buf_buddy_alloc() and
		buf_buddy_free() calls may be expensive because of
		buf_buddy_relocate(). */

		/* The compressed page must be allocated before the
		control block (bpage), in order to avoid the
		invocation of buf_buddy_relocate_block() on
		uninitialized data. */
		data = buf_buddy_alloc(zip_size, &lru, TRUE);
		bpage = buf_buddy_alloc(sizeof *bpage, &lru, TRUE);

		/* If buf_buddy_alloc() allocated storage from the LRU list,
		it released and reacquired buf_pool_mutex.  Thus, we must
		check the page_hash again, as it may have been modified. */
		if (UNIV_UNLIKELY(lru)
		    && UNIV_LIKELY_NULL(buf_page_hash_get(space, offset))) {

			/* The block was added by some other thread. */
			buf_buddy_free(bpage, sizeof *bpage, TRUE);
			buf_buddy_free(data, zip_size, TRUE);

			mutex_exit(&LRU_list_mutex);
			rw_lock_x_unlock(&page_hash_latch);

			bpage = NULL;
			goto func_exit;
		}

		page_zip_des_init(&bpage->zip);
		page_zip_set_size(&bpage->zip, zip_size);
		bpage->zip.data = data;

		mutex_enter(&buf_pool_zip_mutex);
		UNIV_MEM_DESC(bpage->zip.data,
			      page_zip_get_size(&bpage->zip), bpage);
		buf_page_init_low(bpage);
		bpage->state	= BUF_BLOCK_ZIP_PAGE;
		bpage->space	= space;
		bpage->offset	= offset;

#ifdef UNIV_DEBUG
		bpage->in_page_hash = FALSE;
		bpage->in_zip_hash = FALSE;
		bpage->in_flush_list = FALSE;
		bpage->in_free_list = FALSE;
#endif /* UNIV_DEBUG */
		bpage->in_LRU_list = FALSE;

		ut_d(bpage->in_page_hash = TRUE);
		HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
			    buf_page_address_fold(space, offset), bpage);

		rw_lock_x_unlock(&page_hash_latch);

		/* The block must be put to the LRU list, to the old blocks */
		buf_LRU_add_block(bpage, TRUE/* to old blocks */);
		mutex_enter(&flush_list_mutex);
		buf_LRU_insert_zip_clean(bpage);
		mutex_exit(&flush_list_mutex);

		mutex_exit(&LRU_list_mutex);

		buf_page_set_io_fix(bpage, BUF_IO_READ);

		mutex_exit(&buf_pool_zip_mutex);
	}

	mutex_enter(&buf_pool_mutex);
	buf_pool->n_pend_reads++;
	mutex_exit(&buf_pool_mutex);
func_exit:
	//buf_pool_mutex_exit();

	if (mode == BUF_READ_IBUF_PAGES_ONLY) {

		mtr_commit(&mtr);
	}

	ut_ad(!bpage || buf_page_in_file(bpage));
	return(bpage);
}
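
/* A minimal usage sketch of buf_page_init_for_read(); the usual caller is
buf_read_page_low() in buf0rea.c, which does roughly the following (error
handling and the actual i/o call are left out here):

	bpage = buf_page_init_for_read(&err, BUF_READ_ANY_PAGE, space,
				       zip_size, FALSE, tablespace_version,
				       offset);
	if (bpage == NULL) {
		return(0);	(already in the pool, or space deleted)
	}

	... issue the fil_io() read for the frame or the zip page ...

	if (sync) {
		buf_page_io_complete(bpage, trx);
	}

The mode, unzip, sync and trx values above are illustrative only; see
buf0rea.c for the real caller. */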

/********************************************************************//**
Initializes a page to the buffer buf_pool. The page is usually not read
from a file even if it cannot be found in the buffer buf_pool. This is one
of the functions which perform a block state transition NOT_USED =>
FILE_PAGE (the other is buf_page_get_gen).
@return	pointer to the block, page bufferfixed */
UNIV_INTERN
buf_block_t*
buf_page_create(
/*============*/
	ulint	space,	/*!< in: space id */
	ulint	offset,	/*!< in: offset of the page within space in units of
			a page */
	ulint	zip_size,/*!< in: compressed page size, or 0 */
	mtr_t*	mtr)	/*!< in: mini-transaction handle */
{
	buf_frame_t*	frame;
	buf_block_t*	block;
	buf_block_t*	free_block	= NULL;
	ulint		time_ms		= ut_time_ms();

	ut_ad(mtr);
	ut_ad(mtr->state == MTR_ACTIVE);
	ut_ad(space || !zip_size);

	free_block = buf_LRU_get_free_block(0);

	//buf_pool_mutex_enter();
	mutex_enter(&LRU_list_mutex);
	rw_lock_x_lock(&page_hash_latch);

	block = (buf_block_t*) buf_page_hash_get(space, offset);

	if (block && buf_page_in_file(&block->page)) {
#ifdef UNIV_IBUF_COUNT_DEBUG
		ut_a(ibuf_count_get(space, offset) == 0);
#endif
#ifdef UNIV_DEBUG_FILE_ACCESSES
		block->page.file_page_was_freed = FALSE;
#endif /* UNIV_DEBUG_FILE_ACCESSES */

		/* Page can be found in buf_pool */
		//buf_pool_mutex_exit();
		mutex_exit(&LRU_list_mutex);
		rw_lock_x_unlock(&page_hash_latch);

		buf_block_free(free_block);

		return(buf_page_get_with_no_latch(space, zip_size,
						  offset, mtr));
	}

	/* If we get here, the page was not in buf_pool: init it there */

#ifdef UNIV_DEBUG
	if (buf_debug_prints) {
		fprintf(stderr, "Creating space %lu page %lu to buffer\n",
			(ulong) space, (ulong) offset);
	}
#endif /* UNIV_DEBUG */

	block = free_block;

	mutex_enter(&block->mutex);

	buf_page_init(space, offset, block);
	rw_lock_x_unlock(&page_hash_latch);

	/* The block must be put to the LRU list */
	buf_LRU_add_block(&block->page, FALSE);

	buf_block_buf_fix_inc(block, __FILE__, __LINE__);
	buf_pool->stat.n_pages_created++;

	if (zip_size) {
		void*	data;
		ibool	lru;

		/* Prevent race conditions during buf_buddy_alloc(),
		which may release and reacquire buf_pool_mutex,
		by IO-fixing and X-latching the block. */

		buf_page_set_io_fix(&block->page, BUF_IO_READ);
		rw_lock_x_lock(&block->lock);

		page_zip_set_size(&block->page.zip, zip_size);
		mutex_exit(&block->mutex);
		/* buf_pool_mutex may be released and reacquired by
		buf_buddy_alloc().  Thus, we must release block->mutex
		in order not to break the latching order in
		the reacquisition of buf_pool_mutex.  We also must
		defer this operation until after the block descriptor
		has been added to buf_pool->LRU and buf_pool->page_hash. */
		data = buf_buddy_alloc(zip_size, &lru, FALSE);
		mutex_enter(&block->mutex);
		block->page.zip.data = data;

		/* To maintain the invariant
		block->in_unzip_LRU_list
		== buf_page_belongs_to_unzip_LRU(&block->page)
		we have to add this block to unzip_LRU after
		block->page.zip.data is set. */
		ut_ad(buf_page_belongs_to_unzip_LRU(&block->page));
		buf_unzip_LRU_add_block(block, FALSE);

		buf_page_set_io_fix(&block->page, BUF_IO_NONE);
		rw_lock_x_unlock(&block->lock);
	}

	buf_page_set_accessed(&block->page, time_ms);

	//buf_pool_mutex_exit();
	mutex_exit(&LRU_list_mutex);

	mtr_memo_push(mtr, block, MTR_MEMO_BUF_FIX);

	mutex_exit(&block->mutex);

	/* Delete possible entries for the page from the insert buffer:
	such can exist if the page belonged to an index which was dropped */

	ibuf_merge_or_delete_for_page(NULL, space, offset, zip_size, TRUE);

	/* Flush pages from the end of the LRU list if necessary */
	buf_flush_free_margin(FALSE);

	frame = block->frame;

	memset(frame + FIL_PAGE_PREV, 0xff, 4);
	memset(frame + FIL_PAGE_NEXT, 0xff, 4);
	mach_write_to_2(frame + FIL_PAGE_TYPE, FIL_PAGE_TYPE_ALLOCATED);

	/* Reset to zero the file flush lsn field in the page; if the first
	page of an ibdata file is 'created' in this function into the buffer
	pool then we lose the original contents of the file flush lsn stamp.
	Then InnoDB could in a crash recovery print a big, false, corruption
	warning if the stamp contains an lsn bigger than the ib_logfile lsn. */

	memset(frame + FIL_PAGE_FILE_FLUSH_LSN, 0, 8);

#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
	ut_a(++buf_dbg_counter % 357 || buf_validate());
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
#ifdef UNIV_IBUF_COUNT_DEBUG
	ut_a(ibuf_count_get(buf_block_get_space(block),
			    buf_block_get_page_no(block)) == 0);
#endif
	return(block);
}
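
/* A minimal usage sketch of buf_page_create(), assuming a caller that has
just allocated a new page (space, page_no) inside a mini-transaction, in
the style of the space management code in fsp0fsp.c:

	mtr_start(&mtr);

	block = buf_page_create(space, page_no, zip_size, &mtr);
	buf_block_dbg_add_level(block, SYNC_FSP_PAGE);

	... initialize the frame, e.g. with fsp_init_file_page() ...

	mtr_commit(&mtr);

The SYNC_FSP_PAGE level and fsp_init_file_page() belong to the space
management code and are used here only as an example. */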

/********************************************************************//**
Completes an asynchronous read or write request of a file page to or from
the buffer pool. */
UNIV_INTERN
void
buf_page_io_complete(
/*=================*/
	buf_page_t*	bpage,	/*!< in: pointer to the block in question */
	trx_t*		trx)	/*!< in: transaction requesting the i/o, or NULL */
{
	enum buf_io_fix	io_type;
	const ibool	uncompressed = (buf_page_get_state(bpage)
					== BUF_BLOCK_FILE_PAGE);
	enum buf_flush	flush_type;
	mutex_t*	block_mutex;

	ut_a(buf_page_in_file(bpage));

	/* We do not need protect io_fix here by mutex to read
	it because this is the only function where we can change the value
	from BUF_IO_READ or BUF_IO_WRITE to some other value, and our code
	ensures that this is the only thread that handles the i/o for this
	block. */

	io_type = buf_page_get_io_fix(bpage);
	ut_ad(io_type == BUF_IO_READ || io_type == BUF_IO_WRITE);

	if (io_type == BUF_IO_READ) {
		ulint	read_page_no;
		ulint	read_space_id;
		byte*	frame;

		if (buf_page_get_zip_size(bpage)) {
			frame = bpage->zip.data;
			buf_pool->n_pend_unzip++;
			if (uncompressed
			    && !buf_zip_decompress((buf_block_t*) bpage,
						   FALSE)) {

				buf_pool->n_pend_unzip--;
				goto corrupt;
			}
			buf_pool->n_pend_unzip--;
		} else {
			ut_a(uncompressed);
			frame = ((buf_block_t*) bpage)->frame;
		}

		/* If this page is not uninitialized and not in the
		doublewrite buffer, then the page number and space id
		should be the same as in block. */
		read_page_no = mach_read_from_4(frame + FIL_PAGE_OFFSET);
		read_space_id = mach_read_from_4(
			frame + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);

		if ((bpage->space == TRX_SYS_SPACE
		     || (srv_doublewrite_file && bpage->space == TRX_DOUBLEWRITE_SPACE))
		    && trx_doublewrite_page_inside(bpage->offset)) {

			ut_print_timestamp(stderr);
			fprintf(stderr,
				"  InnoDB: Error: reading page %lu\n"
				"InnoDB: which is in the"
				" doublewrite buffer!\n",
				(ulong) bpage->offset);
		} else if (!read_space_id && !read_page_no) {
			/* This is likely an uninitialized page. */
		} else if ((bpage->space
			    && bpage->space != read_space_id)
			   || bpage->offset != read_page_no) {
			/* We did not compare space_id to read_space_id
			if bpage->space == 0, because the field on the
			page may contain garbage in MySQL < 4.1.1,
			which only supported bpage->space == 0. */

			ut_print_timestamp(stderr);
			fprintf(stderr,
				"  InnoDB: Error: space id and page n:o"
				" stored in the page\n"
				"InnoDB: read in are %lu:%lu,"
				" should be %lu:%lu!\n",
				(ulong) read_space_id, (ulong) read_page_no,
				(ulong) bpage->space,
				(ulong) bpage->offset);
		}

		if (!srv_pass_corrupt_table || !bpage->is_corrupt) {
		/* From version 3.23.38 up we store the page checksum
		to the 4 first bytes of the page end lsn field */

		if (buf_page_is_corrupted(frame,
					  buf_page_get_zip_size(bpage))) {
corrupt:
			fprintf(stderr,
				"InnoDB: Database page corruption on disk"
				" or a failed\n"
				"InnoDB: file read of page %lu.\n"
				"InnoDB: You may have to recover"
				" from a backup.\n",
				(ulong) bpage->offset);
			buf_page_print(frame, buf_page_get_zip_size(bpage));
			fprintf(stderr,
				"InnoDB: Database page corruption on disk"
				" or a failed\n"
				"InnoDB: file read of page %lu.\n"
				"InnoDB: You may have to recover"
				" from a backup.\n",
				(ulong) bpage->offset);
			fputs("InnoDB: It is also possible that"
			      " your operating\n"
			      "InnoDB: system has corrupted its"
			      " own file cache\n"
			      "InnoDB: and rebooting your computer"
			      " removes the\n"
			      "InnoDB: error.\n"
			      "InnoDB: If the corrupt page is an index page\n"
			      "InnoDB: you can also try to"
			      " fix the corruption\n"
			      "InnoDB: by dumping, dropping,"
			      " and reimporting\n"
			      "InnoDB: the corrupt table."
			      " You can use CHECK\n"
			      "InnoDB: TABLE to scan your"
			      " table for corruption.\n"
			      "InnoDB: See also "
			      REFMAN "forcing-recovery.html\n"
			      "InnoDB: about forcing recovery.\n", stderr);

			if (srv_pass_corrupt_table && !trx_sys_sys_space(bpage->space)
			    && bpage->space < SRV_LOG_SPACE_FIRST_ID) {
				fprintf(stderr,
					"InnoDB: space %u will be treated as corrupt.\n",
					bpage->space);
				fil_space_set_corrupt(bpage->space);
				if (trx && trx->dict_operation_lock_mode == 0) {
					dict_table_set_corrupt_by_space(bpage->space, TRUE);
				} else {
					dict_table_set_corrupt_by_space(bpage->space, FALSE);
				}
				bpage->is_corrupt = TRUE;
			} else
			if (srv_force_recovery < SRV_FORCE_IGNORE_CORRUPT) {
				fputs("InnoDB: Ending processing because of"
				      " a corrupt database page.\n",
				      stderr);
				exit(1);
			}
		}
		} /* !srv_pass_corrupt_table || !bpage->is_corrupt */

		if (recv_recovery_is_on()) {
			/* Pages must be uncompressed for crash recovery. */
			ut_a(uncompressed);
			recv_recover_page(TRUE, (buf_block_t*) bpage);
		}

		if (uncompressed && !recv_no_ibuf_operations) {
			ibuf_merge_or_delete_for_page(
				/* Delete possible entries, if bpage is_corrupt */
				(srv_pass_corrupt_table && bpage->is_corrupt) ? NULL :
				(buf_block_t*) bpage, bpage->space,
				bpage->offset, buf_page_get_zip_size(bpage),
				(srv_pass_corrupt_table && bpage->is_corrupt) ? FALSE :
				TRUE);
		}
	}

	//buf_pool_mutex_enter();
	if (io_type == BUF_IO_WRITE) {
		flush_type = buf_page_get_flush_type(bpage);
		/* to keep consistency at buf_LRU_insert_zip_clean() */
		//if (flush_type == BUF_FLUSH_LRU) { /* optimistic! */
			mutex_enter(&LRU_list_mutex);
		//}
	}
	block_mutex = buf_page_get_mutex_enter(bpage);
	ut_a(block_mutex);
	mutex_enter(&buf_pool_mutex);

#ifdef UNIV_IBUF_COUNT_DEBUG
	if (io_type == BUF_IO_WRITE || uncompressed) {
		/* For BUF_IO_READ of compressed-only blocks, the
		buffered operations will be merged by buf_page_get_gen()
		after the block has been uncompressed. */
		ut_a(ibuf_count_get(bpage->space, bpage->offset) == 0);
	}
#endif
	/* Because this thread which does the unlocking is not the same that
	did the locking, we use a pass value != 0 in unlock, which simply
	removes the newest lock debug record, without checking the thread
	id. */

	buf_page_set_io_fix(bpage, BUF_IO_NONE);

	switch (io_type) {
	case BUF_IO_READ:
		/* NOTE that the call to ibuf may have moved the ownership of
		the x-latch to this OS thread: do not let this confuse you in
		debugging! */

		ut_ad(buf_pool->n_pend_reads > 0);
		buf_pool->n_pend_reads--;
		buf_pool->stat.n_pages_read++;

		if (uncompressed) {
			rw_lock_x_unlock_gen(&((buf_block_t*) bpage)->lock,
					     BUF_IO_READ);
		}

		break;

	case BUF_IO_WRITE:
		/* Write means a flush operation: call the completion
		routine in the flush system */

		buf_flush_write_complete(bpage);

		/* to keep consistency at buf_LRU_insert_zip_clean() */
		//if (flush_type == BUF_FLUSH_LRU) { /* optimistic! */
			mutex_exit(&LRU_list_mutex);
		//}

		if (uncompressed) {
			rw_lock_s_unlock_gen(&((buf_block_t*) bpage)->lock,
					     BUF_IO_WRITE);
		}

		buf_pool->stat.n_pages_written++;

		break;

	default:
		ut_error;
	}

#ifdef UNIV_DEBUG
	if (buf_debug_prints) {
		fprintf(stderr, "Has %s page space %lu page no %lu\n",
			io_type == BUF_IO_READ ? "read" : "written",
			(ulong) buf_page_get_space(bpage),
			(ulong) buf_page_get_page_no(bpage));
	}
#endif /* UNIV_DEBUG */

	mutex_exit(&buf_pool_mutex);
	mutex_exit(block_mutex);
	//buf_pool_mutex_exit();
}
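
/* A sketch of how buf_page_io_complete() is normally reached, assuming the
asynchronous i/o handler threads: fil_aio_wait() in fil0fil.c picks up a
completed request and calls this function for the block involved, roughly

	fil_aio_wait(segment);
		-> buf_page_io_complete(bpage, NULL);

The trx argument may be NULL when the completion is not tied to a
particular transaction. */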

/*********************************************************************//**
Invalidates the file pages in the buffer pool when an archive recovery is
completed. All the file pages buffered must be in a replaceable state when
this function is called: not latched and not modified. */
UNIV_INTERN
void
buf_pool_invalidate(void)
/*=====================*/
{
	ibool		freed;
	enum buf_flush	i;

	buf_pool_mutex_enter();

	for (i = BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; i++) {

		/* As this function is called during startup and
		during redo application phase during recovery, InnoDB
		is single threaded (apart from IO helper threads) at
		this stage. No new write batch can be in initialization
		stage at this point. */
		ut_ad(buf_pool->init_flush[i] == FALSE);

		/* However, it is possible that a write batch that has
		been posted earlier is still not complete. For buffer
		pool invalidation to proceed we must ensure there is NO
		write activity happening. */
		if (buf_pool->n_flush[i] > 0) {
			buf_pool_mutex_exit();
			buf_flush_wait_batch_end(i);
			buf_pool_mutex_enter();
		}
	}

	buf_pool_mutex_exit();

	ut_ad(buf_all_freed());

	freed = TRUE;

	while (freed) {
		freed = buf_LRU_search_and_free_block(100);
	}

	//buf_pool_mutex_enter();
	mutex_enter(&LRU_list_mutex);

	ut_ad(UT_LIST_GET_LEN(buf_pool->LRU) == 0);
	ut_ad(UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0);

	buf_pool->freed_page_clock = 0;
	buf_pool->LRU_old = NULL;
	buf_pool->LRU_old_len = 0;
	buf_pool->LRU_flush_ended = 0;

	memset(&buf_pool->stat, 0x00, sizeof(buf_pool->stat));
	buf_refresh_io_stats();

	//buf_pool_mutex_exit();
	mutex_exit(&LRU_list_mutex);
}

#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/*********************************************************************//**
Validates the buffer buf_pool data structure.
@return	TRUE */
UNIV_INTERN
ibool
buf_validate(void)
/*==============*/
{
	buf_page_t*	b;
	buf_chunk_t*	chunk;
	ulint		i;
	ulint		n_single_flush	= 0;
	ulint		n_lru_flush	= 0;
	ulint		n_list_flush	= 0;
	ulint		n_lru		= 0;
	ulint		n_flush		= 0;
	ulint		n_free		= 0;
	ulint		n_zip		= 0;

	ut_ad(buf_pool);

	//buf_pool_mutex_enter();
	mutex_enter(&LRU_list_mutex);
	rw_lock_x_lock(&page_hash_latch);
	/* Because of the new latching order, this function can no longer
	validate everything consistently... */

	chunk = buf_pool->chunks;

	/* Check the uncompressed blocks. */

	for (i = buf_pool->n_chunks; i--; chunk++) {

		ulint		j;
		buf_block_t*	block = chunk->blocks;

		for (j = chunk->size; j--; block++) {

			mutex_enter(&block->mutex);

			switch (buf_block_get_state(block)) {
			case BUF_BLOCK_ZIP_FREE:
			case BUF_BLOCK_ZIP_PAGE:
			case BUF_BLOCK_ZIP_DIRTY:
				/* These should only occur on
				zip_clean, zip_free[], or flush_list. */
				ut_error;
				break;

			case BUF_BLOCK_FILE_PAGE:
				ut_a(buf_page_hash_get(buf_block_get_space(
							       block),
						       buf_block_get_page_no(
							       block))
				     == &block->page);

#ifdef UNIV_IBUF_COUNT_DEBUG
				ut_a(buf_page_get_io_fix(&block->page)
				     == BUF_IO_READ
				     || !ibuf_count_get(buf_block_get_space(
								block),
							buf_block_get_page_no(
								block)));
#endif
				switch (buf_page_get_io_fix(&block->page)) {
				case BUF_IO_NONE:
					break;

				case BUF_IO_WRITE:
					switch (buf_page_get_flush_type(
							&block->page)) {
					case BUF_FLUSH_LRU:
						n_lru_flush++;
						ut_a(rw_lock_is_locked(
							     &block->lock,
							     RW_LOCK_SHARED));
						break;
					case BUF_FLUSH_LIST:
						n_list_flush++;
						break;
					case BUF_FLUSH_SINGLE_PAGE:
						n_single_flush++;
						break;
					default:
						ut_error;
					}

					break;

				case BUF_IO_READ:

					ut_a(rw_lock_is_locked(&block->lock,
							       RW_LOCK_EX));
					break;
				}

				n_lru++;

				if (block->page.oldest_modification > 0) {
					n_flush++;
				}

				break;

			case BUF_BLOCK_NOT_USED:
				n_free++;
				break;

			case BUF_BLOCK_READY_FOR_USE:
			case BUF_BLOCK_MEMORY:
			case BUF_BLOCK_REMOVE_HASH:
				/* do nothing */
				break;
			}

			mutex_exit(&block->mutex);
		}
	}

	mutex_enter(&buf_pool_zip_mutex);

	/* Check clean compressed-only blocks. */

	for (b = UT_LIST_GET_FIRST(buf_pool->zip_clean); b;
	     b = UT_LIST_GET_NEXT(zip_list, b)) {
		ut_a(buf_page_get_state(b) == BUF_BLOCK_ZIP_PAGE);
		switch (buf_page_get_io_fix(b)) {
		case BUF_IO_NONE:
			/* All clean blocks should be I/O-unfixed. */
			break;
		case BUF_IO_READ:
			/* In buf_LRU_free_block(), we temporarily set
			b->io_fix = BUF_IO_READ for a newly allocated
			control block in order to prevent
			buf_page_get_gen() from decompressing the block. */
			break;
		default:
			ut_error;
			break;
		}
		ut_a(!b->oldest_modification);
		ut_a(buf_page_hash_get(b->space, b->offset) == b);

		n_lru++;
		n_zip++;
	}

	/* Check dirty compressed-only blocks. */

	mutex_enter(&flush_list_mutex);
	for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b;
	     b = UT_LIST_GET_NEXT(flush_list, b)) {
		ut_ad(b->in_flush_list);

		switch (buf_page_get_state(b)) {
		case BUF_BLOCK_ZIP_DIRTY:
			ut_a(b->oldest_modification);
			n_lru++;
			n_flush++;
			n_zip++;
			switch (buf_page_get_io_fix(b)) {
			case BUF_IO_NONE:
			case BUF_IO_READ:
				break;

			case BUF_IO_WRITE:
				switch (buf_page_get_flush_type(b)) {
				case BUF_FLUSH_LRU:
					n_lru_flush++;
					break;
				case BUF_FLUSH_LIST:
					n_list_flush++;
					break;
				case BUF_FLUSH_SINGLE_PAGE:
					n_single_flush++;
					break;
				default:
					ut_error;
				}
				break;
			}
			break;
		case BUF_BLOCK_FILE_PAGE:
			/* uncompressed page */
			break;
		case BUF_BLOCK_ZIP_FREE:
		case BUF_BLOCK_ZIP_PAGE:
		case BUF_BLOCK_NOT_USED:
		case BUF_BLOCK_READY_FOR_USE:
		case BUF_BLOCK_MEMORY:
		case BUF_BLOCK_REMOVE_HASH:
			ut_error;
			break;
		}
		ut_a(buf_page_hash_get(b->space, b->offset) == b);
	}
	mutex_exit(&flush_list_mutex);

	mutex_exit(&buf_pool_zip_mutex);

	if (n_lru + n_free > buf_pool->curr_size + n_zip) {
		fprintf(stderr, "n LRU %lu, n free %lu, pool %lu zip %lu\n",
			(ulong) n_lru, (ulong) n_free,
			(ulong) buf_pool->curr_size, (ulong) n_zip);
		ut_error;
	}

	ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == n_lru);
	/* Because of the latching order with block->mutex, we cannot
	acquire free_list_mutex before this point. */
/*
	if (UT_LIST_GET_LEN(buf_pool->free) != n_free) {
		fprintf(stderr, "Free list len %lu, free blocks %lu\n",
			(ulong) UT_LIST_GET_LEN(buf_pool->free),
			(ulong) n_free);
		ut_error;
	}
*/
	/* Because of the latching order with block->mutex, we cannot
	acquire flush_list_mutex before this point. */
/*
	ut_a(UT_LIST_GET_LEN(buf_pool->flush_list) == n_flush);

	ut_a(buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE] == n_single_flush);
	ut_a(buf_pool->n_flush[BUF_FLUSH_LIST] == n_list_flush);
	ut_a(buf_pool->n_flush[BUF_FLUSH_LRU] == n_lru_flush);
*/

	//buf_pool_mutex_exit();
	mutex_exit(&LRU_list_mutex);
	rw_lock_x_unlock(&page_hash_latch);

	ut_a(buf_LRU_validate());
	ut_a(buf_flush_validate());

	return(TRUE);
}
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */

#if defined UNIV_DEBUG_PRINT || defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/*********************************************************************//**
Prints info of the buffer buf_pool data structure. */
UNIV_INTERN
void
buf_print(void)
/*===========*/
{
	dulint*		index_ids;
	ulint*		counts;
	ulint		size;
	ulint		i;
	ulint		j;
	dulint		id;
	ulint		n_found;
	buf_chunk_t*	chunk;
	dict_index_t*	index;

	ut_ad(buf_pool);

	size = buf_pool->curr_size;

	index_ids = mem_alloc(sizeof(dulint) * size);
	counts = mem_alloc(sizeof(ulint) * size);

	//buf_pool_mutex_enter();
	mutex_enter(&LRU_list_mutex);
	mutex_enter(&free_list_mutex);
	mutex_enter(&flush_list_mutex);

	fprintf(stderr,
		"buf_pool size %lu\n"
		"database pages %lu\n"
		"free pages %lu\n"
		"modified database pages %lu\n"
		"n pending decompressions %lu\n"
		"n pending reads %lu\n"
		"n pending flush LRU %lu list %lu single page %lu\n"
		"pages made young %lu, not young %lu\n"
		"pages read %lu, created %lu, written %lu\n",
		(ulong) size,
		(ulong) UT_LIST_GET_LEN(buf_pool->LRU),
		(ulong) UT_LIST_GET_LEN(buf_pool->free),
		(ulong) UT_LIST_GET_LEN(buf_pool->flush_list),
		(ulong) buf_pool->n_pend_unzip,
		(ulong) buf_pool->n_pend_reads,
		(ulong) buf_pool->n_flush[BUF_FLUSH_LRU],
		(ulong) buf_pool->n_flush[BUF_FLUSH_LIST],
		(ulong) buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE],
		(ulong) buf_pool->stat.n_pages_made_young,
		(ulong) buf_pool->stat.n_pages_not_made_young,
		(ulong) buf_pool->stat.n_pages_read,
		(ulong) buf_pool->stat.n_pages_created,
		(ulong) buf_pool->stat.n_pages_written);

	/* Count the number of blocks belonging to each index in the buffer */

	n_found = 0;

	chunk = buf_pool->chunks;

	for (i = buf_pool->n_chunks; i--; chunk++) {
		buf_block_t*	block		= chunk->blocks;
		ulint		n_blocks	= chunk->size;

		for (; n_blocks--; block++) {
			const buf_frame_t* frame = block->frame;

			if (fil_page_get_type(frame) == FIL_PAGE_INDEX) {

				id = btr_page_get_index_id(frame);

				/* Look for the id in the index_ids array */
				j = 0;

				while (j < n_found) {

					if (ut_dulint_cmp(index_ids[j],
							  id) == 0) {
						counts[j]++;

						break;
					}
					j++;
				}

				if (j == n_found) {
					n_found++;
					index_ids[j] = id;
					counts[j] = 1;
				}
			}
		}
	}

	//buf_pool_mutex_exit();
	mutex_exit(&LRU_list_mutex);
	mutex_exit(&free_list_mutex);
	mutex_exit(&flush_list_mutex);

	for (i = 0; i < n_found; i++) {
		index = dict_index_get_if_in_cache(index_ids[i]);

		fprintf(stderr,
			"Block count for index %lu in buffer is about %lu",
			(ulong) ut_dulint_get_low(index_ids[i]),
			(ulong) counts[i]);

		if (index) {
			putc(' ', stderr);
			dict_index_name_print(stderr, NULL, index);
		}

		putc('\n', stderr);
	}

	mem_free(index_ids);
	mem_free(counts);

	ut_a(buf_validate());
}
#endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */

#ifdef UNIV_DEBUG
/*********************************************************************//**
Returns the number of latched pages in the buffer pool.
@return	number of latched pages */
UNIV_INTERN
ulint
buf_get_latched_pages_number(void)
/*==============================*/
{
	buf_chunk_t*	chunk;
	buf_page_t*	b;
	ulint		i;
	ulint		fixed_pages_number = 0;

	//buf_pool_mutex_enter();

	chunk = buf_pool->chunks;

	for (i = buf_pool->n_chunks; i--; chunk++) {
		buf_block_t*	block;
		ulint		j;

		block = chunk->blocks;

		for (j = chunk->size; j--; block++) {
			if (buf_block_get_state(block)
			    != BUF_BLOCK_FILE_PAGE) {

				continue;
			}

			mutex_enter(&block->mutex);

			if (block->page.buf_fix_count != 0
			    || buf_page_get_io_fix(&block->page)
			    != BUF_IO_NONE) {
				fixed_pages_number++;
			}

			mutex_exit(&block->mutex);
		}
	}

	mutex_enter(&buf_pool_zip_mutex);

	/* Traverse the lists of clean and dirty compressed-only blocks. */

	for (b = UT_LIST_GET_FIRST(buf_pool->zip_clean); b;
	     b = UT_LIST_GET_NEXT(zip_list, b)) {
		ut_a(buf_page_get_state(b) == BUF_BLOCK_ZIP_PAGE);
		ut_a(buf_page_get_io_fix(b) != BUF_IO_WRITE);

		if (b->buf_fix_count != 0
		    || buf_page_get_io_fix(b) != BUF_IO_NONE) {
			fixed_pages_number++;
		}
	}

	mutex_enter(&flush_list_mutex);
	for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b;
	     b = UT_LIST_GET_NEXT(flush_list, b)) {
		ut_ad(b->in_flush_list);

		switch (buf_page_get_state(b)) {
		case BUF_BLOCK_ZIP_DIRTY:
			if (b->buf_fix_count != 0
			    || buf_page_get_io_fix(b) != BUF_IO_NONE) {
				fixed_pages_number++;
			}
			break;
		case BUF_BLOCK_FILE_PAGE:
			/* uncompressed page */
			break;
		case BUF_BLOCK_ZIP_FREE:
		case BUF_BLOCK_ZIP_PAGE:
		case BUF_BLOCK_NOT_USED:
		case BUF_BLOCK_READY_FOR_USE:
		case BUF_BLOCK_MEMORY:
		case BUF_BLOCK_REMOVE_HASH:
			ut_error;
			break;
		}
	}
	mutex_exit(&flush_list_mutex);

	mutex_exit(&buf_pool_zip_mutex);
	//buf_pool_mutex_exit();

	return(fixed_pages_number);
}
#endif /* UNIV_DEBUG */

/*********************************************************************//**
Returns the number of pending buf pool ios.
@return	number of pending I/O operations */
UNIV_INTERN
ulint
buf_get_n_pending_ios(void)
/*=======================*/
{
	return(buf_pool->n_pend_reads
	       + buf_pool->n_flush[BUF_FLUSH_LRU]
	       + buf_pool->n_flush[BUF_FLUSH_LIST]
	       + buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE]);
}

/*********************************************************************//**
Returns the ratio in percents of modified pages in the buffer pool /
database pages in the buffer pool.
@return	modified page percentage ratio */
UNIV_INTERN
ulint
buf_get_modified_ratio_pct(void)
/*============================*/
{
	ulint	ratio;

	//buf_pool_mutex_enter(); /* optimistic */

	ratio = (100 * UT_LIST_GET_LEN(buf_pool->flush_list))
		/ (1 + UT_LIST_GET_LEN(buf_pool->LRU)
		   + UT_LIST_GET_LEN(buf_pool->free));

	/* 1 + is there to avoid division by zero */

	//buf_pool_mutex_exit(); /* optimistic */

	return(ratio);
}
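
/* For example, with 150 pages on the flush list, 1000 pages on the LRU
list and 349 pages on the free list, the expression above evaluates to
(100 * 150) / (1 + 1000 + 349) = 11, i.e. about 11% of the pool holds
modified pages. */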

/*********************************************************************//**
Prints info of the buffer i/o. */
UNIV_INTERN
void
buf_print_io(
/*=========*/
	FILE*	file)	/*!< in/out: buffer where to print */
{
	time_t	current_time;
	double	time_elapsed;
	ulint	n_gets_diff;

	ut_ad(buf_pool);

	//buf_pool_mutex_enter();
	mutex_enter(&LRU_list_mutex);
	mutex_enter(&free_list_mutex);
	mutex_enter(&buf_pool_mutex);
	mutex_enter(&flush_list_mutex);

	fprintf(file,
		"Buffer pool size        %lu\n"
		"Buffer pool size, bytes %lu\n"
		"Free buffers            %lu\n"
		"Database pages          %lu\n"
		"Old database pages      %lu\n"
		"Modified db pages       %lu\n"
		"Pending reads %lu\n"
		"Pending writes: LRU %lu, flush list %lu, single page %lu\n",
		(ulong) buf_pool->curr_size,
		(ulong) buf_pool->curr_size * UNIV_PAGE_SIZE,
		(ulong) UT_LIST_GET_LEN(buf_pool->free),
		(ulong) UT_LIST_GET_LEN(buf_pool->LRU),
		(ulong) buf_pool->LRU_old_len,
		(ulong) UT_LIST_GET_LEN(buf_pool->flush_list),
		(ulong) buf_pool->n_pend_reads,
		(ulong) buf_pool->n_flush[BUF_FLUSH_LRU]
		+ buf_pool->init_flush[BUF_FLUSH_LRU],
		(ulong) buf_pool->n_flush[BUF_FLUSH_LIST]
		+ buf_pool->init_flush[BUF_FLUSH_LIST],
		(ulong) buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE]);

	current_time = time(NULL);
	time_elapsed = 0.001 + difftime(current_time,
					buf_pool->last_printout_time);

	fprintf(file,
		"Pages made young %lu, not young %lu\n"
		"%.2f youngs/s, %.2f non-youngs/s\n"
		"Pages read %lu, created %lu, written %lu\n"
		"%.2f reads/s, %.2f creates/s, %.2f writes/s\n",
		(ulong) buf_pool->stat.n_pages_made_young,
		(ulong) buf_pool->stat.n_pages_not_made_young,
		(buf_pool->stat.n_pages_made_young
		 - buf_pool->old_stat.n_pages_made_young)
		/ time_elapsed,
		(buf_pool->stat.n_pages_not_made_young
		 - buf_pool->old_stat.n_pages_not_made_young)
		/ time_elapsed,
		(ulong) buf_pool->stat.n_pages_read,
		(ulong) buf_pool->stat.n_pages_created,
		(ulong) buf_pool->stat.n_pages_written,
		(buf_pool->stat.n_pages_read
		 - buf_pool->old_stat.n_pages_read)
		/ time_elapsed,
		(buf_pool->stat.n_pages_created
		 - buf_pool->old_stat.n_pages_created)
		/ time_elapsed,
		(buf_pool->stat.n_pages_written
		 - buf_pool->old_stat.n_pages_written)
		/ time_elapsed);

	n_gets_diff = buf_pool->stat.n_page_gets - buf_pool->old_stat.n_page_gets;

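	/* The hit rate below is computed from the counter differences since
	the last printout: for example, 10000 page gets and 50 physical page
	reads in the interval give 1000 - (1000 * 50) / 10000 = 995, i.e. a
	hit rate of 995 per 1000. */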
	if (n_gets_diff) {
		fprintf(file,
			"Buffer pool hit rate %lu / 1000,"
			" young-making rate %lu / 1000 not %lu / 1000\n",
			(ulong)
			(1000 - ((1000 * (buf_pool->stat.n_pages_read
					  - buf_pool->old_stat.n_pages_read))
				 / (buf_pool->stat.n_page_gets
				    - buf_pool->old_stat.n_page_gets))),
			(ulong)
			(1000 * (buf_pool->stat.n_pages_made_young
				 - buf_pool->old_stat.n_pages_made_young)
			 / n_gets_diff),
			(ulong)
			(1000 * (buf_pool->stat.n_pages_not_made_young
				 - buf_pool->old_stat.n_pages_not_made_young)
			 / n_gets_diff));
	} else {
		fputs("No buffer pool page gets since the last printout\n",
		      file);
	}

	/* Statistics about read ahead algorithm */
	fprintf(file, "Pages read ahead %.2f/s,"
		" evicted without access %.2f/s\n",
		(buf_pool->stat.n_ra_pages_read
		- buf_pool->old_stat.n_ra_pages_read)
		/ time_elapsed,
		(buf_pool->stat.n_ra_pages_evicted
		- buf_pool->old_stat.n_ra_pages_evicted)
		/ time_elapsed);

	/* Print some values to help us with visualizing what is
	happening with LRU eviction. */
	fprintf(file,
		"LRU len: %lu, unzip_LRU len: %lu\n"
		"I/O sum[%lu]:cur[%lu], unzip sum[%lu]:cur[%lu]\n",
		UT_LIST_GET_LEN(buf_pool->LRU),
		UT_LIST_GET_LEN(buf_pool->unzip_LRU),
		buf_LRU_stat_sum.io, buf_LRU_stat_cur.io,
		buf_LRU_stat_sum.unzip, buf_LRU_stat_cur.unzip);

	buf_refresh_io_stats();
	//buf_pool_mutex_exit();
	mutex_exit(&LRU_list_mutex);
	mutex_exit(&free_list_mutex);
	mutex_exit(&buf_pool_mutex);
	mutex_exit(&flush_list_mutex);
}

/**********************************************************************//**
Refreshes the statistics used to print per-second averages. */
UNIV_INTERN
void
buf_refresh_io_stats(void)
/*======================*/
{
	buf_pool->last_printout_time = time(NULL);
	buf_pool->old_stat = buf_pool->stat;
}

/*********************************************************************//**
Asserts that all file pages in the buffer are in a replaceable state.
@return	TRUE */
UNIV_INTERN
ibool
buf_all_freed(void)
/*===============*/
{
	buf_chunk_t*	chunk;
	ulint		i;

	ut_ad(buf_pool);

	//buf_pool_mutex_enter(); /* optimistic */

	chunk = buf_pool->chunks;

	for (i = buf_pool->n_chunks; i--; chunk++) {

		const buf_block_t* block = buf_chunk_not_freed(chunk);

		if (UNIV_LIKELY_NULL(block)) {
			fprintf(stderr,
				"Page %lu %lu still fixed or dirty\n",
				(ulong) block->page.space,
				(ulong) block->page.offset);
			ut_error;
		}
	}

	//buf_pool_mutex_exit(); /* optimistic */

	return(TRUE);
}

/*********************************************************************//**
Checks that there currently are no pending i/o-operations for the buffer
pool.
@return	TRUE if there is no pending i/o */
UNIV_INTERN
ibool
buf_pool_check_no_pending_io(void)
/*==============================*/
{
	ibool	ret;

	//buf_pool_mutex_enter();
	mutex_enter(&buf_pool_mutex);

	if (buf_pool->n_pend_reads + buf_pool->n_flush[BUF_FLUSH_LRU]
	    + buf_pool->n_flush[BUF_FLUSH_LIST]
	    + buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE]) {
		ret = FALSE;
	} else {
		ret = TRUE;
	}

	//buf_pool_mutex_exit();
	mutex_exit(&buf_pool_mutex);

	return(ret);
}

/*********************************************************************//**
Gets the current length of the free list of buffer blocks.
@return	length of the free list */
UNIV_INTERN
ulint
buf_get_free_list_len(void)
/*=======================*/
{
	ulint	len;

	//buf_pool_mutex_enter();
	mutex_enter(&free_list_mutex);

	len = UT_LIST_GET_LEN(buf_pool->free);

	//buf_pool_mutex_exit();
	mutex_exit(&free_list_mutex);

	return(len);
}
#else /* !UNIV_HOTBACKUP */
/********************************************************************//**
Inits a page to the buffer buf_pool, for use in ibbackup --restore. */
UNIV_INTERN
void
buf_page_init_for_backup_restore(
/*=============================*/
	ulint		space,	/*!< in: space id */
	ulint		offset,	/*!< in: offset of the page within space
				in units of a page */
	ulint		zip_size,/*!< in: compressed page size in bytes
				or 0 for uncompressed pages */
	buf_block_t*	block)	/*!< in: block to init */
{
	block->page.state	= BUF_BLOCK_FILE_PAGE;
	block->page.space	= space;
	block->page.offset	= offset;

	page_zip_des_init(&block->page.zip);

	/* We assume that block->page.data has been allocated
	with zip_size == UNIV_PAGE_SIZE. */
	ut_ad(zip_size <= UNIV_PAGE_SIZE);
	ut_ad(ut_is_2pow(zip_size));
	page_zip_set_size(&block->page.zip, zip_size);
	if (zip_size) {
		block->page.zip.data = block->frame + UNIV_PAGE_SIZE;
	}
}
#endif /* !UNIV_HOTBACKUP */