buf0dblwr.cc 35.3 KB
Newer Older
1 2
/*****************************************************************************

Vicențiu Ciorbaru's avatar
Vicențiu Ciorbaru committed
3
Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
4
Copyright (c) 2013, 2020, MariaDB Corporation.
5 6 7 8 9 10 11 12 13 14 15

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
Vicențiu Ciorbaru's avatar
Vicențiu Ciorbaru committed
16
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33

*****************************************************************************/

/**************************************************//**
@file buf/buf0dblwr.cc
Doublewrite buffer module

Created 2011/12/19
*******************************************************/

#include "buf0dblwr.h"
#include "buf0buf.h"
#include "buf0checksum.h"
#include "srv0start.h"
#include "srv0srv.h"
#include "page0zip.h"
#include "trx0sys.h"
34
#include "fil0crypt.h"
35
#include "fil0pagecompress.h"
36

37 38
using st_::span;

39
/** The doublewrite buffer */
40
buf_dblwr_t*	buf_dblwr = NULL;
41 42

/** Set to TRUE when the doublewrite buffer is being created */
43
ibool	buf_dblwr_being_created = FALSE;
44

Monty's avatar
Monty committed
45 46
#define TRX_SYS_DOUBLEWRITE_BLOCKS 2

47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75
/****************************************************************//**
Determines if a page number is located inside the doublewrite buffer.
@return TRUE if the location is inside the two blocks of the
doublewrite buffer */
ibool
buf_dblwr_page_inside(
/*==================*/
	ulint	page_no)	/*!< in: page number */
{
	/* Nothing can be "inside" a buffer that does not exist. */
	if (buf_dblwr == NULL) {
		return(FALSE);
	}

	/* Each of the two doublewrite blocks spans
	TRX_SYS_DOUBLEWRITE_BLOCK_SIZE consecutive pages. */
	const bool	in_block1
		= page_no >= buf_dblwr->block1
		&& page_no < buf_dblwr->block1
		+ TRX_SYS_DOUBLEWRITE_BLOCK_SIZE;

	const bool	in_block2
		= page_no >= buf_dblwr->block2
		&& page_no < buf_dblwr->block2
		+ TRX_SYS_DOUBLEWRITE_BLOCK_SIZE;

	return((in_block1 || in_block2) ? TRUE : FALSE);
}

76 77
/** @return the TRX_SYS page, x-latched in the given mini-transaction */
inline buf_block_t *buf_dblwr_trx_sys_get(mtr_t *mtr)
{
  const page_id_t trx_sys_id(TRX_SYS_SPACE, TRX_SYS_PAGE_NO);
  buf_block_t *block= buf_page_get(trx_sys_id, 0, RW_X_LATCH, mtr);
  /* Register the latch for the debug latching-order checker. */
  buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);
  return block;
}

/********************************************************************//**
Flush a batch of writes to the datafiles that have already been
written to the dblwr buffer on disk. */
void
buf_dblwr_sync_datafiles()
/*======================*/
{
	/* Wait that all async writes to tablespaces have been posted to
	the OS */
	os_aio_wait_until_no_pending_writes();
}

/****************************************************************//**
Creates or initializes the doublewrite buffer at a database start.
@param[in] doublewrite  pointer to the doublewrite header within the
                        TRX_SYS page image */
static void buf_dblwr_init(const byte *doublewrite)
{
	ulint	buf_size;

	buf_dblwr = static_cast<buf_dblwr_t*>(
		ut_zalloc_nokey(sizeof(buf_dblwr_t)));

	/* There are two blocks of same size in the doublewrite
	buffer. */
	buf_size = TRX_SYS_DOUBLEWRITE_BLOCKS * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE;

	/* There must be at least one buffer for single page writes
	and one buffer for batch writes. */
	ut_a(srv_doublewrite_batch_size > 0
	     && srv_doublewrite_batch_size < buf_size);

	mutex_create(LATCH_ID_BUF_DBLWR, &buf_dblwr->mutex);

	/* Events used to wake waiters when a batch completes (b_event)
	or when a single-page slot is released (s_event). */
	buf_dblwr->b_event = os_event_create("dblwr_batch_event");
	buf_dblwr->s_event = os_event_create("dblwr_single_event");
	buf_dblwr->first_free = 0;
	buf_dblwr->s_reserved = 0;
	buf_dblwr->b_reserved = 0;

	/* Read the start page numbers of the two doublewrite blocks
	from the persisted header. */
	buf_dblwr->block1 = mach_read_from_4(
		doublewrite + TRX_SYS_DOUBLEWRITE_BLOCK1);
	buf_dblwr->block2 = mach_read_from_4(
		doublewrite + TRX_SYS_DOUBLEWRITE_BLOCK2);

	/* Per-slot "in use" flags for single-page writes. */
	buf_dblwr->in_use = static_cast<bool*>(
		ut_zalloc_nokey(buf_size * sizeof(bool)));

	/* The staging buffer holds buf_size pages, aligned to
	srv_page_size. */
	buf_dblwr->write_buf = static_cast<byte*>(
		aligned_malloc(buf_size << srv_page_size_shift,
			       srv_page_size));

	/* Back-pointers from doublewrite slots to the buffer pool
	pages being written. */
	buf_dblwr->buf_block_arr = static_cast<buf_page_t**>(
		ut_zalloc_nokey(buf_size * sizeof(void*)));
}

139 140 141 142 143
/** Create the doublewrite buffer if the doublewrite buffer header
is not present in the TRX_SYS page.
@return	whether the operation succeeded
@retval	true	if the doublewrite buffer exists or was created
@retval	false	if the creation failed (too small first data file) */
bool
buf_dblwr_create()
{
	buf_block_t*	block2;
	buf_block_t*	new_block;
	byte*	fseg_header;
	ulint	page_no;
	ulint	prev_page_no;
	ulint	i;
	mtr_t	mtr;

	if (buf_dblwr) {
		/* Already inited */
		return(true);
	}

start_again:
	mtr.start();
	buf_dblwr_being_created = TRUE;

	buf_block_t *trx_sys_block = buf_dblwr_trx_sys_get(&mtr);

	if (mach_read_from_4(TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_MAGIC
			     + trx_sys_block->frame)
	    == TRX_SYS_DOUBLEWRITE_MAGIC_N) {
		/* The doublewrite buffer has already been created:
		just read in some numbers */

		buf_dblwr_init(TRX_SYS_DOUBLEWRITE + trx_sys_block->frame);

		mtr.commit();
		buf_dblwr_being_created = FALSE;
		return(true);
	} else {
		/* The system tablespace must be able to hold the two
		doublewrite blocks plus the half extent skipped below. */
		if (UT_LIST_GET_FIRST(fil_system.sys_space->chain)->size
		    < 3 * FSP_EXTENT_SIZE) {
			goto too_small;
		}
	}

	block2 = fseg_create(fil_system.sys_space, TRX_SYS_PAGE_NO,
			     TRX_SYS_DOUBLEWRITE
			     + TRX_SYS_DOUBLEWRITE_FSEG, &mtr);

	if (block2 == NULL) {
too_small:
		ib::error()
			<< "Cannot create doublewrite buffer: "
			"the first file in innodb_data_file_path"
			" must be at least "
			<< (3 * (FSP_EXTENT_SIZE
				 >> (20U - srv_page_size_shift)))
			<< "M.";
		mtr.commit();
		return(false);
	}

	ib::info() << "Doublewrite buffer not found: creating new";

	/* FIXME: After this point, the doublewrite buffer creation
	is not atomic. The doublewrite buffer should not exist in
	the InnoDB system tablespace file in the first place.
	It could be located in separate optional file(s) in a
	user-specified location. */

	/* fseg_create acquires a second latch on the page,
	therefore we must declare it: */

	buf_block_dbg_add_level(block2, SYNC_NO_ORDER_CHECK);

	fseg_header = TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_FSEG
		+ trx_sys_block->frame;
	prev_page_no = 0;

	/* Allocate the pages of both doublewrite blocks, plus half an
	extent of leading pages that are skipped so that the blocks
	start at extent boundaries. */
	for (i = 0; i < TRX_SYS_DOUBLEWRITE_BLOCKS * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE
		     + FSP_EXTENT_SIZE / 2; i++) {
		new_block = fseg_alloc_free_page(
			fseg_header, prev_page_no + 1, FSP_UP, &mtr);
		if (new_block == NULL) {
			ib::error() << "Cannot create doublewrite buffer: "
				" you must increase your tablespace size."
				" Cannot continue operation.";
			/* This may essentially corrupt the doublewrite
			buffer. However, usually the doublewrite buffer
			is created at database initialization, and it
			should not matter (just remove all newly created
			InnoDB files and restart). */
			mtr.commit();
			return(false);
		}

		/* We read the allocated pages to the buffer pool;
		when they are written to disk in a flush, the space
		id and page number fields are also written to the
		pages. When we at database startup read pages
		from the doublewrite buffer, we know that if the
		space id and page number in them are the same as
		the page position in the tablespace, then the page
		has not been written to in doublewrite. */

		ut_ad(rw_lock_get_x_lock_count(&new_block->lock) == 1);
		page_no = new_block->page.id.page_no();
		/* We only do this in the debug build, to ensure that
		the check in buf_flush_init_for_writing() will see a valid
		page type. The flushes of new_block are actually
		unnecessary here.  */
		ut_d(mtr.write<2>(*new_block,
				  FIL_PAGE_TYPE + new_block->frame,
				  FIL_PAGE_TYPE_SYS));

		if (i == FSP_EXTENT_SIZE / 2) {
			/* First page of block 1: record its page number
			(twice, for redundancy) in the TRX_SYS header. */
			ut_a(page_no == FSP_EXTENT_SIZE);
			mtr.write<4>(*trx_sys_block,
				     TRX_SYS_DOUBLEWRITE
				     + TRX_SYS_DOUBLEWRITE_BLOCK1
				     + trx_sys_block->frame,
				     page_no);
			mtr.write<4>(*trx_sys_block,
				     TRX_SYS_DOUBLEWRITE
				     + TRX_SYS_DOUBLEWRITE_REPEAT
				     + TRX_SYS_DOUBLEWRITE_BLOCK1
				     + trx_sys_block->frame,
				     page_no);

		} else if (i == FSP_EXTENT_SIZE / 2
			   + TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) {
			/* First page of block 2: record it likewise. */
			ut_a(page_no == 2 * FSP_EXTENT_SIZE);
			mtr.write<4>(*trx_sys_block,
				     TRX_SYS_DOUBLEWRITE
				     + TRX_SYS_DOUBLEWRITE_BLOCK2
				     + trx_sys_block->frame,
				     page_no);
			mtr.write<4>(*trx_sys_block,
				     TRX_SYS_DOUBLEWRITE
				     + TRX_SYS_DOUBLEWRITE_REPEAT
				     + TRX_SYS_DOUBLEWRITE_BLOCK2
				     + trx_sys_block->frame,
				     page_no);
		} else if (i > FSP_EXTENT_SIZE / 2) {
			/* Within a block the pages must be contiguous. */
			ut_a(page_no == prev_page_no + 1);
		}

		if (((i + 1) & 15) == 0) {
			/* rw_locks can only be recursively x-locked
			2048 times. (on 32 bit platforms,
			(lint) 0 - (X_LOCK_DECR * 2049)
			is no longer a negative number, and thus
			lock_word becomes like a shared lock).
			For 4k page size this loop will
			lock the fseg header too many times. Since
			this code is not done while any other threads
			are active, restart the MTR occasionally. */
			mtr.commit();
			mtr.start();
			trx_sys_block = buf_dblwr_trx_sys_get(&mtr);
			fseg_header = TRX_SYS_DOUBLEWRITE
				+ TRX_SYS_DOUBLEWRITE_FSEG
				+ trx_sys_block->frame;
		}

		prev_page_no = page_no;
	}

	/* Write the magic number (and its redundant copy) last, so the
	buffer is only recognized as valid once fully allocated. */
	mtr.write<4>(*trx_sys_block,
		     TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_MAGIC
		     + trx_sys_block->frame,
		     TRX_SYS_DOUBLEWRITE_MAGIC_N);
	mtr.write<4>(*trx_sys_block,
		     TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_MAGIC
		     + TRX_SYS_DOUBLEWRITE_REPEAT
		     + trx_sys_block->frame,
		     TRX_SYS_DOUBLEWRITE_MAGIC_N);

	mtr.write<4>(*trx_sys_block,
		     TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_SPACE_ID_STORED
		     + trx_sys_block->frame,
		     TRX_SYS_DOUBLEWRITE_SPACE_ID_STORED_N);
	mtr.commit();

	/* Flush the modified pages to disk and make a checkpoint */
	log_make_checkpoint();
	buf_dblwr_being_created = FALSE;

	/* Remove doublewrite pages from LRU */
	buf_pool_invalidate();

	ib::info() <<  "Doublewrite buffer created";

	/* Re-enter at the top; this time the magic number is found and
	buf_dblwr_init() is called. */
	goto start_again;
}

335 336
/**
At database startup initializes the doublewrite buffer memory structure if
we already have a doublewrite buffer created in the data files. If we are
upgrading to an InnoDB version which supports multiple tablespaces, then this
function performs the necessary update operations. If we are in a crash
recovery, this function loads the pages from double write buffer into memory.
@param[in]	file		File handle
@param[in]	path		Path name of file
@return DB_SUCCESS or error code */
dberr_t
buf_dblwr_init_or_load_pages(
	pfs_os_file_t	file,
	const char*	path)
{
	byte*		buf;
	byte*		page;
	ulint		block1;
	ulint		block2;
	ulint		space_id;
	byte*		read_buf;
	byte*		doublewrite;
	ibool		reset_space_ids = FALSE;
	recv_dblwr_t&	recv_dblwr = recv_sys.dblwr;

	/* We do the file i/o past the buffer pool */
	read_buf = static_cast<byte*>(
		aligned_malloc(2 * srv_page_size, srv_page_size));

	/* Read the trx sys header to check if we are using the doublewrite
	buffer */
	dberr_t		err;

	IORequest       read_request(IORequest::READ);

	err = os_file_read(
		read_request,
		file, read_buf, TRX_SYS_PAGE_NO << srv_page_size_shift,
		srv_page_size);

	if (err != DB_SUCCESS) {

		ib::error()
			<< "Failed to read the system tablespace header page";
func_exit:
		/* Common exit: free the aligned I/O buffer and return
		whatever is currently in err. */
		aligned_free(read_buf);
		return(err);
	}

	doublewrite = read_buf + TRX_SYS_DOUBLEWRITE;

	/* TRX_SYS_PAGE_NO is not encrypted see fil_crypt_rotate_page() */

	if (mach_read_from_4(doublewrite + TRX_SYS_DOUBLEWRITE_MAGIC)
	    == TRX_SYS_DOUBLEWRITE_MAGIC_N) {
		/* The doublewrite buffer has been created */

		buf_dblwr_init(doublewrite);

		block1 = buf_dblwr->block1;
		block2 = buf_dblwr->block2;

		buf = buf_dblwr->write_buf;
	} else {
		/* No doublewrite buffer exists; nothing to load. */
		err = DB_SUCCESS;
		goto func_exit;
	}

	if (mach_read_from_4(doublewrite + TRX_SYS_DOUBLEWRITE_SPACE_ID_STORED)
	    != TRX_SYS_DOUBLEWRITE_SPACE_ID_STORED_N) {

		/* We are upgrading from a version < 4.1.x to a version where
		multiple tablespaces are supported. We must reset the space id
		field in the pages in the doublewrite buffer because starting
		from this version the space id is stored to
		FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID. */

		reset_space_ids = TRUE;

		ib::info() << "Resetting space id's in the doublewrite buffer";
	}

	/* Read the pages from the doublewrite buffer to memory */
	err = os_file_read(
		read_request,
		file, buf, block1 << srv_page_size_shift,
		TRX_SYS_DOUBLEWRITE_BLOCK_SIZE << srv_page_size_shift);

	if (err != DB_SUCCESS) {

		ib::error()
			<< "Failed to read the first double write buffer "
			"extent";
		goto func_exit;
	}

	/* The second block is read into buf right after the first. */
	err = os_file_read(
		read_request,
		file,
		buf + (TRX_SYS_DOUBLEWRITE_BLOCK_SIZE << srv_page_size_shift),
		block2 << srv_page_size_shift,
		TRX_SYS_DOUBLEWRITE_BLOCK_SIZE << srv_page_size_shift);

	if (err != DB_SUCCESS) {

		ib::error()
			<< "Failed to read the second double write buffer "
			"extent";
		goto func_exit;
	}

	/* Check if any of these pages is half-written in data files, in the
	intended position */

	page = buf;

	for (ulint i = 0; i < TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * 2; i++) {

		if (reset_space_ids) {
			ulint source_page_no;

			space_id = 0;
			mach_write_to_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID,
					space_id);
			/* We do not need to calculate new checksums for the
			pages because the field .._SPACE_ID does not affect
			them. Write the page back to where we read it from. */

			if (i < TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) {
				source_page_no = block1 + i;
			} else {
				source_page_no = block2
					+ i - TRX_SYS_DOUBLEWRITE_BLOCK_SIZE;
			}

			err = os_file_write(
				IORequestWrite, path, file, page,
				source_page_no << srv_page_size_shift,
				srv_page_size);
			if (err != DB_SUCCESS) {

				ib::error()
					<< "Failed to write to the double write"
					" buffer";
				goto func_exit;
			}
		} else if (mach_read_from_8(page + FIL_PAGE_LSN)) {
			/* Each valid page header must contain
			a nonzero FIL_PAGE_LSN field. */
			recv_dblwr.add(page);
		}

		page += srv_page_size;
	}

	if (reset_space_ids) {
		/* Make the space-id rewrites above durable. */
		os_file_flush(file);
	}

	err = DB_SUCCESS;
	goto func_exit;
}

497
/** Process and remove the double write buffer pages for all tablespaces.
For each page recovered from the doublewrite buffer at startup, read the
corresponding page from the data file; if that on-disk copy is corrupted
and the doublewrite copy is valid, restore the page from the doublewrite
copy. */
void
buf_dblwr_process()
{
	ulint		page_no_dblwr	= 0;
	byte*		read_buf;
	recv_dblwr_t&	recv_dblwr	= recv_sys.dblwr;

	if (!buf_dblwr) {
		return;
	}

	/* Two aligned page-sized buffers: read_buf holds the on-disk
	copy, buf (second half) holds decompression output. */
	read_buf = static_cast<byte*>(
		aligned_malloc(2 * srv_page_size, srv_page_size));
	byte* const buf = read_buf + srv_page_size;

	for (recv_dblwr_t::list::iterator i = recv_dblwr.pages.begin();
	     i != recv_dblwr.pages.end();
	     ++i, ++page_no_dblwr) {
		byte*	page		= *i;
		ulint	space_id	= page_get_space_id(page);
		fil_space_t*	space = fil_space_get(space_id);

		if (space == NULL) {
			/* Maybe we have dropped the tablespace
			and this page once belonged to it: do nothing */
			continue;
		}

		fil_space_open_if_needed(space);

		const ulint		page_no	= page_get_page_no(page);
		const page_id_t		page_id(space_id, page_no);

		if (page_no >= space->size) {

			/* Do not report the warning for undo
			tablespaces, because they can be truncated in place. */
			if (!srv_is_undo_tablespace(space_id)) {
				ib::warn() << "A copy of page " << page_id
					<< " in the doublewrite buffer slot "
					<< page_no_dblwr
					<< " is not within space bounds";
			}
			continue;
		}

		const ulint physical_size = space->physical_size();
		const ulint zip_size = space->zip_size();
		ut_ad(!buf_is_zeroes(span<const byte>(page, physical_size)));

		/* We want to ensure that for partial reads the
		unread portion of the page is NUL. */
		memset(read_buf, 0x0, physical_size);

		IORequest	request;

		request.dblwr_recover();

		/* Read in the actual page from the file */
		dberr_t	err = fil_io(
			request, true,
			page_id, zip_size,
			0, physical_size, read_buf, NULL);

		if (err != DB_SUCCESS) {
			/* A failed read is not fatal here: the zero-filled
			read_buf will fail validation below and the page
			may still be restored from the doublewrite copy. */
			ib::warn()
				<< "Double write buffer recovery: "
				<< page_id << " read failed with "
				<< "error: " << ut_strerr(err);
		}

		const bool is_all_zero = buf_is_zeroes(
			span<const byte>(read_buf, physical_size));
		const bool expect_encrypted = space->crypt_data
			&& space->crypt_data->type != CRYPT_SCHEME_UNENCRYPTED;
		bool is_corrupted = false;

		if (is_all_zero) {
			/* We will check if the copy in the
			doublewrite buffer is valid. If not, we will
			ignore this page (there should be redo log
			records to initialize it). */
		} else {
			/* Decompress the page before
			validating the checksum. */
			ulint decomp = fil_page_decompress(buf, read_buf,
							   space->flags);
			if (!decomp || (zip_size && decomp != srv_page_size)) {
				goto bad;
			}

			if (expect_encrypted
			    && buf_page_get_key_version(read_buf, space->flags)) {
				is_corrupted = !buf_page_verify_crypt_checksum(
							read_buf, space->flags);
			} else {
				is_corrupted = buf_page_is_corrupted(
					true, read_buf, space->flags);
			}

			if (!is_corrupted) {
				/* The page is good; there is no need
				to consult the doublewrite buffer. */
				continue;
			}

bad:
			/* We intentionally skip this message for
			is_all_zero pages. */
			ib::info()
				<< "Trying to recover page " << page_id
				<< " from the doublewrite buffer.";
		}

		/* Now validate the doublewrite copy itself, using the
		same decompress-then-checksum procedure. */
		ulint decomp = fil_page_decompress(buf, page, space->flags);
		if (!decomp || (zip_size && decomp != srv_page_size)) {
			continue;
		}

		if (expect_encrypted
		    && buf_page_get_key_version(read_buf, space->flags)) {
			is_corrupted = !buf_page_verify_crypt_checksum(
						page, space->flags);
		} else {
			is_corrupted = buf_page_is_corrupted(
					true, page, space->flags);
		}

		if (is_corrupted) {
			/* Theoretically we could have another good
			copy for this page in the doublewrite
			buffer. If not, we will report a fatal error
			for a corrupted page somewhere else if that
			page was truly needed. */
			continue;
		}

		if (page_no == 0) {
			/* Check the FSP_SPACE_FLAGS. */
			ulint flags = fsp_header_get_flags(page);
			if (!fil_space_t::is_valid_flags(flags, space_id)
			    && fsp_flags_convert_from_101(flags)
			    == ULINT_UNDEFINED) {
				ib::warn() << "Ignoring a doublewrite copy"
					" of page " << page_id
					<< " due to invalid flags "
					<< ib::hex(flags);
				continue;
			}
			/* The flags on the page should be converted later. */
		}

		/* Write the good page from the doublewrite buffer to
		the intended position. */

		IORequest	write_request(IORequest::WRITE);

		fil_io(write_request, true, page_id, zip_size,
		       0, physical_size,
				const_cast<byte*>(page), NULL);

		ib::info() << "Recovered page " << page_id
			<< " from the doublewrite buffer.";
	}

	recv_dblwr.pages.clear();

	/* Make the recovered pages durable before continuing. */
	fil_flush_file_spaces();
	aligned_free(read_buf);
}

/****************************************************************//**
Frees doublewrite buffer. */
void
Marko Mäkelä's avatar
Marko Mäkelä committed
672
buf_dblwr_free()
673 674 675 676 677 678
{
	/* Free the double write data structures. */
	ut_a(buf_dblwr != NULL);
	ut_ad(buf_dblwr->s_reserved == 0);
	ut_ad(buf_dblwr->b_reserved == 0);

679 680
	os_event_destroy(buf_dblwr->b_event);
	os_event_destroy(buf_dblwr->s_event);
681
	aligned_free(buf_dblwr->write_buf);
682 683
	ut_free(buf_dblwr->buf_block_arr);
	ut_free(buf_dblwr->in_use);
684
	mutex_free(&buf_dblwr->mutex);
685
	ut_free(buf_dblwr);
686 687 688 689 690 691 692 693 694 695 696
	buf_dblwr = NULL;
}

/********************************************************************//**
Updates the doublewrite buffer when an IO request is completed.
Releases the slot(s) reserved for the completed write; for batch
flushes the last completed write also syncs the data files and
signals waiters that the batch is finished. */
void
buf_dblwr_update(
/*=============*/
	const buf_page_t*	bpage,	/*!< in: buffer block descriptor */
	buf_flush_t		flush_type)/*!< in: flush type */
{
	ut_ad(srv_use_doublewrite_buf);
	ut_ad(buf_dblwr);
	ut_ad(!fsp_is_system_temporary(bpage->id.space()));
	ut_ad(!srv_read_only_mode);

	switch (flush_type) {
	case BUF_FLUSH_LIST:
	case BUF_FLUSH_LRU:
		mutex_enter(&buf_dblwr->mutex);

		ut_ad(buf_dblwr->batch_running);
		ut_ad(buf_dblwr->b_reserved > 0);
		ut_ad(buf_dblwr->b_reserved <= buf_dblwr->first_free);

		buf_dblwr->b_reserved--;

		if (buf_dblwr->b_reserved == 0) {
			/* Last write of the batch completed. Drop the
			mutex while doing file I/O. */
			mutex_exit(&buf_dblwr->mutex);
			/* This will finish the batch. Sync data files
			to the disk. */
			fil_flush_file_spaces();
			mutex_enter(&buf_dblwr->mutex);

			/* We can now reuse the doublewrite memory buffer: */
			buf_dblwr->first_free = 0;
			buf_dblwr->batch_running = false;
			os_event_set(buf_dblwr->b_event);
		}

		mutex_exit(&buf_dblwr->mutex);
		break;
	case BUF_FLUSH_SINGLE_PAGE:
		{
			const ulint size = TRX_SYS_DOUBLEWRITE_BLOCKS * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE;
			ulint i;
			mutex_enter(&buf_dblwr->mutex);
			/* Single-page slots live in [srv_doublewrite_batch_size,
			size); find and release the one holding bpage. */
			for (i = srv_doublewrite_batch_size; i < size; ++i) {
				if (buf_dblwr->buf_block_arr[i] == bpage) {
					buf_dblwr->s_reserved--;
					buf_dblwr->buf_block_arr[i] = NULL;
					buf_dblwr->in_use[i] = false;
					break;
				}
			}

			/* The block we are looking for must exist as a
			reserved block. */
			ut_a(i < size);
		}
		/* Wake any thread waiting for a free single-page slot. */
		os_event_set(buf_dblwr->s_event);
		mutex_exit(&buf_dblwr->mutex);
		break;
	case BUF_FLUSH_N_TYPES:
		ut_error;
	}
}

754 755
#ifdef UNIV_DEBUG
/** Check the LSN values on the page: the low 32 bits of the header
FIL_PAGE_LSN must match the LSN copy stored in the page trailer.
@param[in] page  page to check
@param[in] s     tablespace */
static void buf_dblwr_check_page_lsn(const page_t* page, const fil_space_t& s)
{
  /* Ignore page compressed or encrypted pages */
  if (s.is_compressed() || buf_page_get_key_version(page, s.flags))
    return;
  /* Low 4 bytes of the 8-byte header LSN. */
  const byte* lsn_start= FIL_PAGE_LSN + 4 + page;
  /* Location of the trailer LSN copy; the offset from the page end
  differs between the full_crc32 format and the old format. */
  const byte* lsn_end= page +
    srv_page_size - (s.full_crc32()
    ? FIL_PAGE_FCRC32_END_LSN
    : FIL_PAGE_END_LSN_OLD_CHKSUM - 4);
  static_assert(FIL_PAGE_FCRC32_END_LSN % 4 == 0, "alignment");
  static_assert(FIL_PAGE_LSN % 4 == 0, "alignment");
  ut_ad(!memcmp_aligned<4>(lsn_start, lsn_end, 4));
}

773 774 775 776 777 778 779 780 781
static void buf_dblwr_check_page_lsn(const buf_page_t& b, const byte* page)
{
	if (fil_space_t* space = fil_space_acquire_for_io(b.id.space())) {
		buf_dblwr_check_page_lsn(page, *space);
		space->release_for_io();
	}
}
#endif /* UNIV_DEBUG */

782 783 784 785 786 787 788 789 790
/********************************************************************//**
Asserts when a corrupt block is find during writing out data to the
disk. */
static
void
buf_dblwr_assert_on_corrupt_block(
/*==============================*/
	const buf_block_t*	block)	/*!< in: block to check */
{
791
	buf_page_print(block->frame);
792 793 794 795 796 797

	ib::fatal() << "Apparent corruption of an index page "
		<< block->page.id
		<< " to be written to data file. We intentionally crash"
		" the server to prevent corrupt data from ending up in"
		" data files.";
798 799 800 801 802 803 804 805 806 807 808
}

/********************************************************************//**
Check the LSN values on the page with which this block is associated.
Also validate the page if the option is set. */
static
void
buf_dblwr_check_block(
/*==================*/
	const buf_block_t*	block)	/*!< in: block to check */
{
809
	ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
810

811
	if (block->skip_flush_check) {
812 813 814
		return;
	}

815 816
	switch (fil_page_get_type(block->frame)) {
	case FIL_PAGE_INDEX:
817
	case FIL_PAGE_TYPE_INSTANT:
818 819 820 821 822 823 824
	case FIL_PAGE_RTREE:
		if (page_is_comp(block->frame)) {
			if (page_simple_validate_new(block->frame)) {
				return;
			}
		} else if (page_simple_validate_old(block->frame)) {
			return;
825
		}
826 827 828 829
		/* While it is possible that this is not an index page
		but just happens to have wrongly set FIL_PAGE_TYPE,
		such pages should never be modified to without also
		adjusting the page type during page allocation or
Marko Mäkelä's avatar
Marko Mäkelä committed
830
		buf_flush_init_for_writing() or fil_block_reset_type(). */
831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849
		break;
	case FIL_PAGE_TYPE_FSP_HDR:
	case FIL_PAGE_IBUF_BITMAP:
	case FIL_PAGE_TYPE_UNKNOWN:
		/* Do not complain again, we already reset this field. */
	case FIL_PAGE_UNDO_LOG:
	case FIL_PAGE_INODE:
	case FIL_PAGE_IBUF_FREE_LIST:
	case FIL_PAGE_TYPE_SYS:
	case FIL_PAGE_TYPE_TRX_SYS:
	case FIL_PAGE_TYPE_XDES:
	case FIL_PAGE_TYPE_BLOB:
	case FIL_PAGE_TYPE_ZBLOB:
	case FIL_PAGE_TYPE_ZBLOB2:
		/* TODO: validate also non-index pages */
		return;
	case FIL_PAGE_TYPE_ALLOCATED:
		/* empty pages should never be flushed */
		return;
850
	}
851 852

	buf_dblwr_assert_on_corrupt_block(block);
853 854 855 856 857 858 859 860 861 862 863 864 865 866 867
}

/********************************************************************//**
Writes a page that has already been written to the doublewrite buffer
to the datafile. It is the job of the caller to sync the datafile. */
static
void
buf_dblwr_write_block_to_datafile(
/*==============================*/
	const buf_page_t*	bpage,	/*!< in: page to write */
	bool			sync)	/*!< in: true if sync IO
					is requested */
{
	ut_a(buf_page_in_file(bpage));

868
	ulint	type = IORequest::WRITE;
869
	IORequest	request(type, const_cast<buf_page_t*>(bpage));
870

871 872
	/* We request frame here to get correct buffer in case of
	encryption and/or page compression */
Monty's avatar
Monty committed
873 874
	void * frame = buf_page_get_frame(bpage);

875
	if (bpage->zip.data != NULL) {
876
		ut_ad(bpage->zip_size());
877

878 879
		fil_io(request, sync, bpage->id, bpage->zip_size(), 0,
		       bpage->zip_size(),
880
		       (void*) frame,
881
		       (void*) bpage);
882
	} else {
883
		ut_ad(!bpage->zip_size());
884 885 886

		/* Our IO API is common for both reads and writes and is
		therefore geared towards a non-const parameter. */
887

888 889
		buf_block_t*	block = reinterpret_cast<buf_block_t*>(
			const_cast<buf_page_t*>(bpage));
890

891
		ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
892
		ut_d(buf_dblwr_check_page_lsn(block->page, block->frame));
893
		fil_io(request,
894 895
		       sync, bpage->id, bpage->zip_size(), 0, bpage->real_size,
		       frame, block);
896
	}
897 898 899
}

/********************************************************************//**
Flushes possible buffered writes from the doublewrite memory buffer to disk.
It is very important to call this function after a batch of writes has been posted,
and also when we may have to wait for a page latch! Otherwise a deadlock
of threads can occur. */
void
buf_dblwr_flush_buffered_writes()
{
	byte*		write_buf;
	ulint		first_free;
	ulint		len;

	if (!srv_use_doublewrite_buf || buf_dblwr == NULL) {
		/* Doublewrite is disabled: just make the already
		posted page writes durable. */
		/* Sync the writes to the disk. */
		buf_dblwr_sync_datafiles();
		/* Now we flush the data to disk (for example, with fsync) */
		fil_flush_file_spaces();
		return;
	}

	ut_ad(!srv_read_only_mode);

try_again:
	mutex_enter(&buf_dblwr->mutex);

	/* Write first to doublewrite buffer blocks. We use synchronous
	aio and thus know that file write has been completed when the
	control returns. */

	if (buf_dblwr->first_free == 0) {
		/* Nothing buffered; nothing to do. */

		mutex_exit(&buf_dblwr->mutex);
		return;
	}

	if (buf_dblwr->batch_running) {
		/* Another thread is running the batch right now. Wait
		for it to finish. The event must be reset under the
		mutex so that we cannot miss a wakeup. */
		int64_t	sig_count = os_event_reset(buf_dblwr->b_event);
		mutex_exit(&buf_dblwr->mutex);

		os_event_wait_low(buf_dblwr->b_event, sig_count);
		goto try_again;
	}

	ut_ad(buf_dblwr->first_free == buf_dblwr->b_reserved);

	/* Disallow anyone else to post to doublewrite buffer or to
	start another batch of flushing. */
	buf_dblwr->batch_running = true;
	first_free = buf_dblwr->first_free;

	/* Now safe to release the mutex. Note that though no other
	thread is allowed to post to the doublewrite batch flushing
	but any threads working on single page flushes are allowed
	to proceed. */
	mutex_exit(&buf_dblwr->mutex);

	write_buf = buf_dblwr->write_buf;

	/* Debug-check every buffered uncompressed index page before
	it can hit disk; len2 tracks the page's offset in write_buf. */
	for (ulint len2 = 0, i = 0;
	     i < buf_dblwr->first_free;
	     len2 += srv_page_size, i++) {

		const buf_block_t*	block;

		block = (buf_block_t*) buf_dblwr->buf_block_arr[i];

		if (buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE
		    || block->page.zip.data) {
			/* No simple validate for compressed
			pages exists. */
			continue;
		}

		/* Check that the actual page in the buffer pool is
		not corrupt and the LSN values are sane. */
		buf_dblwr_check_block(block);
		ut_d(buf_dblwr_check_page_lsn(block->page, write_buf + len2));
	}

	/* Write out the first block of the doublewrite buffer */
	len = std::min<ulint>(TRX_SYS_DOUBLEWRITE_BLOCK_SIZE,
			      buf_dblwr->first_free) << srv_page_size_shift;

	fil_io(IORequestWrite, true,
	       page_id_t(TRX_SYS_SPACE, buf_dblwr->block1), 0,
	       0, len, (void*) write_buf, NULL);

	if (buf_dblwr->first_free <= TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) {
		/* No unwritten pages in the second block. */
		goto flush;
	}

	/* Write out the second block of the doublewrite buffer. */
	len = (buf_dblwr->first_free - TRX_SYS_DOUBLEWRITE_BLOCK_SIZE)
	       << srv_page_size_shift;

	write_buf = buf_dblwr->write_buf
		+ (TRX_SYS_DOUBLEWRITE_BLOCK_SIZE << srv_page_size_shift);

	fil_io(IORequestWrite, true,
	       page_id_t(TRX_SYS_SPACE, buf_dblwr->block2), 0,
	       0, len, (void*) write_buf, NULL);

flush:
	/* increment the doublewrite flushed pages counter */
	srv_stats.dblwr_pages_written.add(buf_dblwr->first_free);
	srv_stats.dblwr_writes.inc();

	/* Now flush the doublewrite buffer data to disk */
	fil_flush(TRX_SYS_SPACE);

	/* We know that the writes have been flushed to disk now
	and in recovery we will find them in the doublewrite buffer
	blocks. Next do the writes to the intended positions. */

	/* Up to this point first_free and buf_dblwr->first_free are
	same because we have set the buf_dblwr->batch_running flag
	disallowing any other thread to post any request but we
	can't safely access buf_dblwr->first_free in the loop below.
	This is so because it is possible that after we are done with
	the last iteration and before we terminate the loop, the batch
	gets finished in the IO helper thread and another thread posts
	a new batch setting buf_dblwr->first_free to a higher value.
	If this happens and we are using buf_dblwr->first_free in the
	loop termination condition then we'll end up dispatching
	the same block twice from two different threads. */
	ut_ad(first_free == buf_dblwr->first_free);
	for (ulint i = 0; i < first_free; i++) {
		/* Asynchronous writes; the IO completion handler will
		eventually mark the batch as finished. */
		buf_dblwr_write_block_to_datafile(
			buf_dblwr->buf_block_arr[i], false);
	}
}

/********************************************************************//**
Posts a buffer page for writing. If the doublewrite memory buffer is
full, calls buf_dblwr_flush_buffered_writes and waits for free
space to appear. */
void
buf_dblwr_add_to_batch(
/*====================*/
	buf_page_t*	bpage)	/*!< in: buffer block to write */
{
	ut_a(buf_page_in_file(bpage));

try_again:
	mutex_enter(&buf_dblwr->mutex);

	ut_a(buf_dblwr->first_free <= srv_doublewrite_batch_size);

	if (buf_dblwr->batch_running) {

		/* This is not nearly as bad as it looks. There is only
		page_cleaner thread which does background flushing
		in batches therefore it is unlikely to be a contention
		point. The only exception is when a user thread is
		forced to do a flush batch because of a sync
		checkpoint. */
		int64_t	sig_count = os_event_reset(buf_dblwr->b_event);
		mutex_exit(&buf_dblwr->mutex);

		os_event_wait_low(buf_dblwr->b_event, sig_count);
		goto try_again;
	}

	if (buf_dblwr->first_free == srv_doublewrite_batch_size) {
		/* The in-memory buffer is full: flush it and retry
		from the start (another thread may refill it first). */
		mutex_exit(&(buf_dblwr->mutex));

		buf_dblwr_flush_buffered_writes();

		goto try_again;
	}

	/* Destination slot of this page inside the in-memory
	doublewrite buffer. */
	byte*	p = buf_dblwr->write_buf
		+ srv_page_size * buf_dblwr->first_free;

	/* We request frame here to get correct buffer in case of
	encryption and/or page compression */
	void * frame = buf_page_get_frame(bpage);

	if (auto zip_size = bpage->zip_size()) {
		UNIV_MEM_ASSERT_RW(bpage->zip.data, zip_size);
		/* Copy the compressed page and clear the rest. */
		memcpy(p, frame, zip_size);
		memset(p + zip_size, 0x0, srv_page_size - zip_size);
	} else {
		ut_a(buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE);

		UNIV_MEM_ASSERT_RW(frame, srv_page_size);
		memcpy_aligned<OS_FILE_LOG_BLOCK_SIZE>(p, frame,
						       srv_page_size);
	}

	/* Remember which buffer pool page occupies this slot, so that
	buf_dblwr_flush_buffered_writes() can later write it to its
	real location in the data file. */
	buf_dblwr->buf_block_arr[buf_dblwr->first_free] = bpage;

	buf_dblwr->first_free++;
	buf_dblwr->b_reserved++;

	ut_ad(!buf_dblwr->batch_running);
	ut_ad(buf_dblwr->first_free == buf_dblwr->b_reserved);
	ut_ad(buf_dblwr->b_reserved <= srv_doublewrite_batch_size);

	if (buf_dblwr->first_free == srv_doublewrite_batch_size) {
		/* This page filled the last slot: trigger the flush
		ourselves instead of waiting for the next caller. */
		mutex_exit(&(buf_dblwr->mutex));

		buf_dblwr_flush_buffered_writes();

		return;
	}

	mutex_exit(&(buf_dblwr->mutex));
}

/********************************************************************//**
Writes a page to the doublewrite buffer on disk, sync it, then write
the page to the datafile and sync the datafile. This function is used
for single page flushes. If all the buffers allocated for single page
flushes in the doublewrite buffer are in use we wait here for one to
become free. We are guaranteed that a slot will become free because any
thread that is using a slot must also release the slot before leaving
this function. */
void
buf_dblwr_write_single_page(
/*========================*/
	buf_page_t*	bpage,	/*!< in: buffer block to write */
	bool		sync)	/*!< in: true if sync IO requested */
{
	ulint		n_slots;
	ulint		size;
	ulint		offset;
	ulint		i;

	ut_a(buf_page_in_file(bpage));
	ut_a(srv_use_doublewrite_buf);
	ut_a(buf_dblwr != NULL);

	/* total number of slots available for single page flushes
	starts from srv_doublewrite_batch_size to the end of the
	buffer. */
	size = TRX_SYS_DOUBLEWRITE_BLOCKS * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE;
	ut_a(size > srv_doublewrite_batch_size);
	n_slots = size - srv_doublewrite_batch_size;

	if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) {

		/* Check that the actual page in the buffer pool is
		not corrupt and the LSN values are sane. */
		buf_dblwr_check_block((buf_block_t*) bpage);

		/* Check that the page as written to the doublewrite
		buffer has sane LSN values. */
		if (!bpage->zip.data) {
			ut_d(buf_dblwr_check_page_lsn(
				     *bpage, ((buf_block_t*) bpage)->frame));
		}
	}

retry:
	mutex_enter(&buf_dblwr->mutex);
	if (buf_dblwr->s_reserved == n_slots) {

		/* All slots are reserved. Wait on s_event for some
		other thread to release one; reset under the mutex so
		the wakeup cannot be missed. */
		int64_t	sig_count = os_event_reset(buf_dblwr->s_event);
		mutex_exit(&buf_dblwr->mutex);
		os_event_wait_low(buf_dblwr->s_event, sig_count);

		goto retry;
	}

	/* Linear scan of the single-page-flush slot range for a
	free slot. */
	for (i = srv_doublewrite_batch_size; i < size; ++i) {

		if (!buf_dblwr->in_use[i]) {
			break;
		}
	}

	/* We are guaranteed to find a slot. */
	ut_a(i < size);
	buf_dblwr->in_use[i] = true;
	buf_dblwr->s_reserved++;
	buf_dblwr->buf_block_arr[i] = bpage;

	/* increment the doublewrite flushed pages counter */
	srv_stats.dblwr_pages_written.inc();
	srv_stats.dblwr_writes.inc();

	mutex_exit(&buf_dblwr->mutex);

	/* Lets see if we are going to write in the first or second
	block of the doublewrite buffer. */
	if (i < TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) {
		offset = buf_dblwr->block1 + i;
	} else {
		offset = buf_dblwr->block2 + i
			 - TRX_SYS_DOUBLEWRITE_BLOCK_SIZE;
	}

	/* We deal with compressed and uncompressed pages a little
	differently here. In case of uncompressed pages we can
	directly write the block to the allocated slot in the
	doublewrite buffer in the system tablespace and then after
	syncing the system table space we can proceed to write the page
	in the datafile.
	In case of compressed page we first do a memcpy of the block
	to the in-memory buffer of doublewrite before proceeding to
	write it. This is so because we want to pad the remaining
	bytes in the doublewrite page with zeros. */

	/* We request frame here to get correct buffer in case of
	encryption and/or page compression */
	void * frame = buf_page_get_frame(bpage);

	if (auto zip_size = bpage->zip_size()) {
		/* Stage the compressed payload in the in-memory slot
		and zero-pad up to the full page size. */
		memcpy(buf_dblwr->write_buf + srv_page_size * i,
		       frame, zip_size);

		memset(buf_dblwr->write_buf + srv_page_size * i
		       + zip_size, 0x0,
		       srv_page_size - zip_size);

		/* Synchronous write of the padded copy to the
		doublewrite area in the system tablespace. */
		fil_io(IORequestWrite,
		       true,
		       page_id_t(TRX_SYS_SPACE, offset),
		       0,
		       0,
		       srv_page_size,
		       (void *)(buf_dblwr->write_buf + srv_page_size * i),
		       NULL);
	} else {
		/* It is a regular page. Write it directly to the
		doublewrite buffer */
		fil_io(IORequestWrite,
		       true,
		       page_id_t(TRX_SYS_SPACE, offset),
		       0,
		       0,
		       srv_page_size,
		       (void*) frame,
		       NULL);
	}

	/* Now flush the doublewrite buffer data to disk */
	fil_flush(TRX_SYS_SPACE);

	/* We know that the write has been flushed to disk now
	and during recovery we will find it in the doublewrite buffer
	blocks. Next do the write to the intended position. The IO
	completion path releases the slot reserved above. */
	buf_dblwr_write_block_to_datafile(bpage, sync);
}