Commit 477285c8 authored by Marko Mäkelä

MDEV-31253 Freed data pages are not always being scrubbed

fil_space_t::flush_freed(): Renamed from buf_flush_freed_pages();
this is a backport of aa458506 from 10.6.
Invoke log_write_up_to() on last_freed_lsn, instead of avoiding
the operation when the log has not yet been written.
A more costly alternative would be for log_checkpoint() to invoke
this function on every affected tablespace.
parent 279d0120
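The change is easiest to read as a retry loop: where the old code skipped the scrub whenever the redo log had not yet been flushed past the LSN of the most recent page free, the new code forces the log write and retries. Below is a minimal standalone sketch of that pattern; all names are invented stand-ins (the real loop is in fil_space_t::flush_freed() in the diff that follows), and this is not the actual InnoDB code.

```cpp
// Standalone sketch of the new retry pattern (all names invented; the real
// loop is in fil_space_t::flush_freed() below).
#include <cstdint>
#include <mutex>

using lsn_t= uint64_t;

// Stand-ins for the redo log subsystem (log_sys.get_flushed_lsn() and
// log_write_up_to() in InnoDB), stubbed so the sketch is self-contained.
static lsn_t durable_lsn= 0;
static lsn_t log_flushed_lsn() { return durable_lsn; }
static void log_write_up_to(lsn_t lsn) { if (durable_lsn < lsn) durable_lsn= lsn; }

struct space_sketch
{
  std::mutex freed_range_mutex;
  bool freed_ranges_empty= true; // stand-in for range_set freed_ranges
  lsn_t last_freed_lsn= 0;

  // Returns true with freed_range_mutex held once the log is durable up to
  // last_freed_lsn; returns false if there is nothing to scrub.
  bool prepare_scrub()
  {
    for (;;)
    {
      freed_range_mutex.lock();
      if (freed_ranges_empty)
      {
        freed_range_mutex.unlock();
        return false;
      }
      const lsn_t flush_lsn= last_freed_lsn;
      if (log_flushed_lsn() >= flush_lsn)
        return true;               // safe to scrub; caller unlocks afterwards
      freed_range_mutex.unlock();
      // Old behaviour: give up here, leaving the freed pages unscrubbed.
      // New behaviour: force the log write and try again.
      log_write_up_to(flush_lsn);
    }
  }
};
```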
@@ -1042,63 +1042,65 @@ static page_id_t buf_flush_check_neighbors(const fil_space_t &space,
  return i;
}

MY_ATTRIBUTE((nonnull, warn_unused_result))
/** Write punch-hole or zeroes of the freed ranges when
innodb_immediate_scrub_data_uncompressed from the freed ranges.
@param space tablespace which may contain ranges of freed pages
@param writable whether the tablespace is writable
MY_ATTRIBUTE((warn_unused_result))
/** Apply freed_ranges to the file.
@param writable whether the file is writable
@return number of pages written or hole-punched */
static uint32_t buf_flush_freed_pages(fil_space_t *space, bool writable)
uint32_t fil_space_t::flush_freed(bool writable)
{
  const bool punch_hole= space->punch_hole;
  if (!punch_hole && !srv_immediate_scrub_data_uncompressed)
    return 0;
  mysql_mutex_assert_not_owner(&buf_pool.flush_list_mutex);
  mysql_mutex_assert_not_owner(&buf_pool.mutex);
  space->freed_range_mutex.lock();
  if (space->freed_ranges.empty() ||
      log_sys.get_flushed_lsn() < space->get_last_freed_lsn())
  for (;;)
  {
    space->freed_range_mutex.unlock();
    return 0;
    freed_range_mutex.lock();
    if (freed_ranges.empty())
    {
      freed_range_mutex.unlock();
      return 0;
    }
    const lsn_t flush_lsn= last_freed_lsn;
    if (log_sys.get_flushed_lsn() >= flush_lsn)
      break;
    freed_range_mutex.unlock();
    log_write_up_to(flush_lsn, true);
  }
  const unsigned physical_size{space->physical_size()};
  const unsigned physical{physical_size()};
  range_set freed_ranges= std::move(space->freed_ranges);
  range_set freed= std::move(freed_ranges);
  uint32_t written= 0;
  if (!writable);
  else if (punch_hole)
  {
    for (const auto &range : freed_ranges)
    for (const auto &range : freed)
    {
      written+= range.last - range.first + 1;
      space->reacquire();
      space->io(IORequest(IORequest::PUNCH_RANGE),
                os_offset_t{range.first} * physical_size,
                (range.last - range.first + 1) * physical_size,
                nullptr);
      reacquire();
      io(IORequest(IORequest::PUNCH_RANGE),
         os_offset_t{range.first} * physical,
         (range.last - range.first + 1) * physical, nullptr);
    }
  }
  else
  {
    for (const auto &range : freed_ranges)
    for (const auto &range : freed)
    {
      written+= range.last - range.first + 1;
      for (os_offset_t i= range.first; i <= range.last; i++)
      {
        space->reacquire();
        space->io(IORequest(IORequest::WRITE_ASYNC),
                  i * physical_size, physical_size,
                  const_cast<byte*>(field_ref_zero));
        reacquire();
        io(IORequest(IORequest::WRITE_ASYNC), i * physical, physical,
           const_cast<byte*>(field_ref_zero));
      }
    }
  }
  space->freed_range_mutex.unlock();
  freed_range_mutex.unlock();
  return written;
}
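For context on the two I/O paths in flush_freed() above: when the file system supports hole punching, each freed range is deallocated with a punch request; otherwise, under innodb_immediate_scrub_data_uncompressed, every page in the range is overwritten with zeroes. The sketch below shows roughly equivalent synchronous operations on a plain Linux file descriptor; scrub_range() is a made-up helper, and this is not the asynchronous, reference-counted IORequest path that InnoDB actually uses.

```cpp
// Rough Linux illustration of "punch a hole or write zeroes" for a page range.
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <cstdint>
#include <vector>

static bool scrub_range(int fd, bool can_punch_hole,
                        uint32_t first_page, uint32_t last_page,
                        uint32_t page_size)
{
  const off_t offset= static_cast<off_t>(first_page) * page_size;
  const off_t len= static_cast<off_t>(last_page - first_page + 1) * page_size;

  if (can_punch_hole)
    // Deallocate the byte range but keep the file size unchanged.
    return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                     offset, len) == 0;

  // Fall back to overwriting each page with zeroes (synchronously here,
  // unlike the WRITE_ASYNC requests in the diff above).
  const std::vector<char> zeroes(page_size, 0);
  for (off_t ofs= offset; ofs < offset + len; ofs+= page_size)
    if (pwrite(fd, zeroes.data(), page_size, ofs) !=
        static_cast<ssize_t>(page_size))
      return false;
  return true;
}
```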
@@ -1225,7 +1227,7 @@ static ulint buf_free_from_unzip_LRU_list_batch(ulint max)
static std::pair<fil_space_t*, uint32_t> buf_flush_space(const uint32_t id)
{
  if (fil_space_t *space= fil_space_t::get(id))
    return {space, buf_flush_freed_pages(space, true)};
    return {space, space->flush_freed(true)};
  return {nullptr, 0};
}
@@ -1617,7 +1619,7 @@ bool buf_flush_list_space(fil_space_t *space, ulint *n_flushed)
  bool acquired= space->acquire();
  {
    const uint32_t written{buf_flush_freed_pages(space, acquired)};
    const uint32_t written{space->flush_freed(acquired)};
    mysql_mutex_lock(&buf_pool.mutex);
    if (written)
      buf_pool.stat.n_pages_written+= written;
@@ -728,6 +728,13 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf)
	return count;
}

/** @return whether a page has been freed */
inline bool fil_space_t::is_freed(uint32_t page)
{
  std::lock_guard<std::mutex> freed_lock(freed_range_mutex);
  return freed_ranges.contains(page);
}

/** Issues read requests for pages which recovery wants to read in.
@param[in] space_id tablespace id
@param[in] page_nos array of page numbers to read, with the
@@ -747,7 +754,7 @@ void buf_read_recv_pages(ulint space_id, const uint32_t* page_nos, ulint n)
	for (ulint i = 0; i < n; i++) {
		/* Ignore if the page already present in freed ranges. */
		if (space->freed_ranges.contains(page_nos[i])) {
		if (space->is_freed(page_nos[i])) {
			continue;
		}
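The new fil_space_t::is_freed() keeps the freed_ranges lookup behind freed_range_mutex, so buf_read_recv_pages() no longer reads the container without the lock. As a rough analogue, here is a minimal mutex-protected range container with the lookup used in this commit; InnoDB's range_set is a different, more refined data structure, and the class and member names below are invented.

```cpp
// Minimal mutex-protected set of page-number ranges (invented names; InnoDB's
// range_set is a different, more elaborate structure).
#include <cstdint>
#include <map>
#include <mutex>

class freed_pages
{
  std::mutex m;
  std::map<uint32_t, uint32_t> ranges; // key = first page, value = last page

public:
  void add(uint32_t first, uint32_t last)
  {
    std::lock_guard<std::mutex> g(m);
    ranges[first]= last; // sketch: adjacent ranges are not merged
  }

  // Counterpart of fil_space_t::is_freed(): true if 'page' lies in any range.
  bool contains(uint32_t page)
  {
    std::lock_guard<std::mutex> g(m);
    auto it= ranges.upper_bound(page); // first range starting after 'page'
    if (it == ranges.begin())
      return false;
    --it;                              // greatest range starting at or before 'page'
    return page <= it->second;
  }
};
```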
@@ -415,16 +415,16 @@ struct fil_space_t final
	punch hole */
	bool punch_hole;

	/** mutex to protect freed ranges */
	std::mutex freed_range_mutex;
	/** Variables to store freed ranges. This can be used to write
	zeroes/punch the hole in files. Protected by freed_mutex */
	range_set freed_ranges;
private:
	/** mutex to protect freed_ranges and last_freed_lsn */
	std::mutex freed_range_mutex;
	/** Stores last page freed lsn. Protected by freed_mutex */
	lsn_t last_freed_lsn;
	/** Ranges of freed page numbers; protected by freed_range_mutex */
	range_set freed_ranges;
	/** LSN of freeing last page; protected by freed_range_mutex */
	lsn_t last_freed_lsn;
public:
	ulint magic_n;/*!< FIL_SPACE_MAGIC_N */
	/** @return whether doublewrite buffering is needed */
@@ -434,6 +434,14 @@ struct fil_space_t final
		buf_dblwr.is_initialised();
	}

	/** @return whether a page has been freed */
	inline bool is_freed(uint32_t page);

	/** Apply freed_ranges to the file.
	@param writable whether the file is writable
	@return number of pages written or hole-punched */
	uint32_t flush_freed(bool writable);

	/** Append a file to the chain of files of a space.
	@param[in] name file name of a file that is not open
	@param[in] handle file handle, or OS_FILE_CLOSED
@@ -589,8 +597,6 @@ struct fil_space_t final
	/** Close all tablespace files at shutdown */
	static void close_all();

	/** @return last_freed_lsn */
	lsn_t get_last_freed_lsn() { return last_freed_lsn; }

	/** Update last_freed_lsn */
	void update_last_freed_lsn(lsn_t lsn)
	{
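In the header, freed_range_mutex, freed_ranges and last_freed_lsn move into a private section, so outside code has to go through accessors such as is_freed(), flush_freed() and update_last_freed_lsn(). The sketch below illustrates the invariant this encapsulation presumably protects: the range set and the LSN of the latest free are updated and consumed as a consistent pair under one mutex. All names here are hypothetical apart from the field names borrowed from the struct above.

```cpp
// Hypothetical illustration of keeping freed ranges and the latest "freed"
// LSN consistent under one mutex; not fil_space_t's actual interface.
#include <cstdint>
#include <mutex>
#include <utility>
#include <vector>

using lsn_t= uint64_t;

class freed_state
{
  std::mutex freed_range_mutex;
  std::vector<std::pair<uint32_t, uint32_t>> freed_ranges; // [first, last] pairs
  lsn_t last_freed_lsn= 0;

public:
  // Record a freed page range together with the LSN of the operation.
  void record_free(uint32_t first, uint32_t last, lsn_t lsn)
  {
    std::lock_guard<std::mutex> g(freed_range_mutex);
    freed_ranges.emplace_back(first, last); // sketch: no merging of ranges
    last_freed_lsn= lsn;
  }

  // Take everything that must be scrubbed, plus the LSN the redo log has to
  // reach before the scrubbing may touch the data file.
  std::pair<std::vector<std::pair<uint32_t, uint32_t>>, lsn_t> take()
  {
    std::lock_guard<std::mutex> g(freed_range_mutex);
    std::vector<std::pair<uint32_t, uint32_t>> out;
    out.swap(freed_ranges); // leave the set empty, like std::move of freed_ranges
    return {std::move(out), last_freed_lsn};
  }
};
```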