Commit f9547748 authored by Eugene Kosov

MDEV-17313 Data race in ib_counter_t

ib_counter_t: make all reads/writes to m_counter relaxed atomic
parent f545e3cf
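
The race in question: m_counter was a plain Type array updated concurrently from many threads with +=, which is a data race under the C++11 memory model even when torn values would be tolerable. The patch below wraps each slot in std::atomic<Type> and performs every access with std::memory_order_relaxed, which makes the accesses atomic while adding no ordering constraints. A minimal standalone sketch of that pattern (the toy_counter_t name, the fixed slot count, and the modulo slot choice are illustrative, not from the MariaDB tree):

#include <atomic>
#include <cstddef>

/* Sketch of a sharded, relaxed-atomic statistics counter in the style
of ib_counter_t.  Each slot is a std::atomic<Type>; all reads and
writes use std::memory_order_relaxed, so there is no data race but
also no ordering guarantee -- fine for a counter whose total is only
"close enough" anyway. */
template <typename Type, size_t N = 64>
struct toy_counter_t {
	toy_counter_t() {
		/* Explicit zeroing: before C++20, std::atomic's default
		constructor leaves the value indeterminate. */
		for (auto &c : m_counter) {
			c.store(0, std::memory_order_relaxed);
		}
	}

	/* Hypothetical slot choice; the real class delegates this to an
	Indexer policy that picks a slot per CPU/thread. */
	void add(size_t index, Type n) {
		m_counter[index % N].fetch_add(n, std::memory_order_relaxed);
	}

	/* Sum of all slots; concurrent add() calls may or may not be
	included, which is the documented "fuzzy" behaviour. */
	operator Type() const {
		Type total = 0;
		for (size_t i = 0; i < N; ++i) {
			total += m_counter[i].load(std::memory_order_relaxed);
		}
		return total;
	}

private:
	std::atomic<Type> m_counter[N];
};

In InnoDB terms one would instantiate something like toy_counter_t<ulint>: writers call add(slot, 1) and a reader simply converts the counter to Type, exactly as the operator Type() in the patched class does.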
storage/innobase/include/ut0counter.h
@@ -31,6 +31,7 @@ Created 2012/04/12 by Sunny Bains
 #include <my_rdtsc.h>
 #include "univ.i"
 #include "os0thread.h"
+#include <atomic>
 
 /** CPU cache line size */
 #ifdef CPU_LEVEL1_DCACHE_LINESIZE
@@ -86,8 +87,8 @@ struct counter_indexer_t : public generic_indexer_t<Type, N> {
 
 #define	default_indexer_t	counter_indexer_t
 
-/** Class for using fuzzy counters. The counter is not protected by any
-mutex and the results are not guaranteed to be 100% accurate but close
+/** Class for using fuzzy counters. The counter is relaxed atomic
+so the results are not guaranteed to be 100% accurate but close
 enough. Creates an array of counters and separates each element by the
 CACHE_LINE_SIZE bytes */
 template <
@@ -96,20 +97,6 @@ template <
 	template<typename, int> class Indexer = default_indexer_t>
 struct MY_ALIGNED(CACHE_LINE_SIZE) ib_counter_t
 {
-#ifdef UNIV_DEBUG
-	~ib_counter_t()
-	{
-		size_t	n = (CACHE_LINE_SIZE / sizeof(Type));
-
-		/* Check that we aren't writing outside our defined bounds. */
-		for (size_t i = 0; i < UT_ARR_SIZE(m_counter); i += n) {
-			for (size_t j = 1; j < n - 1; ++j) {
-				ut_ad(m_counter[i + j] == 0);
-			}
-		}
-	}
-#endif /* UNIV_DEBUG */
-
 	/** Increment the counter by 1. */
 	void inc() UNIV_NOTHROW { add(1); }
 
@@ -129,15 +116,16 @@ struct MY_ALIGNED(CACHE_LINE_SIZE) ib_counter_t
 
 		ut_ad(i < UT_ARR_SIZE(m_counter));
 
-		m_counter[i] += n;
+		m_counter[i].fetch_add(n, std::memory_order_relaxed);
 	}
 
-	/* @return total value - not 100% accurate, since it is not atomic. */
+	/* @return total value - not 100% accurate, since it is relaxed atomic*/
 	operator Type() const UNIV_NOTHROW {
 		Type	total = 0;
 
 		for (size_t i = 0; i < N; ++i) {
-			total += m_counter[m_policy.offset(i)];
+			total += m_counter[m_policy.offset(i)].load(
+					std::memory_order_relaxed);
 		}
 
 		return(total);
@@ -148,7 +136,9 @@ struct MY_ALIGNED(CACHE_LINE_SIZE) ib_counter_t
 	Indexer<Type, N>m_policy;
 
 	/** Slot 0 is unused. */
-	Type		m_counter[(N + 1) * (CACHE_LINE_SIZE / sizeof(Type))];
+	std::atomic<Type> m_counter[(N + 1) * (CACHE_LINE_SIZE / sizeof(Type))];
+	static_assert(sizeof(std::atomic<Type>) == sizeof(Type),
+		      "Sizes should match");
 };
 
 #endif /* ut0counter_h */
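
One point worth calling out: the new static_assert guards the layout arithmetic, since both the array length and the cache-line spacing of m_counter are computed from sizeof(Type). If std::atomic<Type> ever carried a hidden lock or padding, that spacing would silently break. A standalone check along the same lines (the uint64_t instantiation and the C++17 is_always_lock_free probe are illustrative additions, not part of the patch):

#include <atomic>
#include <cstdint>

/* Mirror of the patch's layout assumption for one concrete Type. */
static_assert(sizeof(std::atomic<uint64_t>) == sizeof(uint64_t),
	      "wrapping the counter in std::atomic must not change its size");

/* C++17 probe (not in the patch itself): on mainstream 64-bit targets
this atomic is lock-free, so fetch_add compiles down to a plain atomic
add instruction rather than taking a hidden mutex. */
#if __cplusplus >= 201703L
static_assert(std::atomic<uint64_t>::is_always_lock_free,
	      "a locking atomic would defeat the point of a fuzzy counter");
#endif

int main() { return 0; }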