Commit d68ea88e authored by Bradley C. Kuszmaul, committed by Yoni Fogel

Fix #5361 by locking a little earlier.  This passes with helgrind but helgrind isn't yet part of the tests.

git-svn-id: file:///svn/toku/tokudb@46721 c7de825b-a66e-492c-adef-691d508d4ae1
parent eda125ac
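
The fix reorders the slow path of increment_partitioned_counter(): the XMALLOC of the per-thread counter stays outside the lock, since it touches no shared state, but pc_lock() is now taken before the thread-local bookkeeping rather than after it, so the shared thread-registration structures are only modified while the lock is held. A minimal sketch of the before/after shape, using hypothetical helper names in place of the real code shown in the diff below:

    // Before (racy): thread registration ran before pc_lock() was taken.
    if (lc == NULL) {
        register_thread_bookkeeping();   // hypothetical name; touches shared state unlocked
        XMALLOC(lc);
        lc->sum = 0;
        pc_lock();                       // taken too late
        grow_thread_local_array(pc_key); // hypothetical name
        pc_unlock();
    }

    // After (fixed): only the allocation happens outside the lock.
    if (lc == NULL) {
        XMALLOC(lc);                     // no shared state touched; safe unlocked
        pc_lock();                       // "locking a little earlier" -- the fix
        register_thread_bookkeeping();   // now protected by the lock
        lc->sum = 0;
        grow_thread_local_array(pc_key);
        pc_unlock();
    }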
@@ -263,6 +263,9 @@ void increment_partitioned_counter(PARTITIONED_COUNTER pc, uint64_t amount)
     uint64_t pc_key = pc->pc_key;
     struct local_counter *lc = get_thread_local_counter(pc_key, &thread_local_array);
     if (lc==NULL) {
+        XMALLOC(lc); // Might as well do the malloc without holding the pc lock.  But most of the rest of this work needs the lock.
+        pc_lock();
         // Set things up so that this thread terminates, the thread-local parts of the counter will be destroyed and merged into their respective counters.
         if (!thread_local_array_inited) {
             pk_setspecific(thread_destructor_key, "dont care");
@@ -271,14 +274,11 @@ void increment_partitioned_counter(PARTITIONED_COUNTER pc, uint64_t amount)
             all_thread_local_arrays.insert(&thread_local_ll_elt, &thread_local_array);
         }
-        XMALLOC(lc);
         lc->sum = 0;
         HELGRIND_VALGRIND_HG_DISABLE_CHECKING(&lc->sum, sizeof(lc->sum)); // the counter increment is kind of racy.
         lc->owner_pc = pc;
         lc->thread_local_array = &thread_local_array;
-        pc_lock(); // Might as well do the malloc without holding the pc lock.  But the rest of this work needs the lock.
         // Grow the array if needed, filling in NULLs
         while (thread_local_array.get_size() <= pc_key) {
             thread_local_array.push(NULL);
...
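
HELGRIND_VALGRIND_HG_DISABLE_CHECKING above exempts lc->sum from race checking, which the source comment justifies by noting that the counter increment is "kind of racy" by design. Assuming it is a thin wrapper over the standard client request in <valgrind/helgrind.h> (an assumption; the wrapper's definition is not part of this diff), it would look roughly like:

    #include <valgrind/helgrind.h>

    // Hypothetical definition; the real TokuDB wrapper may differ.
    #define HELGRIND_VALGRIND_HG_DISABLE_CHECKING(addr, len) \
        VALGRIND_HG_DISABLE_CHECKING((addr), (len))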
@@ -360,6 +360,7 @@ static void do_testit2 (void)
 // A thread increments the counter, then lets us know through a spin wait, then waits until we destroy the counter.
 {
     pthread_t t;
+    HELGRIND_VALGRIND_HG_DISABLE_CHECKING(&spinwait, sizeof(spinwait)); // this is a racy volatile variable.
     {
         PARTITIONED_COUNTER mypc = create_partitioned_counter();
         increment_partitioned_counter(mypc, 1); // make sure that the long-lived thread also increments the partitioned counter, to test for #5321.
...
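
The test change applies the same annotation to spinwait, the volatile flag the test's comment describes: the worker thread increments the counter, signals through a spin wait, then blocks until the counter is destroyed. A minimal sketch of that handshake, with illustrative names rather than the test's actual code:

    static volatile int spinwait = 0;

    static void *counter_worker(void *arg) {
        // ... increment the partitioned counter here ...
        spinwait = 1;                // signal the main thread (intentionally racy)
        while (spinwait == 1) { }    // spin until main destroys the counter
        return arg;
    }

    // Main thread side:
    //     while (spinwait == 0) { }   // wait for the worker's signal
    //     ... destroy the counter ...
    //     spinwait = 0;               // release the worker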
@@ -57,3 +57,16 @@
    drd:ConflictingAccess
    fun:clone
 }
+{
+   <insert_a_suppression_name_here>
+   drd:ConflictingAccess
+   ...
+   fun:_dl_runtime_resolve
+}
+{
+   <insert_a_suppression_name_here>
+   drd:ConflictingAccess
+   ...
+   fun:random
+}
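
Both new drd entries keep the <insert_a_suppression_name_here> placeholder and use the "..." frame wildcard, which is the form valgrind itself emits when asked to write suppressions. A typical way to generate such entries, assuming an illustrative test binary name:

    valgrind --tool=drd --gen-suppressions=all ./test_partitioned_counter

The generated blocks can then be pasted into the suppression file, optionally with the placeholder replaced by a descriptive name.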
@@ -13,3 +13,19 @@
    Helgrind:Race
    fun:toku_get_checkpointing_user_data_status
 }
+{
+   ignore_race_inside_pthread_mutex_lock
+   Helgrind:Race
+   fun:pthread_mutex_lock
+}
+{
+   ignore_race_inside_pthread_mutex_unlock
+   Helgrind:Race
+   ...
+   fun:pthread_mutex_unlock
+}
+{
+   ignore_race_inside_pthread_join
+   Helgrind:Race
+   fun:pthread_join
+}
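
Unlike the drd entries, the three Helgrind entries carry descriptive names (any unique string is legal in that slot) and silence races reported from inside the pthread primitives themselves rather than from TokuDB code. Since the commit message notes helgrind is not yet part of the tests, the file would be passed explicitly when running the tool, for example (binary and file names illustrative):

    valgrind --tool=helgrind --suppressions=helgrind.suppressions ./test_partitioned_counter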