diff --git a/linux/file.c b/linux/file.c
index b6ab29b3bec0ee4acd20fd2841c34656807003aa..e024cf2fae75b80b792725360bb834637cedf73e 100644
--- a/linux/file.c
+++ b/linux/file.c
@@ -114,7 +114,7 @@ toku_file_fsync(int fd) {
         if (r)
             assert(errno==EINTR);
     }
-    toku_sync_fetch_and_add_uint64(&toku_fsync_count, 1);
+    toku_sync_fetch_and_increment_uint64(&toku_fsync_count);
     toku_sync_fetch_and_add_uint64(&toku_fsync_time, get_tnow() - tstart);
     return r;
 }
diff --git a/newbrt/tests/cachetable-checkpoint-pending.c b/newbrt/tests/cachetable-checkpoint-pending.c
index 9ab5d4d234a0fb9ecbc7169d53defd570340e267..6bc7af69d52e31a643e01d4f752e3780b43a2d81 100644
--- a/newbrt/tests/cachetable-checkpoint-pending.c
+++ b/newbrt/tests/cachetable-checkpoint-pending.c
@@ -39,9 +39,9 @@ flush (CACHEFILE UU(thiscf), CACHEKEY UU(key), void *value, void *UU(extraargs),
     int *v = value;
     if (*v!=expect_value) printf("got %d expect %d\n", *v, expect_value);
     assert(*v==expect_value);
-    (void)toku_sync_fetch_and_add_int32(&n_flush, 1);
-    if (write_me) (void)toku_sync_fetch_and_add_int32(&n_write_me, 1);
-    if (keep_me) (void)toku_sync_fetch_and_add_int32(&n_keep_me, 1);
+    (void)toku_sync_fetch_and_increment_int32(&n_flush);
+    if (write_me) (void)toku_sync_fetch_and_increment_int32(&n_write_me);
+    if (keep_me) (void)toku_sync_fetch_and_increment_int32(&n_keep_me);
     sleep_random();
 }
 
diff --git a/src/elocks.c b/src/elocks.c
index ac4a031190c71c26a9e908a8a0509ce3986e662f..b4b23ef1b1983c7fd5640d48729fce6fbc6e6a84 100644
--- a/src/elocks.c
+++ b/src/elocks.c
@@ -151,7 +151,7 @@ toku_ydb_lock(void) {
         memset(ydbtime, 0, sizeof (struct ydbtime));
         r = toku_pthread_setspecific(ydb_big_lock.time_key, ydbtime);
         assert(r == 0);
-        (void) toku_sync_fetch_and_add_uint64(&status.total_clients, 1);
+        (void) toku_sync_fetch_and_increment_uint64(&status.total_clients);
     }
     if (ydbtime->tacquire) { // delay the thread if the lock acquire time is set and is less than the current time
         if (0) printf("%"PRIu64"\n", ydbtime->tacquire);
@@ -162,21 +162,23 @@ toku_ydb_lock(void) {
             // put an upper bound on the sleep time since the timestamps may be crazy due to thread movement between cpu's or cpu frequency changes
             if (t > MAX_SLEEP) {
                 t = MAX_SLEEP;
-                (void) toku_sync_fetch_and_add_uint64(&status.times_max_sleep_used, 1);
+                (void) toku_sync_fetch_and_increment_uint64(&status.times_max_sleep_used);
             }
+#if !TOKU_WINDOWS || TOKU_WINDOWS_HAS_FAST_ATOMIC_64
             (void) toku_sync_fetch_and_add_uint64(&status.total_sleep_time, t);
-            (void) toku_sync_fetch_and_add_uint64(&status.total_sleepers, 1);
+#endif
+            (void) toku_sync_fetch_and_increment_uint64(&status.total_sleepers);
             usleep(t);
         }
     }
     r = toku_pthread_mutex_trylock(&ydb_big_lock.lock);
     if (r != 0) { // if we can not get the lock, bump the count of the lock waits, and block on the lock
         assert(r == EBUSY);
-        (void) toku_sync_fetch_and_add_int32(&ydb_big_lock.waiters, 1);
-        (void) toku_sync_fetch_and_add_uint64(&status.total_waiters, 1);
+        (void) toku_sync_fetch_and_increment_int32(&ydb_big_lock.waiters);
+        (void) toku_sync_fetch_and_increment_uint64(&status.total_waiters);
         r = toku_pthread_mutex_lock(&ydb_big_lock.lock);
         assert(r == 0);
-        (void) toku_sync_fetch_and_add_int32(&ydb_big_lock.waiters, -1);
+        (void) toku_sync_fetch_and_decrement_int32(&ydb_big_lock.waiters);
     }
     status.max_requested_sleep = u64max(status.max_requested_sleep, requested_sleep);
     toku_cachetable_get_miss_times(NULL, &ydb_big_lock.start_miss_count, &ydb_big_lock.start_miss_time);
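
For reference, a minimal sketch of what the toku_sync_fetch_and_increment_* and
toku_sync_fetch_and_decrement_* wrappers called above might look like, assuming
(like the existing toku_sync_fetch_and_add_* helpers) they are thin shims over
GCC's __sync builtins; this is illustrative only, not the project's actual
portability header, which must also cover Windows, where the
TOKU_WINDOWS_HAS_FAST_ATOMIC_64 guard above implies 64-bit atomics may be slow
or emulated.

    #include <stdint.h>

    static inline uint64_t
    toku_sync_fetch_and_increment_uint64(volatile uint64_t *a) {
        // like __sync_fetch_and_add, returns the value *before* the increment
        return __sync_fetch_and_add(a, 1);
    }

    static inline int32_t
    toku_sync_fetch_and_increment_int32(volatile int32_t *a) {
        return __sync_fetch_and_add(a, 1);
    }

    static inline int32_t
    toku_sync_fetch_and_decrement_int32(volatile int32_t *a) {
        return __sync_fetch_and_sub(a, 1);
    }

One plausible motivation for dedicated increment/decrement variants: on Windows
they can map to InterlockedIncrement/InterlockedDecrement rather than a general
fetch-and-add, which would explain why the patch rewrites every add-by-1 and
add-by-minus-1 call site to use them.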