Commit fec12a47 authored by Rich Prohaska, committed by Yoni Fogel

#4137 remove atomic.h refs[t:4137]

git-svn-id: file:///svn/toku/tokudb@37157 c7de825b-a66e-492c-adef-691d508d4ae1
parent ede63bd2
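
The change below is mechanical: each toku_sync_* wrapper call becomes a direct call to the GCC __sync builtin it wrapped (or, for some brt_status counters, a plain increment), and the #include of toku_atomic.h is dropped. A minimal sketch of the before/after pattern, using an illustrative counter name that is not from the tree:

#include <stdint.h>

static volatile uint64_t example_total;   // hypothetical counter, for illustration only

static void count_one_event(void) {
    // before: toku_sync_fetch_and_increment_uint64(&example_total);
    // after: call the GCC builtin directly; it returns the previous value, ignored here
    (void) __sync_fetch_and_add(&example_total, 1);
}
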
@@ -2,7 +2,6 @@
#ident "Copyright (c) 2007-2010 Tokutek Inc. All rights reserved."
#include <toku_portability.h>
-#include <toku_atomic.h>
#include <unistd.h>
#include <errno.h>
#include <toku_assert.h>
@@ -60,8 +59,8 @@ try_again_after_handling_write_error(int fd, size_t len, ssize_t r_write) {
int out_of_disk_space = 1;
assert(!out_of_disk_space); //Give an error message that might be useful if this is the only one that survives.
} else {
-toku_sync_fetch_and_increment_uint64(&toku_write_enospc_total);
-toku_sync_fetch_and_increment_uint32(&toku_write_enospc_current);
+__sync_fetch_and_add(&toku_write_enospc_total, 1);
+__sync_fetch_and_add(&toku_write_enospc_current, 1);
time_t tnow = time(0);
toku_write_enospc_last_time = tnow;
@@ -89,7 +88,7 @@ try_again_after_handling_write_error(int fd, size_t len, ssize_t r_write) {
}
sleep(toku_write_enospc_sleep);
try_again = 1;
-toku_sync_fetch_and_decrement_uint32(&toku_write_enospc_current);
+__sync_fetch_and_sub(&toku_write_enospc_current, 1);
break;
}
}
@@ -379,10 +378,10 @@ file_fsync_internal (int fd, uint64_t *duration_p) {
assert(rr==EINTR);
}
}
-toku_sync_fetch_and_increment_uint64(&toku_fsync_count);
+__sync_fetch_and_add(&toku_fsync_count, 1);
uint64_t duration;
duration = get_tnow() - tstart;
-toku_sync_fetch_and_add_uint64(&toku_fsync_time, duration);
+__sync_fetch_and_add(&toku_fsync_time, duration);
if (duration_p) *duration_p = duration;
return r;
}
@@ -430,8 +429,8 @@ int
toku_file_fsync(int fd) {
uint64_t duration;
int r = file_fsync_internal (fd, &duration);
-toku_sync_fetch_and_increment_uint64(&sched_fsync_count);
-toku_sync_fetch_and_add_uint64(&sched_fsync_time, duration);
+__sync_fetch_and_add(&sched_fsync_count, 1);
+__sync_fetch_and_add(&sched_fsync_time, duration);
return r;
}
......
@@ -5,7 +5,6 @@
#include "includes.h"
#include "sort.h"
-#include "toku_atomic.h"
#include "threadpool.h"
#include <compress.h>
@@ -1771,7 +1770,7 @@ toku_maybe_upgrade_brt(BRT t) { // possibly do some work to complete the version
if (r == 0 && upgrade) {
r = toku_brt_optimize_for_upgrade(t);
if (r==0)
-toku_sync_fetch_and_increment_uint64(&upgrade_status.optimized_for_upgrade);
+__sync_fetch_and_add(&upgrade_status.optimized_for_upgrade, 1);
}
if (r == 0) {
t->h->upgrade_brt_performed = TRUE; // no further upgrade necessary
@@ -2210,7 +2209,7 @@ deserialize_brtheader_versioned (int fd, struct rbuf *rb, struct brt_header **br
h->flags &= ~TOKU_DB_VALCMP_BUILTIN_13;
}
h->layout_version++;
-toku_sync_fetch_and_increment_uint64(&upgrade_status.header_13); // how many header nodes upgraded from v13
+__sync_fetch_and_add(&upgrade_status.header_13, 1); // how many header nodes upgraded from v13
upgrade++;
//Fall through on purpose
case BRT_LAYOUT_VERSION_14:
......
@@ -108,7 +108,6 @@ Split_or_merge (node, childnum) {
#include "ule.h"
#include "xids.h"
#include "roll.h"
-#include "toku_atomic.h"
#include "sub_block.h"
#include "sort.h"
@@ -608,7 +607,7 @@ PAIR_ATTR make_brtnode_pair_attr(BRTNODE node) {
static uint64_t dict_id_serial = 1;
static DICTIONARY_ID
next_dict_id(void) {
-uint64_t i = toku_sync_fetch_and_increment_uint64(&dict_id_serial);
+uint64_t i = __sync_fetch_and_add(&dict_id_serial, 1);
assert(i); // guarantee unique dictionary id by asserting 64-bit counter never wraps
DICTIONARY_ID d = {.dictid = i};
return d;
@@ -696,15 +695,15 @@ void toku_brtnode_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM nodename
}
if (height == 0) { // statistics incremented only when disk I/O is done, so worth the threadsafe count
if (for_checkpoint)
-(void) toku_sync_fetch_and_increment_uint64(&brt_status.disk_flush_leaf_for_checkpoint);
+brt_status.disk_flush_leaf_for_checkpoint++;
else
-(void) toku_sync_fetch_and_increment_uint64(&brt_status.disk_flush_leaf);
+brt_status.disk_flush_leaf++;
}
else {
if (for_checkpoint)
-(void) toku_sync_fetch_and_increment_uint64(&brt_status.disk_flush_nonleaf_for_checkpoint);
+brt_status.disk_flush_nonleaf_for_checkpoint++;
else
-(void) toku_sync_fetch_and_increment_uint64(&brt_status.disk_flush_nonleaf);
+brt_status.disk_flush_nonleaf++;
}
}
//printf("%s:%d %p->mdict[0]=%p\n", __FILE__, __LINE__, brtnode, brtnode->mdicts[0]);
@@ -1174,9 +1173,9 @@ void toku_brtnode_free (BRTNODE *nodep) {
toku_mempool_destroy(mp);
}
}
-toku_sync_fetch_and_increment_uint64(&brt_status.destroy_leaf);
+brt_status.destroy_leaf++;
} else {
-toku_sync_fetch_and_increment_uint64(&brt_status.destroy_nonleaf);
+brt_status.destroy_nonleaf++;
}
toku_destroy_brtnode_internals(node);
toku_free(node);
@@ -2194,7 +2193,7 @@ static int do_update(brt_update_func update_fun, DESCRIPTOR desc, BASEMENTNODE b
if (cmd->type == BRT_UPDATE) {
// key is passed in with command (should be same as from le)
// update function extra is passed in with command
-toku_sync_fetch_and_increment_uint64(&brt_status.updates);
+brt_status.updates++;
keyp = cmd->u.id.key;
update_function_extra = cmd->u.id.val;
} else if (cmd->type == BRT_UPDATE_BROADCAST_ALL) {
@@ -2203,7 +2202,7 @@ static int do_update(brt_update_func update_fun, DESCRIPTOR desc, BASEMENTNODE b
assert(le); // for broadcast updates, we just hit all leafentries
// so this cannot be null
assert(cmd->u.id.key->size == 0);
-toku_sync_fetch_and_increment_uint64(&brt_status.updates_broadcast);
+brt_status.updates_broadcast++;
keyp = toku_fill_dbt(&key, le_key(le), le_keylen(le));
update_function_extra = cmd->u.id.val;
} else {
@@ -3197,22 +3196,22 @@ maybe_destroy_child_blbs(BRTNODE node, BRTNODE child)
static void
update_flush_status(BRTNODE UU(parent), BRTNODE child, int cascades)
{
-toku_sync_fetch_and_increment_uint64(&brt_status.flush_total);
+brt_status.flush_total++;
if (cascades > 0) {
-toku_sync_fetch_and_increment_uint64(&brt_status.flush_cascades);
+brt_status.flush_cascades++;
switch (cascades) {
case 1:
-toku_sync_fetch_and_increment_uint64(&brt_status.flush_cascades_1); break;
+brt_status.flush_cascades_1++; break;
case 2:
-toku_sync_fetch_and_increment_uint64(&brt_status.flush_cascades_2); break;
+brt_status.flush_cascades_2++; break;
case 3:
-toku_sync_fetch_and_increment_uint64(&brt_status.flush_cascades_3); break;
+brt_status.flush_cascades_3++; break;
case 4:
-toku_sync_fetch_and_increment_uint64(&brt_status.flush_cascades_4); break;
+brt_status.flush_cascades_4++; break;
case 5:
-toku_sync_fetch_and_increment_uint64(&brt_status.flush_cascades_5); break;
+brt_status.flush_cascades_5++; break;
default:
-toku_sync_fetch_and_increment_uint64(&brt_status.flush_cascades_gt_5); break;
+brt_status.flush_cascades_gt_5++; break;
}
}
bool flush_needs_io = false;
@@ -3222,9 +3221,9 @@ update_flush_status(BRTNODE UU(parent), BRTNODE child, int cascades)
}
}
if (flush_needs_io) {
-toku_sync_fetch_and_increment_uint64(&brt_status.flush_needed_io);
+brt_status.flush_needed_io++;
} else {
-toku_sync_fetch_and_increment_uint64(&brt_status.flush_in_memory);
+brt_status.flush_in_memory++;
}
}
@@ -3495,7 +3494,7 @@ void toku_apply_cmd_to_leaf(
snapshot_txnids,
live_list_reverse);
} else {
-toku_sync_fetch_and_increment_uint64(&brt_status.msn_discards);
+brt_status.msn_discards++;
}
}
}
@@ -3518,7 +3517,7 @@ void toku_apply_cmd_to_leaf(
live_list_reverse);
if (bn_made_change) *made_change = 1;
} else {
-toku_sync_fetch_and_increment_uint64(&brt_status.msn_discards);
+brt_status.msn_discards++;
}
}
}
@@ -4637,7 +4636,7 @@ toku_brt_change_descriptor(
fd = toku_cachefile_get_and_pin_fd (t->cf);
r = toku_update_descriptor(t->h, &new_d, fd);
if (r == 0) // very infrequent operation, worth precise threadsafe count
-(void) toku_sync_fetch_and_increment_uint64(&brt_status.descriptor_set);
+brt_status.descriptor_set++;
toku_cachefile_unpin_fd(t->cf);
if (r!=0) goto cleanup;
@@ -5817,7 +5816,7 @@ do_brt_leaf_put_cmd(BRT t, BASEMENTNODE bn, BRTNODE ancestor, int childnum, OMT
}
brt_leaf_put_cmd(t->compare_fun, t->update_fun, &t->h->descriptor, bn, &brtcmd, &made_change, &BP_WORKDONE(ancestor, childnum), snapshot_txnids, live_list_reverse);
} else {
-toku_sync_fetch_and_increment_uint64(&brt_status.msn_discards);
+brt_status.msn_discards++;
}
}
......
@@ -13,8 +13,6 @@ extern "C" {
#endif
#endif
-#include "toku_atomic.h"
static toku_pthread_mutex_t event_mutex = TOKU_PTHREAD_MUTEX_INITIALIZER;
static void lock_events(void) {
int r = toku_pthread_mutex_lock(&event_mutex); assert(r == 0);
@@ -41,7 +39,6 @@ static int event_add_and_fetch(void) {
int r = ++event_count;
unlock_events();
return r;
-// return toku_sync_increment_and_fetch_int32(&event_count);
}
static int do_user_errors = 0;
@@ -115,9 +112,9 @@ static void reset_my_malloc_counts(void) {
__attribute__((__unused__))
static void *my_malloc(size_t n) {
-(void) toku_sync_fetch_and_increment_int32(&my_malloc_count); // my_malloc_count++;
+(void) __sync_fetch_and_add(&my_malloc_count, 1); // my_malloc_count++;
if (n >= my_big_malloc_limit) {
-(void) toku_sync_fetch_and_increment_int32(&my_big_malloc_count); // my_big_malloc_count++;
+(void) __sync_fetch_and_add(&my_big_malloc_count, 1); // my_big_malloc_count++;
if (do_malloc_errors) {
if (event_add_and_fetch() == event_count_trigger) {
event_hit();
@@ -133,9 +130,9 @@ static int do_realloc_errors = 0;
__attribute__((__unused__))
static void *my_realloc(void *p, size_t n) {
-(void) toku_sync_increment_and_fetch_int32(&my_realloc_count); // my_realloc_count++;
+(void) __sync_fetch_and_add(&my_realloc_count, 1); // my_realloc_count++;
if (n >= my_big_malloc_limit) {
-(void) toku_sync_increment_and_fetch_int32(&my_big_realloc_count); // my_big_realloc_count++;
+(void) __sync_fetch_and_add(&my_big_realloc_count, 1); // my_big_realloc_count++;
if (do_realloc_errors) {
if (event_add_and_fetch() == event_count_trigger) {
event_hit();
......
@@ -6,7 +6,6 @@
#include <stdio.h>
#include <unistd.h>
#include "checkpoint.h"
-#include "toku_atomic.h"
static int N; // how many items in the table
static CACHEFILE cf;
@@ -50,9 +49,9 @@ flush (
int *v = value;
if (*v!=expect_value) printf("got %d expect %d\n", *v, expect_value);
assert(*v==expect_value);
-(void)toku_sync_fetch_and_increment_int32(&n_flush);
-if (write_me) (void)toku_sync_fetch_and_increment_int32(&n_write_me);
-if (keep_me) (void)toku_sync_fetch_and_increment_int32(&n_keep_me);
+(void)__sync_fetch_and_add(&n_flush, 1);
+if (write_me) (void)__sync_fetch_and_add(&n_write_me, 1);
+if (keep_me) (void)__sync_fetch_and_add(&n_keep_me, 1);
sleep_random();
}
......
@@ -15,7 +15,6 @@
#include "le-cursor.h"
#include "indexer.h"
#include "brt-internal.h"
-#include "toku_atomic.h"
#include "tokuconst.h"
#include "brt.h"
#include "leafentry.h"
......
@@ -17,7 +17,6 @@
#include "ydb-internal.h"
#include "le-cursor.h"
#include "indexer.h"
-#include "toku_atomic.h"
#include "tokuconst.h"
#include "brt.h"
#include "leafentry.h"
@@ -176,13 +175,13 @@ toku_indexer_create_indexer(DB_ENV *env,
*indexerp = indexer;
-(void) toku_sync_fetch_and_increment_uint64(&status.create);
-(void) toku_sync_fetch_and_increment_uint32(&status.current);
+(void) __sync_fetch_and_add(&status.create, 1);
+(void) __sync_fetch_and_add(&status.current, 1);
if ( status.current > status.max )
status.max = status.current; // not worth a lock to make threadsafe, may be inaccurate
} else {
-(void) toku_sync_fetch_and_increment_uint64(&status.create_fail);
+(void) __sync_fetch_and_add(&status.create_fail, 1);
free_indexer(indexer);
}
@@ -271,9 +270,9 @@ build_index(DB_INDEXER *indexer) {
// - unique checks?
if ( result == 0 ) {
-(void) toku_sync_fetch_and_increment_uint64(&status.build);
+(void) __sync_fetch_and_add(&status.build, 1);
} else {
-(void) toku_sync_fetch_and_increment_uint64(&status.build_fail);
+(void) __sync_fetch_and_add(&status.build_fail, 1);
}
@@ -283,7 +282,7 @@ build_index(DB_INDEXER *indexer) {
static int
close_indexer(DB_INDEXER *indexer) {
int r = 0;
-(void) toku_sync_fetch_and_decrement_uint32(&status.current);
+(void) __sync_fetch_and_sub(&status.current, 1);
toku_ydb_lock();
{
@@ -308,17 +307,17 @@ close_indexer(DB_INDEXER *indexer) {
toku_ydb_unlock();
if ( r == 0 ) {
-(void) toku_sync_fetch_and_increment_uint64(&status.close);
+(void) __sync_fetch_and_add(&status.close, 1);
} else {
-(void) toku_sync_fetch_and_increment_uint64(&status.close_fail);
+(void) __sync_fetch_and_add(&status.close_fail, 1);
}
return r;
}
static int
abort_indexer(DB_INDEXER *indexer) {
-(void) toku_sync_fetch_and_decrement_uint32(&status.current);
-(void) toku_sync_fetch_and_increment_uint64(&status.abort);
+(void) __sync_fetch_and_sub(&status.current, 1);
+(void) __sync_fetch_and_add(&status.abort, 1);
toku_ydb_lock();
{
......
@@ -20,7 +20,6 @@
#include "ydb_load.h"
#include "checkpoint.h"
#include "brt-internal.h"
-#include "toku_atomic.h"
#define lazy_assert(a) assert(a) // indicates code is incomplete
@@ -254,13 +253,13 @@ int toku_loader_create_loader(DB_ENV *env,
create_exit:
loader_add_refs(loader);
if (rval == 0) {
-(void) toku_sync_fetch_and_increment_uint64(&status.create);
-(void) toku_sync_fetch_and_increment_uint32(&status.current);
+(void) __sync_fetch_and_add(&status.create, 1);
+(void) __sync_fetch_and_add(&status.current, 1);
if (status.current > status.max)
status.max = status.current; // not worth a lock to make threadsafe, may be inaccurate
}
else {
-(void) toku_sync_fetch_and_increment_uint64(&status.create_fail);
+(void) __sync_fetch_and_add(&status.create_fail, 1);
free_loader(loader);
}
return rval;
@@ -345,7 +344,7 @@ int toku_loader_put(DB_LOADER *loader, DBT *key, DBT *val)
int toku_loader_close(DB_LOADER *loader)
{
-(void) toku_sync_fetch_and_decrement_uint32(&status.current);
+(void) __sync_fetch_and_sub(&status.current, 1);
int r=0;
if ( loader->i->err_errno != 0 ) {
if ( loader->i->error_callback != NULL ) {
@@ -381,16 +380,16 @@ int toku_loader_close(DB_LOADER *loader)
free_loader(loader);
toku_ydb_unlock();
if (r==0)
-(void) toku_sync_fetch_and_increment_uint64(&status.close);
+(void) __sync_fetch_and_add(&status.close, 1);
else
-(void) toku_sync_fetch_and_increment_uint64(&status.close_fail);
+(void) __sync_fetch_and_add(&status.close_fail, 1);
return r;
}
int toku_loader_abort(DB_LOADER *loader)
{
-(void) toku_sync_fetch_and_decrement_uint32(&status.current);
-(void) toku_sync_fetch_and_increment_uint64(&status.abort);
+(void) __sync_fetch_and_sub(&status.current, 1);
+(void) __sync_fetch_and_add(&status.abort, 1);
int r=0;
if ( loader->i->err_errno != 0 ) {
if ( loader->i->error_callback != NULL ) {
......
@@ -21,7 +21,6 @@
#include "test.h"
#include "toku_pthread.h"
-#include "toku_atomic.h"
#include <db.h>
#include <sys/stat.h>
#include "ydb.h"
......
@@ -8,7 +8,6 @@
#include "test.h"
#include "toku_pthread.h"
-#include "toku_atomic.h"
#include <db.h>
#include <sys/stat.h>
#include "ydb-internal.h"
......
@@ -17,7 +17,6 @@
#include "test.h"
#include "toku_pthread.h"
-#include "toku_atomic.h"
#include <db.h>
#include <sys/stat.h>
#include "ydb-internal.h"
......
@@ -40,7 +40,6 @@
#include "test.h"
#include "toku_pthread.h"
-#include "toku_atomic.h"
#include <db.h>
#include <sys/stat.h>
#include "ydb-internal.h"
......
@@ -6,7 +6,6 @@
#include "test.h"
#include "toku_pthread.h"
-#include "toku_atomic.h"
#include <db.h>
#include <sys/stat.h>
#include "ydb-internal.h"
......
#if !defined(TOKU_ATOMIC_H)
#define TOKU_ATOMIC_H
#ifdef __cplusplus
extern "C" {
#endif
static inline uint32_t
toku_sync_fetch_and_add_uint32(volatile uint32_t *a, uint32_t b) {
// icc previously required _InterlockedExchangeAdd((LONG*)a, b);
return __sync_fetch_and_add(a, b);
}
static inline uint32_t toku_sync_fetch_and_increment_uint32(volatile uint32_t *a) {
// ICC has an _InterlockedIncrement function that returns the new result. We'll just use our primitive.
return toku_sync_fetch_and_add_uint32(a, 1);
}
static inline uint32_t toku_sync_fetch_and_decrement_uint32(volatile uint32_t *a) {
return toku_sync_fetch_and_add_uint32(a, -1);
}
static inline int32_t toku_sync_fetch_and_add_int32(volatile int32_t *a, int32_t b) {
return __sync_fetch_and_add(a, b);
}
static inline int32_t toku_sync_fetch_and_increment_int32(volatile int32_t *a) {
return toku_sync_fetch_and_add_int32(a, 1);
}
static inline int32_t toku_sync_fetch_and_decrement_int32(volatile int32_t *a) {
return toku_sync_fetch_and_add_int32(a, -1);
}
static inline int32_t toku_sync_add_and_fetch_int32(volatile int32_t *a, int32_t b) {
return __sync_add_and_fetch(a, b);
}
static inline int32_t toku_sync_increment_and_fetch_int32(volatile int32_t *a) {
return __sync_add_and_fetch(a, 1);
}
static inline int32_t toku_sync_decrement_and_fetch_int32(volatile int32_t *a) {
return __sync_add_and_fetch(a, -1);
}
#if __GNUC__ && __i386__
// workaround for a gcc 4.1.2 bug on 32 bit platforms.
static uint64_t toku_sync_fetch_and_add_uint64(volatile uint64_t *a, uint64_t b) __attribute__((noinline, unused));
static uint64_t toku_sync_fetch_and_add_uint64(volatile uint64_t *a, uint64_t b) {
return __sync_fetch_and_add(a, b);
}
#else
static inline uint64_t toku_sync_fetch_and_add_uint64(volatile uint64_t *a, uint64_t b) {
return __sync_fetch_and_add(a, b);
}
#endif
static inline uint64_t toku_sync_fetch_and_increment_uint64(volatile uint64_t *a) {
return toku_sync_fetch_and_add_uint64(a, 1);
}
#define TOKU_WINDOWS_MIN_SUPPORTED_IS_VISTA 0
//Vista has 64 bit atomic instruction functions.
//64 bit windows should also have it, but we're using neither right now.
#if TOKU_WINDOWS_MIN_SUPPORTED_IS_VISTA || TOKU_WINDOWS_64
#define TOKU_WINDOWS_HAS_ATOMIC_64 1
#else
#define TOKU_WINDOWS_HAS_ATOMIC_64 0
#endif
#ifdef __cplusplus
}
#endif
#endif
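
The wrappers above distinguish fetch-then-add (built on __sync_fetch_and_add, which returns the value before the addition) from add-then-fetch (built on __sync_add_and_fetch, which returns the value after). Most call sites in the diff discard the return value, so either builtin would do; where the value is used (dict_id_serial in next_dict_id), the fetch-then-add semantics are preserved. A small sketch of the difference, using an illustrative local counter rather than one from the tree:

#include <assert.h>
#include <stdint.h>

int main(void) {
    volatile uint64_t serial = 1;
    uint64_t old_val = __sync_fetch_and_add(&serial, 1);  // returns the old value: old_val == 1, serial == 2
    uint64_t new_val = __sync_add_and_fetch(&serial, 1);  // returns the new value: serial == 3, new_val == 3
    assert(old_val == 1 && new_val == 3 && serial == 3);
    return 0;
}
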