Commit 371ab00b authored by Bradley C. Kuszmaul

Rename the last batch of symbols to toku_-prefixed names. Closes #8.

git-svn-id: file:///svn/tokudb@829 c7de825b-a66e-492c-adef-691d508d4ae1
parent 5da5a9fc
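The diff below follows two mechanical patterns: exported tokulogger_*/tokutxn_* names gain the toku_ prefix (for example, tokulogger_commit becomes toku_logger_commit and tokutxn_begin becomes toku_logger_txn_begin), and file-local test helpers and globals are given internal linkage with static. A minimal before/after sketch of the pattern, using declarations taken from the hunks below (types such as TOKUTXN, TXNID, and TOKULOGGER come from the tree's brttypes.h):

/* before: old names, everything with external linkage */
int tokulogger_commit (TOKUTXN txn);
int tokutxn_begin (TOKUTXN parent, TOKUTXN *tokutxn, TXNID txnid64, TOKULOGGER logger);
void setup (void);            /* test helper visible program-wide */

/* after: toku_-prefixed exported names; test helpers private to their file */
int toku_logger_commit (TOKUTXN txn);
int toku_logger_txn_begin (TOKUTXN parent, TOKUTXN *tokutxn, TXNID txnid64, TOKULOGGER logger);
static void setup (void);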
......@@ -90,9 +90,9 @@ ybt.o: ybt.h brttypes.h ../include/db.h
ybt-test: ybt-test.o ybt.o memory.o
ybt-test.o: ybt.h ../include/db.h
cachetable.o: cachetable.h hashfun.h memory.h
brt-test: ybt.o brt.o hashtable.o pma.o memory.o brt-serialize.o cachetable.o header-io.o ybt.o key.o primes.o log.o mempool.o brt-verify.o fingerprint.o log_code.o
brt-test: ybt.o brt.o hashtable.o pma.o memory.o brt-serialize.o cachetable.o ybt.o key.o primes.o log.o mempool.o brt-verify.o fingerprint.o log_code.o
log.o: log_header.h log-internal.h log.h wbuf.h crc.h brttypes.h $(BRT_INTERNAL_H_INCLUDES)
brt-test.o brt.o: brt.h ../include/db.h hashtable.h pma.h brttypes.h cachetable.h
brt-test.o brt.o: brt.h ../include/db.h hashtable.h pma.h brttypes.h cachetable.h memory.h
brt-serialize-test.o: $(BRT_INTERNAL_H_INCLUDES)
brt.o: $(BRT_INTERNAL_H_INCLUDES) key.h log_header.h
mdict.o: pma.h
......@@ -101,7 +101,6 @@ memory.o: memory.h
primes.o: primes.h
hashtest: hashtable.o memory.o primes.o
brt-serialize.o: $(BRT_INTERNAL_H_INCLUDES) key.h wbuf.h rbuf.h
header-io.o: $(BRT_INTERNAL_H_INCLUDES)
mdict-test: hashtable.o pma.o memory.o
brt-bigtest: memory.o ybt.o brt.o pma.o cachetable.o key.o hashtable.o brt-serialize.o
brt-bigtest.o: brt.h ../include/db.h
......
......@@ -12,7 +12,7 @@
#include <sys/time.h>
#include <unistd.h>
const char fname[]="sinsert.brt";
static const char fname[]="sinsert.brt";
enum { SERIAL_SPACING = 1<<6 };
enum { ITEMS_TO_INSERT_PER_ITERATION = 1<<20 };
......@@ -21,34 +21,34 @@ enum { BOUND_INCREASE_PER_ITERATION = SERIAL_SPACING*ITEMS_TO_INSERT_PER_ITERATI
enum { NODE_SIZE = 1<<20 };
int nodesize = NODE_SIZE;
int keysize = sizeof (long long);
int valsize = sizeof (long long);
int do_verify =0; /* Do a slow verify after every insert. */
static int nodesize = NODE_SIZE;
static int keysize = sizeof (long long);
static int valsize = sizeof (long long);
static int do_verify =0; /* Do a slow verify after every insert. */
CACHETABLE ct;
BRT t;
static CACHETABLE ct;
static BRT t;
void setup (void) {
static void setup (void) {
int r;
unlink(fname);
r = toku_brt_create_cachetable(&ct, 0, ZERO_LSN, NULL_LOGGER); assert(r==0);
r = toku_open_brt(fname, 0, 1, &t, nodesize, ct, NULL_TXN, toku_default_compare_fun, (DB*)0); assert(r==0);
}
void shutdown (void) {
static void shutdown (void) {
int r;
r = toku_close_brt(t); assert(r==0);
r = toku_cachetable_close(&ct); assert(r==0);
}
void long_long_to_array (unsigned char *a, unsigned long long l) {
static void long_long_to_array (unsigned char *a, unsigned long long l) {
int i;
for (i=0; i<8; i++)
a[i] = (l>>(56-8*i))&0xff;
}
void insert (long long v) {
static void insert (long long v) {
unsigned char kc[keysize], vc[valsize];
DBT kt, vt;
memset(kc, 0, sizeof kc);
......@@ -59,18 +59,18 @@ void insert (long long v) {
if (do_verify) toku_cachetable_verify(ct);
}
void serial_insert_from (long long from) {
static void serial_insert_from (long long from) {
long long i;
for (i=0; i<ITEMS_TO_INSERT_PER_ITERATION; i++) {
insert((from+i)*SERIAL_SPACING);
}
}
long long llrandom (void) {
static long long llrandom (void) {
return (((long long)(random()))<<32) + random();
}
void random_insert_below (long long below) {
static void random_insert_below (long long below) {
long long i;
assert(0 < below);
for (i=0; i<ITEMS_TO_INSERT_PER_ITERATION; i++) {
......@@ -78,11 +78,11 @@ void random_insert_below (long long below) {
}
}
double tdiff (struct timeval *a, struct timeval *b) {
static double tdiff (struct timeval *a, struct timeval *b) {
return (a->tv_sec-b->tv_sec)+1e-6*(a->tv_usec-b->tv_usec);
}
void biginsert (long long n_elements, struct timeval *starttime) {
static void biginsert (long long n_elements, struct timeval *starttime) {
long long i;
struct timeval t1,t2;
int iteration;
......@@ -100,7 +100,7 @@ void biginsert (long long n_elements, struct timeval *starttime) {
}
}
void usage() {
static void usage() {
printf("benchmark-test [--nodesize NODESIZE] [--keysize KEYSIZE] [--valsize VALSIZE] [--verify] [TOTALITEMS]\n");
}
......
......@@ -10,7 +10,7 @@
#include <arpa/inet.h>
#include <stdlib.h>
void test_serialize(void) {
static void test_serialize(void) {
// struct brt source_brt;
int nodesize = 1024;
struct brtnode sn, *dn;
......
......@@ -3,6 +3,7 @@
#include "brt.h"
#include "key.h"
#include "pma.h"
#include "memory.h"
#include <assert.h>
......@@ -14,10 +15,8 @@
#include <unistd.h>
#include <arpa/inet.h>
TOKUTXN const null_txn = 0;
DB * const null_db = 0;
extern long long n_items_malloced;
static TOKUTXN const null_txn = 0;
static DB * const null_db = 0;
static void test0 (void) {
BRT t;
......@@ -609,10 +608,8 @@ static void test_read_what_was_written (void) {
printf(" ok\n");
}
extern void toku_pma_show_stats (void);
/* Test c_get(DB_LAST) on an empty tree */
void test_cursor_last_empty(void) {
static void test_cursor_last_empty(void) {
const char *n="testbrt.brt";
CACHETABLE ct;
BRT brt;
......@@ -645,7 +642,7 @@ void test_cursor_last_empty(void) {
toku_memory_check_all_free();
}
void test_cursor_next (void) {
static void test_cursor_next (void) {
const char *n="testbrt.brt";
CACHETABLE ct;
BRT brt;
......@@ -696,9 +693,9 @@ void test_cursor_next (void) {
}
DB nonce_db;
static DB nonce_db;
int wrong_compare_fun(DB *db, const DBT *a, const DBT *b) {
static int wrong_compare_fun(DB *db, const DBT *a, const DBT *b) {
unsigned int i;
unsigned char *ad=a->data;
unsigned char *bd=b->data;
......@@ -816,13 +813,13 @@ static void test_wrongendian_compare (int wrong_p, unsigned int N) {
toku_memory_check_all_free();
}
int test_cursor_debug = 0;
static int test_cursor_debug = 0;
int test_brt_cursor_keycompare(DB *db __attribute__((unused)), const DBT *a, const DBT *b) {
static int test_brt_cursor_keycompare(DB *db __attribute__((unused)), const DBT *a, const DBT *b) {
return toku_keycompare(a->data, a->size, b->data, b->size);
}
void assert_cursor_notfound(BRT brt, int position) {
static void assert_cursor_notfound(BRT brt, int position) {
BRT_CURSOR cursor;
int r;
DBT kbt, vbt;
......@@ -839,7 +836,7 @@ void assert_cursor_notfound(BRT brt, int position) {
assert(r==0);
}
void assert_cursor_value(BRT brt, int position, long long value) {
static void assert_cursor_value(BRT brt, int position, long long value) {
BRT_CURSOR cursor;
int r;
DBT kbt, vbt;
......@@ -865,7 +862,7 @@ void assert_cursor_value(BRT brt, int position, long long value) {
assert(r==0);
}
void assert_cursor_first_last(BRT brt, long long firstv, long long lastv) {
static void assert_cursor_first_last(BRT brt, long long firstv, long long lastv) {
BRT_CURSOR cursor;
int r;
DBT kbt, vbt;
......@@ -904,7 +901,7 @@ void assert_cursor_first_last(BRT brt, long long firstv, long long lastv) {
assert(r==0);
}
void test_brt_cursor_first(int n, DB *db) {
static void test_brt_cursor_first(int n, DB *db) {
const char *fname="testbrt.brt";
CACHETABLE ct;
BRT brt;
......@@ -946,7 +943,7 @@ void test_brt_cursor_first(int n, DB *db) {
assert(r==0);
}
void test_brt_cursor_last(int n, DB *db) {
static void test_brt_cursor_last(int n, DB *db) {
const char *fname="testbrt.brt";
CACHETABLE ct;
BRT brt;
......@@ -988,7 +985,7 @@ void test_brt_cursor_last(int n, DB *db) {
assert(r==0);
}
void test_brt_cursor_first_last(int n, DB *db) {
static void test_brt_cursor_first_last(int n, DB *db) {
const char *fname="testbrt.brt";
CACHETABLE ct;
BRT brt;
......@@ -1034,7 +1031,7 @@ void test_brt_cursor_first_last(int n, DB *db) {
}
void test_brt_cursor_rfirst(int n, DB *db) {
static void test_brt_cursor_rfirst(int n, DB *db) {
const char *fname="testbrt.brt";
CACHETABLE ct;
BRT brt;
......@@ -1077,7 +1074,7 @@ void test_brt_cursor_rfirst(int n, DB *db) {
assert(r==0);
}
void assert_cursor_walk(BRT brt, int n) {
static void assert_cursor_walk(BRT brt, int n) {
BRT_CURSOR cursor;
int i;
int r;
......@@ -1109,7 +1106,7 @@ void assert_cursor_walk(BRT brt, int n) {
assert(r==0);
}
void test_brt_cursor_walk(int n, DB *db) {
static void test_brt_cursor_walk(int n, DB *db) {
const char *fname="testbrt.brt";
CACHETABLE ct;
BRT brt;
......@@ -1150,7 +1147,7 @@ void test_brt_cursor_walk(int n, DB *db) {
}
void assert_cursor_rwalk(BRT brt, int n) {
static void assert_cursor_rwalk(BRT brt, int n) {
BRT_CURSOR cursor;
int i;
int r;
......@@ -1182,7 +1179,7 @@ void assert_cursor_rwalk(BRT brt, int n) {
assert(r==0);
}
void test_brt_cursor_rwalk(int n, DB *db) {
static void test_brt_cursor_rwalk(int n, DB *db) {
const char *fname="testbrt.brt";
CACHETABLE ct;
BRT brt;
......@@ -1223,7 +1220,7 @@ void test_brt_cursor_rwalk(int n, DB *db) {
}
void assert_cursor_walk_inorder(BRT brt, int n) {
static void assert_cursor_walk_inorder(BRT brt, int n) {
BRT_CURSOR cursor;
int i;
int r;
......@@ -1261,7 +1258,7 @@ void assert_cursor_walk_inorder(BRT brt, int n) {
assert(r==0);
}
void test_brt_cursor_rand(int n, DB *db) {
static void test_brt_cursor_rand(int n, DB *db) {
const char *fname="testbrt.brt";
CACHETABLE ct;
BRT brt;
......@@ -1311,7 +1308,7 @@ void test_brt_cursor_rand(int n, DB *db) {
}
void test_brt_cursor_split(int n, DB *db) {
static void test_brt_cursor_split(int n, DB *db) {
const char *fname="testbrt.brt";
CACHETABLE ct;
BRT brt;
......@@ -1392,7 +1389,7 @@ void test_brt_cursor_split(int n, DB *db) {
assert(r==0);
}
void test_multiple_brt_cursors(int n, DB *db) {
static void test_multiple_brt_cursors(int n, DB *db) {
printf("test_multiple_brt_cursors:%d %p\n", n, db);
int r;
......@@ -1437,7 +1434,7 @@ static int log16(int n) {
return r;
}
void test_multiple_brt_cursor_walk(int n, DB *db) {
static void test_multiple_brt_cursor_walk(int n, DB *db) {
printf("test_multiple_brt_cursor_walk:%d %p\n", n, db);
int r;
......@@ -1524,7 +1521,7 @@ void test_multiple_brt_cursor_walk(int n, DB *db) {
assert(r==0);
}
void test_brt_cursor_set(int n, int cursor_op, DB *db) {
static void test_brt_cursor_set(int n, int cursor_op, DB *db) {
printf("test_brt_cursor_set:%d %d %p\n", n, cursor_op, db);
int r;
......@@ -1597,7 +1594,7 @@ void test_brt_cursor_set(int n, int cursor_op, DB *db) {
assert(r==0);
}
void test_brt_cursor_set_range(int n, DB *db) {
static void test_brt_cursor_set_range(int n, DB *db) {
printf("test_brt_cursor_set_range:%d %p\n", n, db);
int r;
......@@ -1664,7 +1661,7 @@ void test_brt_cursor_set_range(int n, DB *db) {
assert(r==0);
}
void test_brt_cursor_delete(int n, DB *db) {
static void test_brt_cursor_delete(int n, DB *db) {
printf("test_brt_cursor_delete:%d %p\n", n, db);
int error;
......@@ -1726,7 +1723,7 @@ void test_brt_cursor_delete(int n, DB *db) {
assert(error == 0);
}
void test_brt_cursor_get_both(int n, DB *db) {
static void test_brt_cursor_get_both(int n, DB *db) {
printf("test_brt_cursor_get_both:%d %p\n", n, db);
int error;
......@@ -1831,10 +1828,10 @@ void test_brt_cursor_get_both(int n, DB *db) {
}
int test_brt_cursor_inc = 1000;
int test_brt_cursor_limit = 10000;
static int test_brt_cursor_inc = 1000;
static int test_brt_cursor_limit = 10000;
void test_brt_cursor(DB *db) {
static void test_brt_cursor(DB *db) {
int n;
test_multiple_brt_cursors(1, db);
......@@ -1879,7 +1876,7 @@ void test_brt_cursor(DB *db) {
test_brt_cursor_get_both(1000, db); toku_memory_check_all_free();
}
void test_large_kv(int bsize, int ksize, int vsize) {
static void test_large_kv(int bsize, int ksize, int vsize) {
BRT t;
int r;
CACHETABLE ct;
......@@ -1914,7 +1911,7 @@ void test_large_kv(int bsize, int ksize, int vsize) {
* test the key and value limits
* the current implementation crashes when kvsize == bsize/2 rather than fails
*/
void test_brt_limits() {
static void test_brt_limits() {
int bsize = 1024;
int kvsize = 4;
while (kvsize < bsize/2) {
......@@ -1926,7 +1923,7 @@ void test_brt_limits() {
/*
* verify that a delete on an empty tree fails
*/
void test_brt_delete_empty() {
static void test_brt_delete_empty() {
printf("test_brt_delete_empty\n");
BRT t;
......@@ -1954,7 +1951,7 @@ void test_brt_delete_empty() {
* insert n keys, delete all n keys, verify that lookups for all the keys fail,
* verify that a cursor walk of the tree finds nothing
*/
void test_brt_delete_present(int n) {
static void test_brt_delete_present(int n) {
printf("test_brt_delete_present:%d\n", n);
BRT t;
......@@ -2016,7 +2013,7 @@ void test_brt_delete_present(int n) {
r = toku_cachetable_close(&ct); assert(r==0);
}
void test_brt_delete_not_present(int n) {
static void test_brt_delete_not_present(int n) {
printf("test_brt_delete_not_present:%d\n", n);
BRT t;
......@@ -2063,7 +2060,7 @@ void test_brt_delete_not_present(int n) {
r = toku_cachetable_close(&ct); assert(r==0);
}
void test_brt_delete_cursor_first(int n) {
static void test_brt_delete_cursor_first(int n) {
printf("test_brt_delete_cursor_first:%d\n", n);
BRT t;
......@@ -2155,7 +2152,7 @@ void test_brt_delete_cursor_first(int n) {
build a 2 level tree, and expect the last insertion to be
buffered. then delete and lookup. */
void test_insert_delete_lookup(int n) {
static void test_insert_delete_lookup(int n) {
printf("test_insert_delete_lookup:%d\n", n);
BRT t;
......@@ -2199,7 +2196,7 @@ void test_insert_delete_lookup(int n) {
r = toku_cachetable_close(&ct); assert(r==0);
}
void test_brt_delete() {
static void test_brt_delete() {
test_brt_delete_empty(); toku_memory_check_all_free();
test_brt_delete_present(1); toku_memory_check_all_free();
test_brt_delete_present(100); toku_memory_check_all_free();
......
......@@ -170,7 +170,7 @@ void toku_brtnode_flush_callback (CACHEFILE cachefile, DISKOFF nodename, void *b
#if 0
if (rename_p) {
DISKOFF newnodename = malloc_diskblock(brtnode->brt, brtnode->nodesize);
int r=tokulogger_log_block_rename(cachefile_logger(cachefile), cachefile_filenum(cachefile), nodename, newnodename, parent->thisnodename, i);
int r=toku_logger_log_block_rename(cachefile_logger(cachefile), cachefile_filenum(cachefile), nodename, newnodename, parent->thisnodename, i);
assert(r!=0); // !!! This error should be handled better (e.g., what if the disk fills up)
// !!! Don't forget to free the old node (sometime after some future checkpoint. TODO!!!)
brtnode->thisnodename=newnodename;
......@@ -1542,10 +1542,10 @@ int toku_brt_open(BRT t, const char *fname, const char *fname_in_env, const char
t->database_name=0;
goto died0a;
}
tokulogger_log_fcreate(txn, fname_in_env, 0777);
toku_logger_log_fcreate(txn, fname_in_env, 0777);
}
r=toku_cachetable_openfd(&t->cf, cachetable, fd);
tokulogger_log_fopen(txn, fname_in_env, toku_cachefile_filenum(t->cf));
toku_logger_log_fopen(txn, fname_in_env, toku_cachefile_filenum(t->cf));
}
if (r!=0) {
if (0) { died1: toku_cachefile_close(&t->cf); }
......@@ -1582,7 +1582,7 @@ int toku_brt_open(BRT t, const char *fname, const char *fname_in_env, const char
t->h->names=0;
t->h->roots=0;
}
if ((r=tokulogger_log_header(txn, toku_cachefile_filenum(t->cf), t->h))) { goto died6; }
if ((r=toku_logger_log_header(txn, toku_cachefile_filenum(t->cf), t->h))) { goto died6; }
if ((r=setup_brt_root_node(t, t->nodesize, txn))!=0) { died6: if (dbname) goto died5; else goto died2; }
if ((r=toku_cachetable_put(t->cf, 0, t->h, 0, toku_brtheader_flush_callback, toku_brtheader_fetch_callback, 0))) { goto died6; }
} else {
......
......@@ -11,15 +11,15 @@
#include "memory.h"
#include "cachetable.h"
const int test_object_size = 1;
static const int test_object_size = 1;
struct item {
CACHEKEY key;
char *something;
};
int expect_n_flushes=0;
CACHEKEY flushes[100];
static int expect_n_flushes=0;
static CACHEKEY flushes[100];
static void expect1(CACHEKEY key) {
expect_n_flushes=1;
......@@ -29,7 +29,7 @@ static void expectN(CACHEKEY key) {
flushes[expect_n_flushes++]=key;
}
CACHEFILE expect_f;
static CACHEFILE expect_f;
static void flush (CACHEFILE f, CACHEKEY key, void*value, long size __attribute__((__unused__)), BOOL write_me __attribute__((__unused__)), BOOL keep_me __attribute__((__unused__)), LSN modified_lsn __attribute__((__unused__)), BOOL rename_p __attribute__((__unused__))) {
struct item *it = value;
......@@ -55,15 +55,15 @@ static void flush (CACHEFILE f, CACHEKEY key, void*value, long size __attribute_
toku_free(value);
}
struct item *make_item (CACHEKEY key) {
static struct item *make_item (CACHEKEY key) {
struct item *MALLOC(it);
it->key=key;
it->something="something";
return it;
}
CACHEKEY did_fetch=-1;
int fetch (CACHEFILE f, CACHEKEY key, void**value, long *sizep __attribute__((__unused__)), void*extraargs, LSN *written_lsn) {
static CACHEKEY did_fetch=-1;
static int fetch (CACHEFILE f, CACHEKEY key, void**value, long *sizep __attribute__((__unused__)), void*extraargs, LSN *written_lsn) {
printf("Fetch %lld\n", key);
assert (expect_f==f);
assert((long)extraargs==23);
......@@ -74,7 +74,7 @@ int fetch (CACHEFILE f, CACHEKEY key, void**value, long *sizep __attribute__((__
}
void test0 (void) {
static void test0 (void) {
void* t3=(void*)23;
CACHETABLE t;
CACHEFILE f;
......@@ -196,7 +196,7 @@ static int fetch_n (CACHEFILE f __attribute__((__unused__)), CACHEKEY key __attr
}
void test_nested_pin (void) {
static void test_nested_pin (void) {
void *f2=(void*)42;
CACHETABLE t;
CACHEFILE f;
......@@ -233,7 +233,7 @@ void test_nested_pin (void) {
}
void null_flush (CACHEFILE cf __attribute__((__unused__)),
static void null_flush (CACHEFILE cf __attribute__((__unused__)),
CACHEKEY k __attribute__((__unused__)),
void *v __attribute__((__unused__)),
long size __attribute__((__unused__)),
......@@ -242,13 +242,13 @@ void null_flush (CACHEFILE cf __attribute__((__unused__)),
LSN modified_lsn __attribute__((__unused__)),
BOOL rename_p __attribute__((__unused__))) {
}
int add123_fetch (CACHEFILE cf __attribute__((__unused__)), CACHEKEY key, void **value, long *sizep __attribute__((__unused__)), void*extraargs, LSN *written_lsn) {
static int add123_fetch (CACHEFILE cf __attribute__((__unused__)), CACHEKEY key, void **value, long *sizep __attribute__((__unused__)), void*extraargs, LSN *written_lsn) {
assert((long)extraargs==123);
*value = (void*)((unsigned long)key+123L);
written_lsn->lsn = 0;
return 0;
}
int add222_fetch (CACHEFILE cf __attribute__((__unused__)), CACHEKEY key, void **value, long *sizep __attribute__((__unused__)), void*extraargs, LSN *written_lsn) {
static int add222_fetch (CACHEFILE cf __attribute__((__unused__)), CACHEKEY key, void **value, long *sizep __attribute__((__unused__)), void*extraargs, LSN *written_lsn) {
assert((long)extraargs==222);
*value = (void*)((unsigned long)key+222L);
written_lsn->lsn = 0;
......@@ -256,7 +256,7 @@ int add222_fetch (CACHEFILE cf __attribute__((__unused__)), CACHEKEY key, void *
}
void test_multi_filehandles (void) {
static void test_multi_filehandles (void) {
CACHETABLE t;
CACHEFILE f1,f2,f3;
char fname1[]="test_ct.dat";
......@@ -292,18 +292,18 @@ void test_multi_filehandles (void) {
r = toku_cachetable_close(&t); assert(r==0);
}
void test_dirty_flush(CACHEFILE f, CACHEKEY key, void *value, long size, BOOL do_write, BOOL keep, LSN modified_lsn __attribute__((__unused__)), BOOL rename_p __attribute__((__unused__))) {
static void test_dirty_flush(CACHEFILE f, CACHEKEY key, void *value, long size, BOOL do_write, BOOL keep, LSN modified_lsn __attribute__((__unused__)), BOOL rename_p __attribute__((__unused__))) {
printf("test_dirty_flush %p %lld %p %ld %d %d\n", f, key, value, size, do_write, keep);
}
int test_dirty_fetch(CACHEFILE f, CACHEKEY key, void **value_ptr, long *size_ptr, void *arg, LSN *written_lsn) {
static int test_dirty_fetch(CACHEFILE f, CACHEKEY key, void **value_ptr, long *size_ptr, void *arg, LSN *written_lsn) {
*value_ptr = arg;
written_lsn->lsn = 0;
printf("test_dirty_fetch %p %lld %p %ld %p\n", f, key, *value_ptr, *size_ptr, arg);
return 0;
}
void test_dirty() {
static void test_dirty() {
printf("test_dirty\n");
CACHETABLE t;
......@@ -377,7 +377,7 @@ void test_dirty() {
assert(pinned == 0);
r = toku_cachetable_get_and_pin(f, key, &value, NULL, test_dirty_flush,
test_dirty_fetch, 0);
test_dirty_fetch, 0);
assert(r == 0);
// cachetable_print_state(t);
......@@ -401,16 +401,16 @@ void test_dirty() {
assert(r == 0);
}
int test_size_debug;
CACHEKEY test_size_flush_key;
static int test_size_debug;
static CACHEKEY test_size_flush_key;
void test_size_flush_callback(CACHEFILE f, CACHEKEY key, void *value, long size, BOOL do_write, BOOL keep, LSN modified_lsn __attribute__((__unused__)), BOOL rename_p __attribute__((__unused__))) {
static void test_size_flush_callback(CACHEFILE f, CACHEKEY key, void *value, long size, BOOL do_write, BOOL keep, LSN modified_lsn __attribute__((__unused__)), BOOL rename_p __attribute__((__unused__))) {
if (test_size_debug) printf("test_size_flush %p %lld %p %ld %d %d\n", f, key, value, size, do_write, keep);
assert(do_write != 0);
test_size_flush_key = key;
}
void test_size_resize() {
static void test_size_resize() {
printf("test_size_resize\n");
CACHETABLE t;
......@@ -462,7 +462,7 @@ void test_size_resize() {
assert(r == 0);
}
void test_size_flush() {
static void test_size_flush() {
printf("test_size_flush\n");
CACHETABLE t;
......@@ -521,9 +521,9 @@ void test_size_flush() {
}
enum { KEYLIMIT = 4, TRIALLIMIT=64 };
CACHEKEY keys[KEYLIMIT];
void* vals[KEYLIMIT];
int n_keys=0;
static CACHEKEY keys[KEYLIMIT];
static void* vals[KEYLIMIT];
static int n_keys=0;
static void r_flush (CACHEFILE f __attribute__((__unused__)),
CACHEKEY k, void *value,
......@@ -549,7 +549,7 @@ static void r_flush (CACHEFILE f __attribute__((__unused__)),
abort();
}
int r_fetch (CACHEFILE f __attribute__((__unused__)),
static int r_fetch (CACHEFILE f __attribute__((__unused__)),
CACHEKEY key __attribute__((__unused__)),
void**value __attribute__((__unused__)),
long *sizep __attribute__((__unused__)),
......@@ -559,7 +559,7 @@ int r_fetch (CACHEFILE f __attribute__((__unused__)),
return 0;
}
void test_rename (void) {
static void test_rename (void) {
CACHETABLE t;
CACHEFILE f;
int i;
......
......@@ -9,13 +9,13 @@
#include <stdlib.h>
#include <unistd.h>
const int test_object_size = 1;
static const int test_object_size = 1;
CACHETABLE ct;
static CACHETABLE ct;
enum { N_PRESENT_LIMIT = 4, TRIALS=200, N_FILES=2 };
int n_present=0;
struct present_items {
static int n_present=0;
static struct present_items {
CACHEKEY key;
CACHEFILE cf;
} present_items[N_PRESENT_LIMIT];
......@@ -84,7 +84,7 @@ static int fetch_forchain (CACHEFILE f __attribute__((__unused__)), CACHEKEY key
return 0;
}
void verify_cachetable_against_present (void) {
static void verify_cachetable_against_present (void) {
int i;
for (i=0; i<n_present; i++) {
void *v;
......@@ -97,7 +97,7 @@ void verify_cachetable_against_present (void) {
}
void test_chaining (void) {
static void test_chaining (void) {
/* Make sure that the hash chain and the LRU list don't get confused. */
CACHEFILE f[N_FILES];
enum { FILENAME_LEN=100 };
......
/* -*- mode: C; c-basic-offset: 4 -*- */
#ident "Copyright (c) 2007 Tokutek Inc. All rights reserved."
#include "brt-internal.h"
#include <sys/types.h>
#include <unistd.h>
#include <assert.h>
#include <string.h>
int read_sint (int fd, int *result) {
unsigned char b[4];
int r = read(fd, b, 4);
if (r!=4) return 1;
*result = (b[0]<<24) | (b[1]<<16) | (b[2]<<8) | (b[3]<<0);
return 0;
}
int read_uint (int fd, unsigned int *result) {
int sresult;
int r = read_sint(fd, &sresult);
if (r==0) { *result = r; }
return r;
}
int write_int (int fd, unsigned int v) {
unsigned char b[4];
int r;
b[0] = (v>>24)&0xff;
b[1] = (v>>16)&0xff;
b[2] = (v>>8)&0xff;
b[3] = (v>>0)&0xff;
r = write(fd, b, 4);
if (r!=4) return 1;
return 0;
}
int read_diskoff (int fd, DISKOFF *result) {
unsigned int i0,i1;
int r;
r = read_uint(fd, &i0); if(r!=0) return r;
r = read_uint(fd, &i1); if(r!=0) return r;
*result = ((unsigned long long)i0)<<32 | ((unsigned long long)i1);
return 0;
}
int write_diskoff (int fd, DISKOFF v) {
int r;
r = write_int(fd, (unsigned int)(v>>32)); if (r!=0) return r;
r = write_int(fd, (unsigned int)(v&0xffffffff)); if (r!=0) return r;
return 0;
}
int read_bytes (int fd, int l, char *s) {
int r = read(fd, s, l);
if (r==l) return 0;
return -1;
}
int write_bytes (int fd, int l, char *s) {
int r= write(fd, s, l);
if (r==l) return 0;
return -1;
}
int read_brt_header (int fd, struct brt_header *header) {
{
off_t r = lseek(fd, 0, SEEK_SET);
assert(r==0);
}
/* Ignore magic for now. We'll need some magic at the beginning of the file. */
{
int r;
r = read_uint(fd, &header->nodesize);
if (r!=0) return -1;
r = read_diskoff(fd, &header->freelist); assert(r==0); /* These asserts should do something smarter. */
r = read_diskoff(fd, &header->unused_memory); assert(r==0);
r = read_sint(fd, &header->n_named_roots); assert(r==0);
if (header->n_named_roots>0) {
int i;
header->unnamed_root = -1;
MALLOC_N(header->n_named_roots, header->names);
MALLOC_N(header->n_named_roots, header->roots);
for (i=0; i<header->n_named_roots; i++) {
unsigned int l;
char *s;
r = read_diskoff(fd, &header->roots[i]); assert(r==0);
r = read_uint(fd, &l); assert(r==0); /* count includes the trailing null. */
MALLOC_N(l, s);
r = read_bytes(fd, l, s); assert(r==0);
assert(l>0 && s[l-1]==0);
header->names[i] = s;
}
} else {
r = read_diskoff(fd, &header->unnamed_root); assert(r==0);
header->names = 0;
header->roots = 0;
}
}
return 0;
}
int read_brt_h_unused_memory (int fd, DISKOFF *unused_memory) {
off_t r = lseek(fd, 12, SEEK_SET);
assert(r==12);
r = read_diskoff(fd, unused_memory);
return r;
}
int write_brt_h_unused_memory (int fd, DISKOFF unused_memory) {
off_t r = lseek(fd, 12, SEEK_SET);
assert(r==12);
r = write_diskoff(fd, unused_memory);
return r;
}
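The header-io.c listing above serializes integers most-significant-byte first. A self-contained sketch of that 4-byte big-endian round trip, operating on a plain buffer instead of a file descriptor (the names pack_u32 and unpack_u32 are illustrative, not part of the tree):

#include <assert.h>

static void pack_u32 (unsigned char b[4], unsigned int v) {
    /* same byte layout as write_int() above: MSB first */
    b[0] = (v>>24)&0xff;
    b[1] = (v>>16)&0xff;
    b[2] = (v>>8)&0xff;
    b[3] = (v>>0)&0xff;
}

static unsigned int unpack_u32 (const unsigned char b[4]) {
    /* same reassembly as read_sint()/read_uint() above */
    return ((unsigned)b[0]<<24) | ((unsigned)b[1]<<16) | ((unsigned)b[2]<<8) | ((unsigned)b[3]<<0);
}

int main (void) {
    unsigned char b[4];
    pack_u32(b, 0x01020304u);
    assert(b[0]==1 && b[1]==2 && b[2]==3 && b[3]==4);
    assert(unpack_u32(b)==0x01020304u);
    return 0;
}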
......@@ -106,6 +106,6 @@ int toku_default_compare_fun (DB *db __attribute__((__unused__)), const DBT *a,
return toku_keycompare(a->data, a->size, b->data, b->size);
}
int dont_call_this_compare_fun (DB *db __attribute__((__unused__)), const DBT *a __attribute__((__unused__)), const DBT*b __attribute__((__unused__))) {
int toku_dont_call_this_compare_fun (DB *db __attribute__((__unused__)), const DBT *a __attribute__((__unused__)), const DBT*b __attribute__((__unused__))) {
assert(0);
}
......@@ -7,4 +7,4 @@ int toku_keycompare (bytevec key1, ITEMLEN key1len, bytevec key2, ITEMLEN key2le
void toku_test_keycompare (void) ;
int toku_default_compare_fun (DB *, const DBT *, const DBT*);
int dont_call_this_compare_fun (DB *, const DBT *, const DBT*);
int toku_dont_call_this_compare_fun (DB *, const DBT *, const DBT*);
......@@ -19,8 +19,8 @@ struct tokulogger {
int n_in_buf;
};
int tokulogger_find_next_unused_log_file(const char *directory, long long *result);
int tokulogger_find_logfiles (const char *directory, int *n_resultsp, char ***resultp);
int toku_logger_find_next_unused_log_file(const char *directory, long long *result);
int toku_logger_find_logfiles (const char *directory, int *n_resultsp, char ***resultp);
enum lt_command {
LT_COMMIT = 'C',
......@@ -42,7 +42,7 @@ struct tokutxn {
LSN last_lsn; /* Everytime anything is logged, update the LSN. (We need to atomically record the LSN along with writing into the log.) */
};
int tokulogger_finish (TOKULOGGER logger, struct wbuf *wbuf);
int toku_logger_finish (TOKULOGGER logger, struct wbuf *wbuf);
static inline int toku_logsizeof_u_int8_t (u_int32_t v __attribute__((__unused__))) {
return 1;
......
......@@ -18,23 +18,23 @@ int main (int argc __attribute__((__unused__)),
long long lognum;
system(rmrf);
r = mkdir(dname, 0700); assert(r==0);
r = tokulogger_find_next_unused_log_file(dname,&lognum);
r = toku_logger_find_next_unused_log_file(dname,&lognum);
assert(r==0 && lognum==0LL);
r = creat(dname "/log01.tokulog", 0700); assert(r>=0);
r = close(r); assert(r==0);
r = tokulogger_find_next_unused_log_file(dname,&lognum);
r = toku_logger_find_next_unused_log_file(dname,&lognum);
assert(r==0 && lognum==2LL);
r = creat(dname "/log123456789012345.tokulog", 0700); assert(r>=0);
r = close(r); assert(r==0);
r = tokulogger_find_next_unused_log_file(dname,&lognum);
r = toku_logger_find_next_unused_log_file(dname,&lognum);
assert(r==0 && lognum==123456789012346LL);
r = creat(dname "/log3.tokulog", 0700); assert(r>=0);
r = close(r); assert(r==0);
r = tokulogger_find_next_unused_log_file(dname,&lognum);
r = toku_logger_find_next_unused_log_file(dname,&lognum);
assert(r==0 && lognum==123456789012346LL);
return 0;
......
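The test above pins down the log-file naming rule: files are named log<number>.tokulog, and the next unused number is one past the largest number already present (0 for an empty directory). A hedged stand-in for toku_logger_find_next_unused_log_file that captures just that rule (the function name and the sscanf filter are illustrative; the real scanner may be stricter about file names):

#include <dirent.h>
#include <stdio.h>

static int next_unused_lognum (const char *directory, long long *result) {
    DIR *d = opendir(directory);
    if (d==0) return -1;
    long long max = -1;
    struct dirent *de;
    while ((de = readdir(d)) != 0) {
        long long n;
        /* accept names of the form log<number>.tokulog */
        if (sscanf(de->d_name, "log%lld.tokulog", &n)==1 && n>max) max = n;
    }
    closedir(d);
    *result = max + 1;   /* 0 when no log files exist, as the test expects */
    return 0;
}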
......@@ -21,7 +21,7 @@
#include "../src/ydb-internal.h"
#include "log_header.h"
int tokulogger_find_next_unused_log_file(const char *directory, long long *result) {
int toku_logger_find_next_unused_log_file(const char *directory, long long *result) {
DIR *d=opendir(directory);
long long max=-1;
struct dirent *de;
......@@ -37,7 +37,7 @@ int tokulogger_find_next_unused_log_file(const char *directory, long long *resul
return r;
}
int tokulogger_find_logfiles (const char *directory, int *n_resultsp, char ***resultp) {
int toku_logger_find_logfiles (const char *directory, int *n_resultsp, char ***resultp) {
int result_limit=1;
int n_results=0;
char **MALLOC_N(result_limit, result);
......@@ -64,12 +64,12 @@ int tokulogger_find_logfiles (const char *directory, int *n_resultsp, char ***re
return closedir(d);
}
int tokulogger_create_and_open_logger (const char *directory, TOKULOGGER *resultp) {
int toku_logger_create_and_open_logger (const char *directory, TOKULOGGER *resultp) {
TAGMALLOC(TOKULOGGER, result);
if (result==0) return -1;
int r;
long long nexti;
r = tokulogger_find_next_unused_log_file(directory, &nexti);
r = toku_logger_find_next_unused_log_file(directory, &nexti);
if (r!=0) {
died0:
toku_free(result);
......@@ -84,12 +84,12 @@ int tokulogger_create_and_open_logger (const char *directory, TOKULOGGER *result
result->lsn.lsn = 0; // WRONG!!! This should actually be calculated by looking at the log file.
*resultp=result;
return tokulogger_log_bytes(result, 0, "");
return toku_logger_log_bytes(result, 0, "");
}
static int log_format_version=0;
int tokulogger_log_bytes(TOKULOGGER logger, int nbytes, void *bytes) {
int toku_logger_log_bytes(TOKULOGGER logger, int nbytes, void *bytes) {
int r;
//fprintf(stderr, "%s:%d logging %d bytes\n", __FILE__, __LINE__, nbytes);
if (logger->fd==-1) {
......@@ -129,7 +129,7 @@ int tokulogger_log_bytes(TOKULOGGER logger, int nbytes, void *bytes) {
return 0;
}
int tokulogger_log_close(TOKULOGGER *loggerp) {
int toku_logger_log_close(TOKULOGGER *loggerp) {
TOKULOGGER logger = *loggerp;
int r = 0;
if (logger->fd!=-1) {
......@@ -146,7 +146,7 @@ int tokulogger_log_close(TOKULOGGER *loggerp) {
return r;
}
#if 0
int tokulogger_log_brt_remove (TOKULOGGER logger,
int toku_logger_log_brt_remove (TOKULOGGER logger,
TXNID txnid,
diskoff diskoff,
unsigned char *key,
......@@ -157,7 +157,7 @@ n
}
#endif
int tokulogger_fsync (TOKULOGGER logger) {
int toku_logger_fsync (TOKULOGGER logger) {
//return 0;/// NO TXN
//fprintf(stderr, "%s:%d syncing log\n", __FILE__, __LINE__);
if (logger->n_in_buf>0) {
......@@ -172,14 +172,14 @@ int tokulogger_fsync (TOKULOGGER logger) {
return 0;
}
int tokulogger_finish (TOKULOGGER logger, struct wbuf *wbuf) {
int toku_logger_finish (TOKULOGGER logger, struct wbuf *wbuf) {
wbuf_int(wbuf, toku_crc32(0, wbuf->buf, wbuf->ndone));
wbuf_int(wbuf, 4+wbuf->ndone);
return tokulogger_log_bytes(logger, wbuf->ndone, wbuf->buf);
return toku_logger_log_bytes(logger, wbuf->ndone, wbuf->buf);
}
// Log an insertion of a key-value pair into a particular node of the tree.
int tokulogger_log_brt_insert_with_no_overwrite (TOKULOGGER logger,
int toku_logger_log_brt_insert_with_no_overwrite (TOKULOGGER logger,
TXNID txnid,
FILENUM fileid,
DISKOFF diskoff,
......@@ -207,10 +207,10 @@ int tokulogger_log_brt_insert_with_no_overwrite (TOKULOGGER logger,
wbuf_DISKOFF(&wbuf, diskoff);
wbuf_bytes(&wbuf, key, keylen);
wbuf_bytes(&wbuf, val, vallen);
return tokulogger_finish (logger, &wbuf);
return toku_logger_finish (logger, &wbuf);
}
int tokulogger_log_phys_add_or_delete_in_leaf (DB *db, TOKUTXN txn, DISKOFF diskoff, int is_add, const struct kv_pair *pair) {
int toku_logger_log_phys_add_or_delete_in_leaf (DB *db, TOKUTXN txn, DISKOFF diskoff, int is_add, const struct kv_pair *pair) {
assert(is_add==0);
if (txn==0) return 0;
assert(db);
......@@ -235,16 +235,16 @@ int tokulogger_log_phys_add_or_delete_in_leaf (DB *db, TOKUTXN txn, DISKOFF disk
wbuf_DISKOFF(&wbuf, diskoff);
wbuf_bytes(&wbuf, kv_pair_key_const(pair), keylen);
wbuf_bytes(&wbuf, kv_pair_val_const(pair), vallen);
return tokulogger_finish(txn->logger, &wbuf);
return toku_logger_finish(txn->logger, &wbuf);
}
int tokulogger_commit (TOKUTXN txn) {
int toku_logger_commit (TOKUTXN txn) {
int r = toku_log_commit(txn, txn->txnid64);
toku_free(txn);
return r;
}
int tokulogger_log_checkpoint (TOKULOGGER logger, LSN *lsn) {
int toku_logger_log_checkpoint (TOKULOGGER logger, LSN *lsn) {
struct wbuf wbuf;
const int buflen =10;
unsigned char buf[buflen];
......@@ -253,11 +253,11 @@ int tokulogger_log_checkpoint (TOKULOGGER logger, LSN *lsn) {
wbuf_LSN (&wbuf, logger->lsn);
*lsn = logger->lsn;
logger->lsn.lsn++;
return tokulogger_log_bytes(logger, wbuf.ndone, wbuf.buf);
return toku_logger_log_bytes(logger, wbuf.ndone, wbuf.buf);
}
int tokutxn_begin (TOKUTXN parent_tokutxn, TOKUTXN *tokutxn, TXNID txnid64, TOKULOGGER logger) {
int toku_logger_txn_begin (TOKUTXN parent_tokutxn, TOKUTXN *tokutxn, TXNID txnid64, TOKULOGGER logger) {
TAGMALLOC(TOKUTXN, result);
if (result==0) return errno;
result->txnid64 = txnid64;
......@@ -267,7 +267,7 @@ int tokutxn_begin (TOKUTXN parent_tokutxn, TOKUTXN *tokutxn, TXNID txnid64, TOKU
return 0;
}
int tokulogger_log_block_rename (TOKULOGGER logger, FILENUM fileid, DISKOFF olddiskoff, DISKOFF newdiskoff, DISKOFF parentdiskoff, int childnum) {
int toku_logger_log_block_rename (TOKULOGGER logger, FILENUM fileid, DISKOFF olddiskoff, DISKOFF newdiskoff, DISKOFF parentdiskoff, int childnum) {
const int buflen=(+1 // log command
+8 // lsn
+8 // fileid
......@@ -288,10 +288,10 @@ int tokulogger_log_block_rename (TOKULOGGER logger, FILENUM fileid, DISKOFF oldd
wbuf_DISKOFF(&wbuf, newdiskoff);
wbuf_DISKOFF(&wbuf, parentdiskoff);
wbuf_int (&wbuf, childnum);
return tokulogger_finish(logger, &wbuf);
return toku_logger_finish(logger, &wbuf);
}
int tokulogger_log_fcreate (TOKUTXN txn, const char *fname, int mode) {
int toku_logger_log_fcreate (TOKUTXN txn, const char *fname, int mode) {
BYTESTRING bs;
bs.len = strlen(fname);
bs.data = (char*)fname;
......@@ -299,7 +299,7 @@ int tokulogger_log_fcreate (TOKUTXN txn, const char *fname, int mode) {
}
/* fopen isn't really an action. It's just for bookkeeping. We need to know the filename that goes with a filenum. */
int tokulogger_log_fopen (TOKUTXN txn, const char * fname, FILENUM filenum) {
int toku_logger_log_fopen (TOKUTXN txn, const char * fname, FILENUM filenum) {
BYTESTRING bs;
bs.len = strlen(fname);
bs.data = (char*)fname;
......@@ -308,7 +308,7 @@ int tokulogger_log_fopen (TOKUTXN txn, const char * fname, FILENUM filenum) {
}
int tokulogger_log_unlink (TOKUTXN txn, const char *fname) {
int toku_logger_log_unlink (TOKUTXN txn, const char *fname) {
if (txn==0) return 0;
const int fnamelen = strlen(fname);
const int buflen = (+1 // log command
......@@ -321,10 +321,10 @@ int tokulogger_log_unlink (TOKUTXN txn, const char *fname) {
wbuf_init (&wbuf, buf, buflen);
wbuf_char (&wbuf, LT_UNLINK);
wbuf_bytes(&wbuf, fname, fnamelen);
return tokulogger_finish(txn->logger, &wbuf);
return toku_logger_finish(txn->logger, &wbuf);
};
int tokulogger_log_header (TOKUTXN txn, FILENUM filenum, struct brt_header *h) {
int toku_logger_log_header (TOKUTXN txn, FILENUM filenum, struct brt_header *h) {
#if 0
LOGGEDBRTHEADER lh;
lh.size = toku_serialize_brt_header_size(h);
......@@ -371,7 +371,7 @@ int tokulogger_log_header (TOKUTXN txn, FILENUM filenum, struct brt_header *h) {
wbuf_FILENUM(&wbuf, filenum);
r=toku_serialize_brt_header_to_wbuf(&wbuf, h);
if (r!=0) return r;
r=tokulogger_finish(txn->logger, &wbuf);
r=toku_logger_finish(txn->logger, &wbuf);
toku_free(buf);
return r;
#endif
......
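For context on the toku_logger_finish rename above: that function frames a record by appending a 4-byte CRC of the serialized payload and then a 4-byte total length (payload plus both trailer fields) before handing the buffer to toku_logger_log_bytes. A standalone sketch of that framing, using zlib's crc32 (already linked via -lz in src/Makefile) as a stand-in for toku_crc32; the big-endian byte order of the trailer fields is an assumption for illustration, not taken from the diff:

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <zlib.h>   /* crc32(); stand-in for toku_crc32 */

/* Copy the payload and append crc32(payload) followed by the total record
 * length (n + 8, counting both trailer fields). 'out' must hold n + 8 bytes. */
static size_t frame_log_record (const unsigned char *payload, uint32_t n, unsigned char *out) {
    memcpy(out, payload, n);
    uint32_t crc = (uint32_t)crc32(0L, payload, n);
    uint32_t total = n + 8;
    unsigned char *p = out + n;
    p[0]=(crc>>24)&0xff;   p[1]=(crc>>16)&0xff;   p[2]=(crc>>8)&0xff;   p[3]=crc&0xff;
    p[4]=(total>>24)&0xff; p[5]=(total>>16)&0xff; p[6]=(total>>8)&0xff; p[7]=total&0xff;
    return n + 8;
}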
......@@ -6,30 +6,30 @@
#include "../include/db.h"
#include "brttypes.h"
#include "kv-pair.h"
int tokulogger_create_and_open_logger (const char *directory, TOKULOGGER *resultp);
int tokulogger_log_bytes(TOKULOGGER logger, int nbytes, void *bytes);
int tokulogger_log_close(TOKULOGGER *logger);
int tokulogger_log_checkpoint (TOKULOGGER, LSN*);
int toku_logger_create_and_open_logger (const char *directory, TOKULOGGER *resultp);
int toku_logger_log_bytes(TOKULOGGER logger, int nbytes, void *bytes);
int toku_logger_log_close(TOKULOGGER *logger);
int toku_logger_log_checkpoint (TOKULOGGER, LSN*);
int tokulogger_log_phys_add_or_delete_in_leaf (DB *db, TOKUTXN txn, DISKOFF diskoff, int is_add, const struct kv_pair *pair);
int toku_logger_log_phys_add_or_delete_in_leaf (DB *db, TOKUTXN txn, DISKOFF diskoff, int is_add, const struct kv_pair *pair);
int tokulogger_commit (TOKUTXN txn);
int toku_logger_commit (TOKUTXN txn);
int tokulogger_log_block_rename (TOKULOGGER /*logger*/, FILENUM /*fileid*/, DISKOFF /*olddiskoff*/, DISKOFF /*newdiskoff*/, DISKOFF /*parentdiskoff*/, int /*childnum*/);
int toku_logger_log_block_rename (TOKULOGGER /*logger*/, FILENUM /*fileid*/, DISKOFF /*olddiskoff*/, DISKOFF /*newdiskoff*/, DISKOFF /*parentdiskoff*/, int /*childnum*/);
int tokutxn_begin (TOKUTXN /*parent*/,TOKUTXN *, TXNID /*txnid64*/, TOKULOGGER /*logger*/);
int toku_logger_txn_begin (TOKUTXN /*parent*/,TOKUTXN *, TXNID /*txnid64*/, TOKULOGGER /*logger*/);
int tokulogger_log_fcreate (TOKUTXN, const char */*fname*/, int /*mode*/);
int toku_logger_log_fcreate (TOKUTXN, const char */*fname*/, int /*mode*/);
int tokulogger_log_fopen (TOKUTXN, const char * /*fname*/, FILENUM);
int toku_logger_log_fopen (TOKUTXN, const char * /*fname*/, FILENUM);
int tokulogger_log_unlink (TOKUTXN, const char */*fname*/);
int toku_logger_log_unlink (TOKUTXN, const char */*fname*/);
int tokulogger_log_header (TOKUTXN, FILENUM, struct brt_header *);
int toku_logger_log_header (TOKUTXN, FILENUM, struct brt_header *);
int tokulogger_log_newbrtnode (TOKUTXN txn, FILENUM filenum, DISKOFF offset, u_int32_t height, u_int32_t nodesize, char is_dup_sort_mode, u_int32_t rand4fingerprint);
int toku_logger_log_newbrtnode (TOKUTXN txn, FILENUM filenum, DISKOFF offset, u_int32_t height, u_int32_t nodesize, char is_dup_sort_mode, u_int32_t rand4fingerprint);
int tokulogger_fsync (TOKULOGGER logger);
int toku_logger_fsync (TOKULOGGER logger);
int toku_fread_u_int8_t (FILE *f, u_int8_t *v, u_int32_t *crc, u_int32_t *len);
......
......@@ -163,14 +163,14 @@ void generate_log_writer (void) {
fprintf(cf, " txn->logger->lsn.lsn++;\n");
DO_FIELDS(ft, lt,
fprintf(cf, " wbuf_%s(&wbuf, %s);\n", ft->type, ft->name));
fprintf(cf, " int r= tokulogger_finish(txn->logger, &wbuf);\n");
fprintf(cf, " int r= toku_logger_finish(txn->logger, &wbuf);\n");
fprintf(cf, " assert(wbuf.ndone==buflen);\n");
fprintf(cf, " toku_free(buf);\n");
if (lt->command=='C') {
fprintf(cf, " if (r!=0) return r;\n");
fprintf(cf, " // commit has some extra work to do.\n");
fprintf(cf, " if (txn->parent) return 0; // don't fsync if there is a parent.\n");
fprintf(cf, " else return tokulogger_fsync(txn->logger);\n");
fprintf(cf, " else return toku_logger_fsync(txn->logger);\n");
} else {
fprintf(cf, " return r;\n");
}
......@@ -181,8 +181,7 @@ void generate_log_writer (void) {
void generate_log_reader (void) {
DO_LOGTYPES(lt, ({
fprintf(cf, "static int tokulog_fread_%s (FILE *infile, struct logtype_%s *data, u_int32_t crc)", lt->name, lt->name);
fprintf(hf, ";\n");
fprintf(cf, "static int toku_log_fread_%s (FILE *infile, struct logtype_%s *data, u_int32_t crc)", lt->name, lt->name);
fprintf(cf, " {\n");
fprintf(cf, " int r=0;\n");
fprintf(cf, " u_int32_t actual_len=5; // 1 for the command, 4 for the first len.\n");
......@@ -196,7 +195,7 @@ void generate_log_reader (void) {
fprintf(cf, " return 0;\n");
fprintf(cf, "}\n\n");
}));
fprintf2(cf, hf, "int tokulog_fread (FILE *infile, struct log_entry *le)");
fprintf2(cf, hf, "int toku_log_fread (FILE *infile, struct log_entry *le)");
fprintf(hf, ";\n");
fprintf(cf, " {\n");
fprintf(cf, " u_int32_t len1; int r;\n");
......@@ -210,7 +209,7 @@ void generate_log_reader (void) {
fprintf(cf, " switch ((enum lt_cmd)cmd) {\n");
DO_LOGTYPES(lt, ({
fprintf(cf, " case LT_%s:\n", lt->name);
fprintf(cf, " return tokulog_fread_%s (infile, &le->u.%s, crc);\n", lt->name, lt->name);
fprintf(cf, " return toku_log_fread_%s (infile, &le->u.%s, crc);\n", lt->name, lt->name);
}));
fprintf(cf, " };\n");
fprintf(cf, " return DB_BADFORMAT;\n"); // Should read past the record using the len field.
......
......@@ -1000,7 +1000,7 @@ int toku_pma_insert (PMA pma, DBT *k, DBT *v, TOKUTXN txn, DISKOFF diskoff, u_in
pma->pairs[idx] = pma_malloc_kv_pair(pma, k->data, k->size, v->data, v->size);
assert(pma->pairs[idx]);
*fingerprint += rand4fingerprint*toku_calccrc32_kvpair(k->data, k->size, v->data, v->size);
int r = tokulogger_log_phys_add_or_delete_in_leaf(pma->db, txn, diskoff, 0, pma->pairs[idx]);
int r = toku_logger_log_phys_add_or_delete_in_leaf(pma->db, txn, diskoff, 0, pma->pairs[idx]);
return r;
} else
return BRT_ALREADY_THERE; /* It is already here. Return an error. */
......@@ -1203,7 +1203,7 @@ int toku_pma_insert_or_replace (PMA pma, DBT *k, DBT *v,
if (!kv_pair_deleted(pma->pairs[idx])) {
*replaced_v_size = kv->vallen;
*fingerprint -= rand4fingerprint*toku_calccrc32_kvpair(kv_pair_key_const(kv), kv_pair_keylen(kv), kv_pair_val_const(kv), kv_pair_vallen(kv));
r=tokulogger_log_phys_add_or_delete_in_leaf(pma->db, txn, diskoff, 0, kv);
r=toku_logger_log_phys_add_or_delete_in_leaf(pma->db, txn, diskoff, 0, kv);
if (r!=0) return r;
}
if (v->size == (unsigned int) kv_pair_vallen(kv)) {
......@@ -1213,7 +1213,7 @@ int toku_pma_insert_or_replace (PMA pma, DBT *k, DBT *v,
pma->pairs[idx] = pma_malloc_kv_pair(pma, k->data, k->size, v->data, v->size);
assert(pma->pairs[idx]);
}
r = tokulogger_log_phys_add_or_delete_in_leaf(pma->db, txn, diskoff, 0, pma->pairs[idx]);
r = toku_logger_log_phys_add_or_delete_in_leaf(pma->db, txn, diskoff, 0, pma->pairs[idx]);
*fingerprint += rand4fingerprint*toku_calccrc32_kvpair(k->data, k->size, v->data, v->size);
return r;
}
......
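The pma.c hunks above maintain the leaf fingerprint additively: every live pair contributes rand4fingerprint * crc(key, value), so an insert adds a term and a delete or replace subtracts the old term first. A minimal sketch of that bookkeeping, with crc_of_pair standing in for toku_calccrc32_kvpair (the helper names and the function-pointer indirection are illustrative only):

#include <stdint.h>

typedef uint32_t (*pair_crc_fn)(const void *key, int keylen, const void *val, int vallen);

/* add the contribution of a newly inserted pair */
static void fingerprint_add (uint32_t *fingerprint, uint32_t rand4fingerprint, pair_crc_fn crc_of_pair,
                             const void *key, int keylen, const void *val, int vallen) {
    *fingerprint += rand4fingerprint * crc_of_pair(key, keylen, val, vallen);
}

/* remove the contribution of a pair that is being deleted or overwritten */
static void fingerprint_remove (uint32_t *fingerprint, uint32_t rand4fingerprint, pair_crc_fn crc_of_pair,
                                const void *key, int keylen, const void *val, int vallen) {
    *fingerprint -= rand4fingerprint * crc_of_pair(key, keylen, val, vallen);
}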
......@@ -145,4 +145,6 @@ void toku_pma_verify_fingerprint (PMA pma, u_int32_t rand4fingerprint, u_int32_t
int toku_pma_set_at_index (PMA, int /*index*/, DBT */*key*/, DBT */*value*/); // If the index is wrong or there is a value already, return nonzero
void toku_pma_show_stats (void);
#endif
......@@ -98,7 +98,7 @@ static void toku_recover_newbrtnode (struct logtype_newbrtnode *c) {
n->local_fingerprint = 0; // nothing there yet
n->dirty = 1;
if (c->height==0) {
r=toku_pma_create(&n->u.l.buffer, dont_call_this_compare_fun, null_db, c->filenum, c->nodesize);
r=toku_pma_create(&n->u.l.buffer, toku_dont_call_this_compare_fun, null_db, c->filenum, c->nodesize);
assert(r==0);
n->u.l.n_bytes_in_buffer=0;
} else {
......@@ -157,7 +157,7 @@ int main (int argc, char *argv[]) {
dir = argv[1];
int n_logfiles;
char **logfiles;
r = tokulogger_find_logfiles(dir, &n_logfiles, &logfiles);
r = toku_logger_find_logfiles(dir, &n_logfiles, &logfiles);
if (r!=0) exit(1);
int i;
r = toku_create_cachetable(&ct, 1<<25, (LSN){0}, 0);
......@@ -168,7 +168,7 @@ int main (int argc, char *argv[]) {
u_int32_t version;
r=toku_read_and_print_logmagic(f, &version);
assert(r==0 && version==0);
while ((r = tokulog_fread(f, &le))==0) {
while ((r = toku_log_fread(f, &le))==0) {
printf("Got cmd %c\n", le.cmd);
logtype_dispatch(le, toku_recover_);
}
......
......@@ -26,7 +26,7 @@ clean:
rm -rf $(LIBNAME).$(LIBEXT) $(LIBNAME).a *.o
ydb.o: ../include/db.h ../newbrt/cachetable.h ../newbrt/brt.h ../newbrt/log.c
DBBINS = ydb.o ../newbrt/brt.o ../newbrt/brt-serialize.o ../newbrt/brt-verify.o ../newbrt/cachetable.o ../newbrt/hashtable.o ../newbrt/header-io.o ../newbrt/key.o ../newbrt/memory.o ../newbrt/mempool.o ../newbrt/pma.o ../newbrt/ybt.o ../newbrt/primes.o ../newbrt/log.o ../newbrt/fingerprint.o ../newbrt/log_code.o
DBBINS = ydb.o ../newbrt/brt.o ../newbrt/brt-serialize.o ../newbrt/brt-verify.o ../newbrt/cachetable.o ../newbrt/hashtable.o ../newbrt/key.o ../newbrt/memory.o ../newbrt/mempool.o ../newbrt/pma.o ../newbrt/ybt.o ../newbrt/primes.o ../newbrt/log.o ../newbrt/fingerprint.o ../newbrt/log_code.o
$(LIBNAME).$(LIBEXT): $(DBBINS)
cc $(CPPFLAGS) $(DBBINS) $(SHARED) -o $@ $(CFLAGS) -lz
......
......@@ -268,13 +268,13 @@ static int toku_db_env_open(DB_ENV * env, const char *home, u_int32_t flags, int
if (flags & (DB_INIT_TXN | DB_INIT_LOG)) {
char* full_dir = NULL;
if (env->i->lg_dir) full_dir = construct_full_name(env->i->dir, env->i->lg_dir);
r = tokulogger_create_and_open_logger(
r = toku_logger_create_and_open_logger(
full_dir ? full_dir : env->i->dir, &env->i->logger);
if (full_dir) toku_free(full_dir);
if (r!=0) goto died1;
if (0) {
died2:
tokulogger_log_close(&env->i->logger);
toku_logger_log_close(&env->i->logger);
goto died1;
}
}
......@@ -290,7 +290,7 @@ static int toku_db_env_close(DB_ENV * env, u_int32_t flags) {
if (env->i->cachetable)
r0=toku_cachetable_close(&env->i->cachetable);
if (env->i->logger)
r1=tokulogger_log_close(&env->i->logger);
r1=toku_logger_log_close(&env->i->logger);
if (env->i->data_dir)
toku_free(env->i->data_dir);
if (env->i->lg_dir)
......@@ -451,7 +451,7 @@ static int toku_db_txn_commit(DB_TXN * txn, u_int32_t flags) {
//notef("flags=%d\n", flags);
if (!txn)
return -1;
int r = tokulogger_commit(txn->i->tokutxn);
int r = toku_logger_commit(txn->i->tokutxn);
if (r != 0)
return r;
if (txn->i)
......@@ -478,7 +478,7 @@ static int toku_txn_begin(DB_ENV * env, DB_TXN * stxn, DB_TXN ** txn, u_int32_t
MALLOC(result->i);
assert(result->i);
result->i->parent = stxn;
int r = tokutxn_begin(stxn ? stxn->i->tokutxn : 0, &result->i->tokutxn, next_txn++, env->i->logger);
int r = toku_logger_txn_begin(stxn ? stxn->i->tokutxn : 0, &result->i->tokutxn, next_txn++, env->i->logger);
if (r != 0)
return r;
*txn = result;
......@@ -493,7 +493,7 @@ static int toku_txn_abort(DB_TXN * txn) {
#if 0
int txn_commit(DB_TXN * txn, u_int32_t flags) {
fprintf(stderr, "%s:%d\n", __FILE__, __LINE__);
return tokulogger_log_commit(txn->i->tokutxn);
return toku_logger_log_commit(txn->i->tokutxn);
}
#endif
......
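Putting the renamed logger entry points together, roughly as ydb.c now drives them (create/open, begin a transaction, commit, close). This is a hedged sketch, not code from the commit: it assumes it is compiled inside the newbrt tree with log.h and brttypes.h on the include path, that a small integer literal is an acceptable TXNID, and that the log directory can simply be created beforehand.

#include <assert.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "log.h"

static void logger_smoke (void) {
    TOKULOGGER logger;
    TOKUTXN txn;
    int r;
    mkdir("logdir-smoke", 0777);                                        /* ignore EEXIST for the sketch */
    r = toku_logger_create_and_open_logger("logdir-smoke", &logger);    assert(r==0);
    r = toku_logger_txn_begin(0 /* no parent txn */, &txn, 1, logger);  assert(r==0);
    r = toku_logger_commit(txn);                                        assert(r==0);
    r = toku_logger_log_close(&logger);                                 assert(r==0);
}

int main (void) {
    logger_smoke();
    return 0;
}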