Commit a346f6c7 authored by Yoni Fogel

[t:2808] Merge 2808 onto main, remove dev and merge branches

git-svn-id: file:///svn/toku/tokudb@23201 c7de825b-a66e-492c-adef-691d508d4ae1
parent 83b56c3c
......@@ -105,8 +105,9 @@ typedef struct __toku_engine_status {
int64_t local_checkpoint_files; /* number of files subjec to local checkpoint is taken for commit */
int64_t local_checkpoint_during_checkpoint; /* number of times a local checkpoint happens during normal checkpoint */
u_int32_t range_locks_max; /* max total number of range locks */
u_int32_t range_locks_max_per_index; /* max range locks per dictionary */
u_int32_t range_locks_curr; /* total range locks currently in use */
u_int64_t range_locks_max_memory; /* max total bytes of range locks */
u_int64_t range_locks_curr_memory; /* total bytes of range locks currently in use */
u_int32_t range_lock_escalation_successes; /* number of times range locks escalation succeeded */
u_int32_t range_lock_escalation_failures; /* number of times range locks escalation failed */
u_int64_t range_read_locks; /* total range read locks taken */
......@@ -266,7 +267,9 @@ struct __toku_db_env {
void *extra));
int (*get_redzone) (DB_ENV *env, int *redzone) /* get the redzone limit */;
int (*set_redzone) (DB_ENV *env, int redzone) /* set the redzone limit in percent of total space */;
void* __toku_dummy0[18];
int (*set_lk_max_memory) (DB_ENV *env, uint64_t max);
int (*get_lk_max_memory) (DB_ENV *env, uint64_t *max);
void* __toku_dummy0[16];
char __toku_dummy1[64];
void *api1_internal; /* 32-bit offset=212 size=4, 64=bit offset=360 size=8 */
void* __toku_dummy2[7];
......
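For orientation, here is a minimal usage sketch of the two DB_ENV methods this commit introduces (set_lk_max_memory and get_lk_max_memory). The environment directory, flags, and limits are illustrative only, and per the toku_env_set_lk_max_memory change further down, the setter must be called before env->open or it returns EINVAL.

#include <assert.h>
#include <sys/stat.h>
#include <db.h>

static void configure_lock_memory(const char *envdir) {
    DB_ENV *env;
    uint64_t limit = 0;
    int r = db_env_create(&env, 0);                      assert(r == 0);
    r = env->set_lk_max_locks(env, 1000);                assert(r == 0);
    r = env->set_lk_max_memory(env, 1000*1024);          assert(r == 0); /* must precede env->open */
    r = env->get_lk_max_memory(env, &limit);             assert(r == 0);
    assert(limit == 1000*1024);
    r = env->open(env, envdir,
                  DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE,
                  S_IRWXU+S_IRWXG+S_IRWXO);              assert(r == 0);
    r = env->close(env, 0);                              assert(r == 0);
}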
......@@ -105,8 +105,9 @@ typedef struct __toku_engine_status {
int64_t local_checkpoint_files; /* number of files subjec to local checkpoint is taken for commit */
int64_t local_checkpoint_during_checkpoint; /* number of times a local checkpoint happens during normal checkpoint */
u_int32_t range_locks_max; /* max total number of range locks */
u_int32_t range_locks_max_per_index; /* max range locks per dictionary */
u_int32_t range_locks_curr; /* total range locks currently in use */
u_int64_t range_locks_max_memory; /* max total bytes of range locks */
u_int64_t range_locks_curr_memory; /* total bytes of range locks currently in use */
u_int32_t range_lock_escalation_successes; /* number of times range locks escalation succeeded */
u_int32_t range_lock_escalation_failures; /* number of times range locks escalation failed */
u_int64_t range_read_locks; /* total range read locks taken */
......@@ -268,7 +269,9 @@ struct __toku_db_env {
void *extra));
int (*get_redzone) (DB_ENV *env, int *redzone) /* get the redzone limit */;
int (*set_redzone) (DB_ENV *env, int redzone) /* set the redzone limit in percent of total space */;
void* __toku_dummy0[18];
int (*set_lk_max_memory) (DB_ENV *env, uint64_t max);
int (*get_lk_max_memory) (DB_ENV *env, uint64_t *max);
void* __toku_dummy0[16];
char __toku_dummy1[96];
void *api1_internal; /* 32-bit offset=244 size=4, 64=bit offset=392 size=8 */
void* __toku_dummy2[7];
......
......@@ -105,8 +105,9 @@ typedef struct __toku_engine_status {
int64_t local_checkpoint_files; /* number of files subjec to local checkpoint is taken for commit */
int64_t local_checkpoint_during_checkpoint; /* number of times a local checkpoint happens during normal checkpoint */
u_int32_t range_locks_max; /* max total number of range locks */
u_int32_t range_locks_max_per_index; /* max range locks per dictionary */
u_int32_t range_locks_curr; /* total range locks currently in use */
u_int64_t range_locks_max_memory; /* max total bytes of range locks */
u_int64_t range_locks_curr_memory; /* total bytes of range locks currently in use */
u_int32_t range_lock_escalation_successes; /* number of times range locks escalation succeeded */
u_int32_t range_lock_escalation_failures; /* number of times range locks escalation failed */
u_int64_t range_read_locks; /* total range read locks taken */
......@@ -270,7 +271,9 @@ struct __toku_db_env {
void *extra));
int (*get_redzone) (DB_ENV *env, int *redzone) /* get the redzone limit */;
int (*set_redzone) (DB_ENV *env, int redzone) /* set the redzone limit in percent of total space */;
void* __toku_dummy0[33];
int (*set_lk_max_memory) (DB_ENV *env, uint64_t max);
int (*get_lk_max_memory) (DB_ENV *env, uint64_t *max);
void* __toku_dummy0[31];
char __toku_dummy1[128];
void *api1_internal; /* 32-bit offset=336 size=4, 64=bit offset=544 size=8 */
void* __toku_dummy2[7];
......
......@@ -105,8 +105,9 @@ typedef struct __toku_engine_status {
int64_t local_checkpoint_files; /* number of files subjec to local checkpoint is taken for commit */
int64_t local_checkpoint_during_checkpoint; /* number of times a local checkpoint happens during normal checkpoint */
u_int32_t range_locks_max; /* max total number of range locks */
u_int32_t range_locks_max_per_index; /* max range locks per dictionary */
u_int32_t range_locks_curr; /* total range locks currently in use */
u_int64_t range_locks_max_memory; /* max total bytes of range locks */
u_int64_t range_locks_curr_memory; /* total bytes of range locks currently in use */
u_int32_t range_lock_escalation_successes; /* number of times range locks escalation succeeded */
u_int32_t range_lock_escalation_failures; /* number of times range locks escalation failed */
u_int64_t range_read_locks; /* total range read locks taken */
......@@ -270,7 +271,9 @@ struct __toku_db_env {
void *extra));
int (*get_redzone) (DB_ENV *env, int *redzone) /* get the redzone limit */;
int (*set_redzone) (DB_ENV *env, int redzone) /* set the redzone limit in percent of total space */;
void* __toku_dummy0[33];
int (*set_lk_max_memory) (DB_ENV *env, uint64_t max);
int (*get_lk_max_memory) (DB_ENV *env, uint64_t *max);
void* __toku_dummy0[31];
char __toku_dummy1[128];
void *api1_internal; /* 32-bit offset=336 size=4, 64=bit offset=544 size=8 */
void* __toku_dummy2[8];
......
......@@ -105,8 +105,9 @@ typedef struct __toku_engine_status {
int64_t local_checkpoint_files; /* number of files subjec to local checkpoint is taken for commit */
int64_t local_checkpoint_during_checkpoint; /* number of times a local checkpoint happens during normal checkpoint */
u_int32_t range_locks_max; /* max total number of range locks */
u_int32_t range_locks_max_per_index; /* max range locks per dictionary */
u_int32_t range_locks_curr; /* total range locks currently in use */
u_int64_t range_locks_max_memory; /* max total bytes of range locks */
u_int64_t range_locks_curr_memory; /* total bytes of range locks currently in use */
u_int32_t range_lock_escalation_successes; /* number of times range locks escalation succeeded */
u_int32_t range_lock_escalation_failures; /* number of times range locks escalation failed */
u_int64_t range_read_locks; /* total range read locks taken */
......@@ -271,7 +272,9 @@ struct __toku_db_env {
void *extra));
int (*get_redzone) (DB_ENV *env, int *redzone) /* get the redzone limit */;
int (*set_redzone) (DB_ENV *env, int redzone) /* set the redzone limit in percent of total space */;
void* __toku_dummy0[34];
int (*set_lk_max_memory) (DB_ENV *env, uint64_t max);
int (*get_lk_max_memory) (DB_ENV *env, uint64_t *max);
void* __toku_dummy0[32];
char __toku_dummy1[144];
void *api1_internal; /* 32-bit offset=356 size=4, 64=bit offset=568 size=8 */
void* __toku_dummy2[8];
......
......@@ -468,8 +468,9 @@ int main (int argc __attribute__((__unused__)), char *const argv[] __attribute__
printf(" int64_t local_checkpoint_files; /* number of files subjec to local checkpoint is taken for commit */ \n");
printf(" int64_t local_checkpoint_during_checkpoint; /* number of times a local checkpoint happens during normal checkpoint */ \n");
printf(" u_int32_t range_locks_max; /* max total number of range locks */ \n");
printf(" u_int32_t range_locks_max_per_index; /* max range locks per dictionary */ \n");
printf(" u_int32_t range_locks_curr; /* total range locks currently in use */ \n");
printf(" u_int64_t range_locks_max_memory; /* max total bytes of range locks */ \n");
printf(" u_int64_t range_locks_curr_memory; /* total bytes of range locks currently in use */ \n");
printf(" u_int32_t range_lock_escalation_successes; /* number of times range locks escalation succeeded */ \n");
printf(" u_int32_t range_lock_escalation_failures; /* number of times range locks escalation failed */ \n");
printf(" u_int64_t range_read_locks; /* total range read locks taken */ \n");
......@@ -553,6 +554,8 @@ int main (int argc __attribute__((__unused__)), char *const argv[] __attribute__
" void *extra))",
"int (*get_redzone) (DB_ENV *env, int *redzone) /* get the redzone limit */",
"int (*set_redzone) (DB_ENV *env, int redzone) /* set the redzone limit in percent of total space */",
"int (*set_lk_max_memory) (DB_ENV *env, uint64_t max)",
"int (*get_lk_max_memory) (DB_ENV *env, uint64_t *max)",
NULL};
print_struct("db_env", 1, db_env_fields32, db_env_fields64, sizeof(db_env_fields32)/sizeof(db_env_fields32[0]), extra);
}
......
......@@ -105,8 +105,9 @@ typedef struct __toku_engine_status {
int64_t local_checkpoint_files; /* number of files subjec to local checkpoint is taken for commit */
int64_t local_checkpoint_during_checkpoint; /* number of times a local checkpoint happens during normal checkpoint */
u_int32_t range_locks_max; /* max total number of range locks */
u_int32_t range_locks_max_per_index; /* max range locks per dictionary */
u_int32_t range_locks_curr; /* total range locks currently in use */
u_int64_t range_locks_max_memory; /* max total bytes of range locks */
u_int64_t range_locks_curr_memory; /* total bytes of range locks currently in use */
u_int32_t range_lock_escalation_successes; /* number of times range locks escalation succeeded */
u_int32_t range_lock_escalation_failures; /* number of times range locks escalation failed */
u_int64_t range_read_locks; /* total range read locks taken */
......@@ -271,6 +272,8 @@ struct __toku_db_env {
void *extra));
int (*get_redzone) (DB_ENV *env, int *redzone) /* get the redzone limit */;
int (*set_redzone) (DB_ENV *env, int redzone) /* set the redzone limit in percent of total space */;
int (*set_lk_max_memory) (DB_ENV *env, uint64_t max);
int (*get_lk_max_memory) (DB_ENV *env, uint64_t *max);
void *api1_internal;
int (*close) (DB_ENV *, u_int32_t);
int (*dbremove) (DB_ENV *, DB_TXN *, const char *, const char *, u_int32_t);
......
......@@ -105,8 +105,9 @@ typedef struct __toku_engine_status {
int64_t local_checkpoint_files; /* number of files subjec to local checkpoint is taken for commit */
int64_t local_checkpoint_during_checkpoint; /* number of times a local checkpoint happens during normal checkpoint */
u_int32_t range_locks_max; /* max total number of range locks */
u_int32_t range_locks_max_per_index; /* max range locks per dictionary */
u_int32_t range_locks_curr; /* total range locks currently in use */
u_int64_t range_locks_max_memory; /* max total bytes of range locks */
u_int64_t range_locks_curr_memory; /* total bytes of range locks currently in use */
u_int32_t range_lock_escalation_successes; /* number of times range locks escalation succeeded */
u_int32_t range_lock_escalation_failures; /* number of times range locks escalation failed */
u_int64_t range_read_locks; /* total range read locks taken */
......@@ -271,6 +272,8 @@ struct __toku_db_env {
void *extra));
int (*get_redzone) (DB_ENV *env, int *redzone) /* get the redzone limit */;
int (*set_redzone) (DB_ENV *env, int redzone) /* set the redzone limit in percent of total space */;
int (*set_lk_max_memory) (DB_ENV *env, uint64_t max);
int (*get_lk_max_memory) (DB_ENV *env, uint64_t *max);
void *api1_internal;
int (*close) (DB_ENV *, u_int32_t);
int (*dbremove) (DB_ENV *, DB_TXN *, const char *, const char *, u_int32_t);
......
......@@ -17,9 +17,9 @@
/* TODO: investigate whether we can remove the user_memory functions */
/* TODO: reallocate the hash idlth if it grows too big. Perhaps, use toku_get_prime in newbrt/primes.c */
const u_int32_t __toku_idlth_init_size = 521;
const uint32_t __toku_idlth_init_size = 521;
static inline u_int32_t toku__idlth_hash(toku_idlth* idlth, DICTIONARY_ID dict_id) {
static inline uint32_t toku__idlth_hash(toku_idlth* idlth, DICTIONARY_ID dict_id) {
uint32_t tmp = dict_id.dictid;
return tmp % idlth->num_buckets;
}
......@@ -66,7 +66,7 @@ int toku_idlth_create(toku_idlth** pidlth,
toku_lt_map* toku_idlth_find(toku_idlth* idlth, DICTIONARY_ID dict_id) {
assert(idlth);
u_int32_t index = toku__idlth_hash(idlth, dict_id);
uint32_t index = toku__idlth_hash(idlth, dict_id);
toku_idlth_elt* head = &idlth->buckets[index];
toku_idlth_elt* current = head->next_in_bucket;
while (current) {
......@@ -105,7 +105,7 @@ void toku_idlth_delete(toku_idlth* idlth, DICTIONARY_ID dict_id) {
/* Must have elements. */
assert(idlth->num_keys);
u_int32_t index = toku__idlth_hash(idlth, dict_id);
uint32_t index = toku__idlth_hash(idlth, dict_id);
toku_idlth_elt* head = &idlth->buckets[index];
toku_idlth_elt* prev = head;
toku_idlth_elt* current = prev->next_in_bucket;
......@@ -131,7 +131,7 @@ int toku_idlth_insert(toku_idlth* idlth, DICTIONARY_ID dict_id) {
assert(idlth);
toku__invalidate_scan(idlth);
u_int32_t index = toku__idlth_hash(idlth, dict_id);
uint32_t index = toku__idlth_hash(idlth, dict_id);
/* Allocate a new one. */
toku_idlth_elt* element = (toku_idlth_elt*)idlth->malloc(sizeof(*element));
......
......@@ -42,8 +42,8 @@ struct __toku_idlth_elt {
typedef struct __toku_idlth toku_idlth;
struct __toku_idlth {
toku_idlth_elt* buckets;
u_int32_t num_buckets;
u_int32_t num_keys;
uint32_t num_buckets;
uint32_t num_keys;
toku_idlth_elt iter_head;
toku_idlth_elt* iter_curr;
BOOL iter_is_valid;
......
......@@ -24,6 +24,7 @@
#include <lth.h>
#include <rth.h>
#include <idlth.h>
#include <omt.h>
#include "toku_assert.h"
......@@ -93,7 +94,7 @@ struct __toku_lock_tree {
the lt, we made copies from the DB at some point
*/
toku_range* buf;
u_int32_t buflen; /**< The length of buf */
uint32_t buflen; /**< The length of buf */
/** Whether lock escalation is allowed. */
BOOL lock_escalation_allowed;
/** Lock tree manager */
......@@ -110,16 +111,13 @@ struct __toku_lock_tree {
void (*free) (void*);
/** The user realloc function */
void* (*realloc)(void*, size_t);
/** The maximum number of locks allowed for this lock tree. */
u_int32_t max_locks;
/** The current number of locks for this lock tree. */
u_int32_t curr_locks;
/** The number of references held by DB instances and transactions to this lock tree*/
u_int32_t ref_count;
uint32_t ref_count;
/** DICTIONARY_ID associated with the lock tree */
DICTIONARY_ID dict_id;
TXNID table_lock_owner;
BOOL table_is_locked;
OMT dbs; //The extant dbs using this lock tree.
};
......@@ -139,11 +137,13 @@ typedef struct ltm_status {
struct __toku_ltm {
/** The maximum number of locks allowed for the environment. */
u_int32_t max_locks;
uint32_t max_locks;
/** The current number of locks for the environment. */
u_int32_t curr_locks;
/** The maximum number of locks allowed for the db. */
u_int32_t max_locks_per_db;
uint32_t curr_locks;
/** The maximum amount of memory for locks allowed for the environment. */
uint64_t max_lock_memory;
/** The current amount of memory for locks for the environment. */
uint64_t curr_lock_memory;
/** Status / accountability information */
LTM_STATUS_S status;
/** The list of lock trees it manages. */
......@@ -185,7 +185,7 @@ struct __toku_point {
toku_lock_tree* lt; /**< The lock tree, where toku_lt_point_cmp
is defined */
void* key_payload; /**< The key ... */
u_int32_t key_len; /**< and its length */
uint32_t key_len; /**< and its length */
};
#if !defined(__TOKU_POINT)
#define __TOKU_POINT
......@@ -229,7 +229,7 @@ int toku_lt_create(toku_lock_tree** ptree,
Gets a lock tree for a given DB with id dict_id
*/
int toku_ltm_get_lt(toku_ltm* mgr, toku_lock_tree** ptree,
DICTIONARY_ID dict_id);
DICTIONARY_ID dict_id, DB *db);
void toku_ltm_invalidate_lt(toku_ltm* mgr, DICTIONARY_ID dict_id);
......@@ -419,7 +419,8 @@ int toku_lt_unlock(toku_lock_tree* tree, TXNID txn);
- May return other errors due to system calls.
*/
int toku_ltm_create(toku_ltm** pmgr,
u_int32_t max_locks,
uint32_t max_locks,
uint64_t max_lock_memory,
int (*panic)(DB*, int),
toku_dbt_cmp (*get_compare_fun_from_db)(DB*),
void* (*user_malloc) (size_t),
......@@ -450,50 +451,29 @@ int toku_ltm_close(toku_ltm* mgr);
- EDOM if max_locks is less than the number of locks held by any lock tree
held by the manager
*/
int toku_ltm_set_max_locks(toku_ltm* mgr, u_int32_t max_locks);
int toku_ltm_set_max_locks(toku_ltm* mgr, uint32_t max_locks);
/**
Sets the maximum number of locks for each lock tree.
This is a temporary function until we can complete ticket #596.
This will be used instead of toku_ltm_set_max_locks.
int toku_ltm_get_max_lock_memory(toku_ltm* mgr, uint64_t* max_lock_memory);
\param mgr The lock tree manager to which to set max_locks.
\param max_locks The new maximum number of locks.
int toku_ltm_set_max_lock_memory(toku_ltm* mgr, uint64_t max_lock_memory);
\return
- 0 on success.
- EINVAL if tree is NULL or max_locks is 0
- EDOM if max_locks is less than the number of locks held by any lock tree
held by the manager
*/
int toku_ltm_set_max_locks_per_db(toku_ltm* mgr, u_int32_t max_locks);
void toku_ltm_get_status(toku_ltm* mgr, uint32_t * max_locks, uint32_t * curr_locks,
uint64_t *max_lock_memory, uint64_t *curr_lock_memory,
LTM_STATUS s);
/**
Sets the maximum number of locks on the lock tree manager.
\param mgr The lock tree manager to which to set max_locks.
\param max_locks A buffer to return the number of max locks.
\return
- 0 on success.
- EINVAL if any parameter is NULL.
*/
void toku_ltm_get_status(toku_ltm* mgr, uint32_t * max_locks, uint32_t * curr_locks, uint32_t * max_locks_per_db, LTM_STATUS s);
int toku_ltm_get_max_locks(toku_ltm* mgr, u_int32_t* max_locks);
int toku_ltm_get_max_locks_per_db(toku_ltm* mgr, u_int32_t* max_locks);
int toku_ltm_get_max_locks(toku_ltm* mgr, uint32_t* max_locks);
void toku_lt_add_ref(toku_lock_tree* tree);
int toku_lt_remove_ref(toku_lock_tree* tree);
int toku__lt_point_cmp(const toku_point* x, const toku_point* y);
void toku_lt_remove_db_ref(toku_lock_tree* tree, DB *db);
int toku_lt_point_cmp(const toku_point* x, const toku_point* y);
toku_range_tree* toku__lt_ifexist_selfread(toku_lock_tree* tree, TXNID txn);
toku_range_tree* toku_lt_ifexist_selfread(toku_lock_tree* tree, TXNID txn);
toku_range_tree* toku__lt_ifexist_selfwrite(toku_lock_tree* tree, TXNID txn);
toku_range_tree* toku_lt_ifexist_selfwrite(toku_lock_tree* tree, TXNID txn);
#if defined(__cplusplus)
}
......
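Putting the header changes above together, a sketch of how a caller now drives the lock tree manager: toku_ltm_create takes the new max_lock_memory argument, the per-db lock limit is gone, and the memory cap has its own set/get pair. The panic, comparator, and allocator callbacks here are the test-harness helpers that appear elsewhere in this diff (dbpanic, get_compare_fun_from_db, toku_malloc/toku_free/toku_realloc); the values are placeholders.

static void ltm_sketch(void) {
    toku_ltm *mgr = NULL;
    uint32_t max_locks = 1000;
    uint64_t max_lock_memory = max_locks * 64;

    int r = toku_ltm_create(&mgr, max_locks, max_lock_memory, dbpanic,
                            get_compare_fun_from_db,
                            toku_malloc, toku_free, toku_realloc);
    assert(r == 0);

    /* The memory cap replaces the old per-dictionary lock limit. */
    uint64_t limit = 0;
    r = toku_ltm_set_max_lock_memory(mgr, 2 * max_lock_memory);  assert(r == 0);
    r = toku_ltm_get_max_lock_memory(mgr, &limit);               assert(r == 0);
    assert(limit == 2 * max_lock_memory);

    r = toku_ltm_close(mgr);                                     assert(r == 0);
}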
......@@ -16,9 +16,9 @@
#include <string.h>
/* TODO: reallocate the hash lth if it grows too big. Perhaps, use toku_get_prime in newbrt/primes.c */
const u_int32_t __toku_lth_init_size = 521;
const uint32_t __toku_lth_init_size = 521;
static inline u_int32_t toku__lth_hash(toku_lth* lth, toku_lock_tree* key) {
static inline uint32_t toku__lth_hash(toku_lth* lth, toku_lock_tree* key) {
size_t tmp = (size_t)key;
return tmp % lth->num_buckets;
}
......@@ -65,7 +65,7 @@ int toku_lth_create(toku_lth** plth,
toku_lock_tree* toku_lth_find(toku_lth* lth, toku_lock_tree* key) {
assert(lth && key);
u_int32_t index = toku__lth_hash(lth, key);
uint32_t index = toku__lth_hash(lth, key);
toku_lth_elt* head = &lth->buckets[index];
toku_lth_elt* current = head->next_in_bucket;
while (current) {
......@@ -104,7 +104,7 @@ void toku_lth_delete(toku_lth* lth, toku_lock_tree* key) {
/* Must have elements. */
assert(lth->num_keys);
u_int32_t index = toku__lth_hash(lth, key);
uint32_t index = toku__lth_hash(lth, key);
toku_lth_elt* head = &lth->buckets[index];
toku_lth_elt* prev = head;
toku_lth_elt* current = prev->next_in_bucket;
......@@ -130,7 +130,7 @@ int toku_lth_insert(toku_lth* lth, toku_lock_tree* key) {
assert(lth && key);
toku__invalidate_scan(lth);
u_int32_t index = toku__lth_hash(lth, key);
uint32_t index = toku__lth_hash(lth, key);
/* Allocate a new one. */
toku_lth_elt* element = (toku_lth_elt*)lth->malloc(sizeof(*element));
......
......@@ -47,8 +47,8 @@ typedef struct __toku_lth toku_lth;
struct __toku_lth {
toku_lth_elt* buckets;
u_int32_t num_buckets;
u_int32_t num_keys;
uint32_t num_buckets;
uint32_t num_keys;
toku_lth_elt iter_head;
toku_lth_elt* iter_curr;
BOOL iter_is_valid;
......
......@@ -16,11 +16,11 @@
#include <string.h>
/* TODO: reallocate the hash rth if it grows too big. Perhaps, use toku_get_prime in newbrt/primes.c */
const u_int32_t __toku_rth_init_size = 521;
const uint32_t __toku_rth_init_size = 521;
static inline u_int32_t toku__rth_hash(toku_rth* rth, TXNID key) {
u_int64_t tmp = (u_int64_t)key;
return (u_int32_t)(tmp % rth->num_buckets);
static inline uint32_t toku__rth_hash(toku_rth* rth, TXNID key) {
uint64_t tmp = (uint64_t)key;
return (uint32_t)(tmp % rth->num_buckets);
}
static inline void toku__invalidate_scan(toku_rth* rth) {
......@@ -65,7 +65,7 @@ int toku_rth_create(toku_rth** prth,
rt_forest* toku_rth_find(toku_rth* rth, TXNID key) {
assert(rth);
u_int32_t index = toku__rth_hash(rth, key);
uint32_t index = toku__rth_hash(rth, key);
toku_rth_elt* head = &rth->buckets[index];
toku_rth_elt* current = head->next_in_bucket;
while (current) {
......@@ -104,7 +104,7 @@ void toku_rth_delete(toku_rth* rth, TXNID key) {
/* Must have elements. */
assert(rth->num_keys);
u_int32_t index = toku__rth_hash(rth, key);
uint32_t index = toku__rth_hash(rth, key);
toku_rth_elt* head = &rth->buckets[index];
toku_rth_elt* prev = head;
toku_rth_elt* current = prev->next_in_bucket;
......@@ -130,7 +130,7 @@ int toku_rth_insert(toku_rth* rth, TXNID key) {
assert(rth);
toku__invalidate_scan(rth);
u_int32_t index = toku__rth_hash(rth, key);
uint32_t index = toku__rth_hash(rth, key);
/* Allocate a new one. */
toku_rth_elt* element = (toku_rth_elt*)rth->malloc(sizeof(*element));
......
......@@ -23,8 +23,8 @@ extern "C" {
typedef struct __rt_forest rt_forest;
struct __rt_forest {
TXNID hash_key;
toku_range_tree* self_read;
toku_range_tree* self_write;
toku_range_tree* self_read; //Set of range read locks held by txn 'hash_key'
toku_range_tree* self_write; //Set of range write locks held by txn 'hash_key'
};
typedef struct __toku_rth_elt toku_rth_elt;
......@@ -38,8 +38,8 @@ struct __toku_rth_elt {
typedef struct __toku_rth toku_rth;
struct __toku_rth {
toku_rth_elt* buckets;
u_int32_t num_buckets;
u_int32_t num_keys;
uint32_t num_buckets;
uint32_t num_keys;
toku_rth_elt iter_head;
toku_rth_elt* iter_curr;
BOOL iter_is_valid;
......
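As a small illustration of the newly commented rt_forest fields: each transaction's range locks live in two range trees, keyed by its TXNID in the rth hash. toku_rth_find and the struct fields are as declared above; the helper itself is hypothetical.

static void inspect_txn_locks(toku_rth *rth, TXNID txn) {
    rt_forest *forest = toku_rth_find(rth, txn);
    if (!forest) return;                              /* txn holds no range locks */
    toku_range_tree *reads  = forest->self_read;      /* range read locks held by txn */
    toku_range_tree *writes = forest->self_write;     /* range write locks held by txn */
    (void)reads; (void)writes;
}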
......@@ -33,16 +33,11 @@ static inline int dbcmp (DB *db __attribute__((__unused__)), const DBT *a, const
}
toku_dbt_cmp compare_fun = dbcmp;
toku_dbt_cmp dup_compare = dbcmp;
static inline toku_dbt_cmp get_compare_fun_from_db(__attribute__((unused)) DB* db) {
return compare_fun;
}
static inline toku_dbt_cmp get_dup_compare_from_db(__attribute__((unused)) DB* db) {
return dup_compare;
}
BOOL panicked = FALSE;
static inline int dbpanic(DB* db, int r) {
......@@ -78,17 +73,17 @@ static inline void parse_args (int argc, const char *argv[]) {
}
// Simle LCG random number generator. Not high quality, but good enough.
static u_int32_t rstate=1;
static uint32_t rstate=1;
static inline void mysrandom (int s) {
rstate=s;
}
static inline u_int32_t myrandom (void) {
rstate = (279470275ull*(u_int64_t)rstate)%4294967291ull;
static inline uint32_t myrandom (void) {
rstate = (279470275ull*(uint64_t)rstate)%4294967291ull;
return rstate;
}
static inline DBT *dbt_init(DBT *dbt, void *data, u_int32_t size) {
static inline DBT *dbt_init(DBT *dbt, void *data, uint32_t size) {
memset(dbt, 0, sizeof *dbt);
dbt->data = data;
dbt->size = size;
......@@ -98,12 +93,12 @@ static inline DBT *dbt_init(DBT *dbt, void *data, u_int32_t size) {
/**
A comparison function between toku_point's.
It is implemented as a wrapper of db compare and dup_compare functions,
It is implemented as a wrapper of db compare functions,
but it checks whether the point is +/- infty.
Parameters are of type toku_point.
Return values conform to cmp from qsort(3).
*/
// extern int toku__lt_point_cmp(void* a, void* b);
// extern int toku_lt_point_cmp(void* a, void* b);
static inline void init_point(toku_point* point, toku_lock_tree* tree) {
assert(point && tree);
......
......@@ -4,9 +4,10 @@ int main(void) {
int r;
toku_lock_tree* lt = NULL;
toku_ltm* mgr = NULL;
u_int32_t max_locks = 1000;
uint32_t max_locks = 1000;
uint64_t max_lock_memory = max_locks*64;
r = toku_ltm_create(&mgr, max_locks, dbpanic,
r = toku_ltm_create(&mgr, max_locks, max_lock_memory, dbpanic,
get_compare_fun_from_db,
toku_malloc, toku_free, toku_realloc);
CKERR(r);
......
......@@ -4,7 +4,10 @@
static DBT _key;
DBT* key;
u_int32_t max_locks = 1000;
enum { MAX_LT_LOCKS = 1000 };
uint32_t max_locks = MAX_LT_LOCKS;
uint64_t max_lock_memory = MAX_LT_LOCKS*64;
toku_ltm* ltm = NULL;
static void do_range_test(int (*acquire)(toku_lock_tree*, DB*, TXNID,
......@@ -97,34 +100,39 @@ int main(int argc, const char *argv[]) {
int r;
toku_lock_tree* lt = NULL;
r = toku_ltm_create(NULL, max_locks, dbpanic,
r = toku_ltm_create(NULL, max_locks, max_lock_memory, dbpanic,
get_compare_fun_from_db,
toku_malloc, toku_free, toku_realloc);
CKERR2(r, EINVAL);
assert(ltm == NULL);
r = toku_ltm_create(&ltm, 0, dbpanic,
r = toku_ltm_create(&ltm, 0, max_lock_memory, dbpanic,
get_compare_fun_from_db,
toku_malloc, toku_free, toku_realloc);
CKERR2(r, EINVAL);
assert(ltm == NULL);
r = toku_ltm_create(&ltm, max_locks, dbpanic,
r = toku_ltm_create(&ltm, max_locks, 0, dbpanic,
get_compare_fun_from_db,
toku_malloc, toku_free, toku_realloc);
CKERR2(r, EINVAL);
assert(ltm == NULL);
r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic,
get_compare_fun_from_db,
NULL, toku_free, toku_realloc);
CKERR2(r, EINVAL);
assert(ltm == NULL);
r = toku_ltm_create(&ltm, max_locks, dbpanic,
r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic,
get_compare_fun_from_db,
toku_malloc, NULL, toku_realloc);
CKERR2(r, EINVAL);
assert(ltm == NULL);
r = toku_ltm_create(&ltm, max_locks, dbpanic,
r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic,
get_compare_fun_from_db,
toku_malloc, toku_free, NULL);
CKERR2(r, EINVAL);
assert(ltm == NULL);
/* Actually create it. */
r = toku_ltm_create(&ltm, max_locks, dbpanic,
r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic,
get_compare_fun_from_db,
toku_malloc, toku_free, toku_realloc);
CKERR(r);
......@@ -137,7 +145,7 @@ int main(int argc, const char *argv[]) {
r = toku_ltm_set_max_locks(ltm, max_locks);
CKERR(r);
u_int32_t get_max = 73; //Some random number that isn't 0.
uint32_t get_max = 73; //Some random number that isn't 0.
r = toku_ltm_get_max_locks(NULL, &get_max);
CKERR2(r, EINVAL);
assert(get_max == 73);
......@@ -148,6 +156,24 @@ int main(int argc, const char *argv[]) {
CKERR(r);
assert(get_max == max_locks);
r = toku_ltm_set_max_lock_memory(NULL, max_lock_memory);
CKERR2(r, EINVAL);
r = toku_ltm_set_max_lock_memory(ltm, 0);
CKERR2(r, EINVAL);
r = toku_ltm_set_max_lock_memory(ltm, max_lock_memory);
CKERR(r);
uint64_t get_max_memory = 73; //Some random number that isn't 0.
r = toku_ltm_get_max_lock_memory(NULL, &get_max_memory);
CKERR2(r, EINVAL);
assert(get_max_memory == 73);
r = toku_ltm_get_max_lock_memory(ltm, NULL);
CKERR2(r, EINVAL);
assert(get_max_memory == 73);
r = toku_ltm_get_max_lock_memory(ltm, &get_max_memory);
CKERR(r);
assert(get_max_memory == max_lock_memory);
/* create tests. */
{
r = toku_lt_create(NULL, dbpanic, ltm,
......
......@@ -7,7 +7,9 @@ toku_lock_tree* lt = NULL;
toku_ltm* ltm = NULL;
DB* db = (DB*)1;
TXNID txn = (TXNID)1;
u_int32_t max_locks = 1000;
enum { MAX_LT_LOCKS = 1000 };
uint32_t max_locks = MAX_LT_LOCKS;
uint64_t max_lock_memory = MAX_LT_LOCKS*64;
BOOL duplicates = FALSE;
int nums[100];
......@@ -40,7 +42,7 @@ static void init_query(void) {
static void setup_tree(void) {
assert(!lt && !ltm);
r = toku_ltm_create(&ltm, max_locks, dbpanic,
r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic,
get_compare_fun_from_db,
toku_malloc, toku_free, toku_realloc);
CKERR(r);
......@@ -90,7 +92,7 @@ static void lt_insert(int key_l, int key_r) {
CKERR(r);
}
static void setup_payload_len(void** payload, u_int32_t* len, int val) {
static void setup_payload_len(void** payload, uint32_t* len, int val) {
assert(payload && len);
DBT temp;
......@@ -133,8 +135,8 @@ temporarily_fake_comparison_functions();
setup_payload_len(&right.key_payload, &right.key_len, key_r);
unsigned i;
for (i = 0; i < numfound; i++) {
if (toku__lt_point_cmp(buf[i].ends.left, &left ) == 0 &&
toku__lt_point_cmp(buf[i].ends.right, &right) == 0 &&
if (toku_lt_point_cmp(buf[i].ends.left, &left ) == 0 &&
toku_lt_point_cmp(buf[i].ends.right, &right) == 0 &&
buf[i].data == find_txn) { goto cleanup; }
}
assert(FALSE); //Crash since we didn't find it.
......@@ -193,7 +195,7 @@ static void runtest(void) {
lt_insert(3, 7);
lt_insert(4, 5);
rt = toku__lt_ifexist_selfread(lt, txn);
rt = toku_lt_ifexist_selfread(lt, txn);
assert(rt);
lt_find(rt, 1,
......@@ -221,7 +223,7 @@ static void runtest(void) {
lt_insert(4, 5);
lt_insert(3, 7);
rt = toku__lt_ifexist_selfread(lt, txn); assert(rt);
rt = toku_lt_ifexist_selfread(lt, txn); assert(rt);
lt_find(rt, 1,
3,
......@@ -243,7 +245,7 @@ static void runtest(void) {
lt_insert(3, 3);
lt_insert(4, 4);
lt_insert(3, 3);
rt = toku__lt_ifexist_selfread(lt, txn); assert(rt);
rt = toku_lt_ifexist_selfread(lt, txn); assert(rt);
lt_find(rt, 2, 3, 3, txn);
lt_find(rt, 2, 4, 4, txn);
#ifndef TOKU_RT_NOOVERLAPS
......@@ -258,7 +260,7 @@ static void runtest(void) {
for (i = 0; i < 20; i += 2) {
lt_insert(i, i + 1);
}
rt = toku__lt_ifexist_selfread(lt, txn);
rt = toku_lt_ifexist_selfread(lt, txn);
assert(rt);
for (i = 0; i < 20; i += 2) {
lt_find(rt, 10, i, i + 1, txn);
......@@ -270,7 +272,7 @@ static void runtest(void) {
}
#endif
lt_insert(0, 20);
rt = toku__lt_ifexist_selfread(lt, txn); assert(rt);
rt = toku_lt_ifexist_selfread(lt, txn); assert(rt);
lt_find( rt, 1, 0, 20, txn);
#ifndef TOKU_RT_NOOVERLAPS
rt = lt->mainread; assert(rt);
......@@ -286,7 +288,7 @@ static void runtest(void) {
lt_insert(4, 5);
lt_insert(3, 4);
rt = toku__lt_ifexist_selfread(lt, txn); assert(rt);
rt = toku_lt_ifexist_selfread(lt, txn); assert(rt);
lt_find(rt, 2, 0, 2, txn);
lt_find(rt, 2, 3, 5, txn);
#ifndef TOKU_RT_NOOVERLAPS
......@@ -297,7 +299,7 @@ static void runtest(void) {
lt_insert(2, 3);
rt = toku__lt_ifexist_selfread(lt, txn); assert(rt);
rt = toku_lt_ifexist_selfread(lt, txn); assert(rt);
lt_find(rt, 1, 0, 5, txn);
#ifndef TOKU_RT_NOOVERLAPS
rt = lt->mainread; assert(rt);
......@@ -310,7 +312,7 @@ static void runtest(void) {
lt_insert(1, 3);
lt_insert(4, 6);
lt_insert(2, 5);
rt = toku__lt_ifexist_selfread(lt, txn); assert(rt);
rt = toku_lt_ifexist_selfread(lt, txn); assert(rt);
lt_find(rt, 1, 1, 6, txn);
#ifndef TOKU_RT_NOOVERLAPS
rt = lt->mainread; assert(rt);
......@@ -323,7 +325,7 @@ static void runtest(void) {
lt_insert( 4, 5);
lt_insert( 6, 8);
lt_insert( 2, 7);
rt = toku__lt_ifexist_selfread(lt, txn); assert(rt);
rt = toku_lt_ifexist_selfread(lt, txn); assert(rt);
lt_find(rt, 1, neg_infinite, 8, txn);
#ifndef TOKU_RT_NOOVERLAPS
rt = lt->mainread; assert(rt);
......@@ -335,7 +337,7 @@ static void runtest(void) {
lt_insert(1, 2);
lt_insert(3, infinite);
lt_insert(2, 3);
rt = toku__lt_ifexist_selfread(lt, txn); assert(rt);
rt = toku_lt_ifexist_selfread(lt, txn); assert(rt);
lt_find(rt, 1, 1, infinite, txn);
#ifndef TOKU_RT_NOOVERLAPS
rt = lt->mainread; assert(rt);
......@@ -348,7 +350,7 @@ static void runtest(void) {
lt_insert(3, 4);
lt_insert(5, 6);
lt_insert(2, 5);
rt = toku__lt_ifexist_selfread(lt, txn); assert(rt);
rt = toku_lt_ifexist_selfread(lt, txn); assert(rt);
lt_find(rt, 1, 1, 6, txn);
#ifndef TOKU_RT_NOOVERLAPS
rt = lt->mainread; assert(rt);
......@@ -360,7 +362,7 @@ static void runtest(void) {
lt_insert(1, 2);
lt_insert(3, 5);
lt_insert(2, 4);
rt = toku__lt_ifexist_selfread(lt, txn); assert(rt);
rt = toku_lt_ifexist_selfread(lt, txn); assert(rt);
lt_find(rt, 1, 1, 5, txn);
#ifndef TOKU_RT_NOOVERLAPS
rt = lt->mainread; assert(rt);
......
......@@ -6,7 +6,9 @@ int r;
toku_lock_tree* lt = NULL;
toku_ltm* ltm = NULL;
DB* db = (DB*)1;
u_int32_t max_locks = 1000;
enum { MAX_LT_LOCKS = 1000 };
uint32_t max_locks = MAX_LT_LOCKS;
uint64_t max_lock_memory = MAX_LT_LOCKS*64;
int nums[100];
DBT _keys_left[2];
......@@ -34,7 +36,7 @@ static void init_query(void) {
static void setup_tree(void) {
assert(!lt && !ltm);
r = toku_ltm_create(&ltm, max_locks, dbpanic,
r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic,
get_compare_fun_from_db,
toku_malloc, toku_free, toku_realloc);
CKERR(r);
......
......@@ -6,7 +6,9 @@ int r;
toku_lock_tree* lt = NULL;
toku_ltm* ltm = NULL;
DB* db = (DB*)1;
u_int32_t max_locks = 10;
enum { MAX_LT_LOCKS = 10 };
uint32_t max_locks = MAX_LT_LOCKS;
uint64_t max_lock_memory = MAX_LT_LOCKS*64;
BOOL duplicates = FALSE;
int nums[10000];
......@@ -35,14 +37,15 @@ static void init_query(void) {
static void setup_tree(void) {
assert(!lt && !ltm);
r = toku_ltm_create(&ltm, max_locks, dbpanic,
r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic,
get_compare_fun_from_db,
toku_malloc, toku_free, toku_realloc);
CKERR(r);
assert(ltm);
r = toku_lt_create(&lt, dbpanic, ltm,
get_compare_fun_from_db,
toku_malloc, toku_free, toku_realloc);
//ask ltm for lock tree
DICTIONARY_ID dict_id = {0x1234};
r = toku_ltm_get_lt(ltm, &lt, dict_id, db);
CKERR(r);
assert(lt);
init_query();
......@@ -50,8 +53,8 @@ static void setup_tree(void) {
static void close_tree(void) {
assert(lt && ltm);
r = toku_lt_close(lt);
CKERR(r);
toku_lt_remove_db_ref(lt, db);
r = toku_ltm_close(ltm);
CKERR(r);
lt = NULL;
......@@ -354,7 +357,6 @@ static void init_test(void) {
buflen = 64;
buf = (toku_range*) toku_malloc(buflen*sizeof(toku_range));
compare_fun = intcmp;
dup_compare = intcmp;
}
static void close_test(void) {
......
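The test change above shows the open/close pattern the lock tree API moves to in this commit: lock trees are obtained from the manager by DICTIONARY_ID together with the owning DB, and released with toku_lt_remove_db_ref rather than toku_lt_close plus toku_lt_remove_ref. A condensed sketch, with ltm, db, and dict_id as placeholders:

static void open_and_release_lt(toku_ltm *ltm, DB *db) {
    toku_lock_tree *lt = NULL;
    DICTIONARY_ID dict_id = { 0x1234 };

    int r = toku_ltm_get_lt(ltm, &lt, dict_id, db);   /* ask the manager for the lock tree */
    assert(r == 0 && lt);

    /* ... acquire and release range locks on lt ... */

    toku_lt_remove_db_ref(lt, db);                    /* drop this DB's reference */
}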
......@@ -10,12 +10,14 @@ int r;
toku_lock_tree* lt [10] = {0};
toku_ltm* ltm = NULL;
DB* db = (DB*)1;
u_int32_t max_locks = 10;
enum { MAX_LT_LOCKS = 10 };
uint32_t max_locks = MAX_LT_LOCKS;
uint64_t max_lock_memory = MAX_LT_LOCKS*64;
int nums[10000];
static void setup_ltm(void) {
assert(!ltm);
r = toku_ltm_create(&ltm, max_locks, dbpanic,
r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic,
get_compare_fun_from_db,
toku_malloc, toku_free, toku_realloc);
CKERR(r);
......@@ -24,7 +26,7 @@ static void setup_ltm(void) {
static void setup_tree(size_t index, DICTIONARY_ID dict_id) {
assert(!lt[index] && ltm);
r = toku_ltm_get_lt(ltm, &lt[index], dict_id);
r = toku_ltm_get_lt(ltm, &lt[index], dict_id, NULL);
CKERR(r);
assert(lt[index]);
}
......@@ -34,7 +36,7 @@ static void close_ltm(void) {
assert(ltm);
r = toku_ltm_close(ltm);
CKERR(r);
u_int32_t i = 0;
uint32_t i = 0;
for (i = 0; i < sizeof(lt)/sizeof(*lt); i++) { lt[i] = NULL; }
ltm = NULL;
}
......@@ -68,7 +70,6 @@ static void run_test(void) {
int main(int argc, const char *argv[]) {
parse_args(argc, argv);
compare_fun = intcmp;
dup_compare = intcmp;
r = system("rm -rf " TESTDIR);
CKERR(r);
......
......@@ -9,16 +9,18 @@
static void initial_setup(void);
static int r;
static u_int32_t lt_refs[100];
static uint32_t lt_refs[100];
static toku_lock_tree* lts [100];
static toku_ltm* ltm = NULL;
static DICTIONARY_ID dict_ids[100];
static u_int32_t max_locks = 10;
enum { MAX_LT_LOCKS = 10 };
uint32_t max_locks = MAX_LT_LOCKS;
uint64_t max_lock_memory = MAX_LT_LOCKS*64;
int nums[10000];
static void setup_ltm(void) {
assert(!ltm);
r = toku_ltm_create(&ltm, max_locks, dbpanic,
r = toku_ltm_create(&ltm, max_locks, max_lock_memory, dbpanic,
get_compare_fun_from_db,
toku_malloc, toku_free, toku_realloc);
CKERR(r);
......@@ -30,7 +32,7 @@ static void db_open_tree(size_t index, size_t db_id_index) {
(lt_refs[index] > 0 && lts[index]));
assert(ltm);
lt_refs[index]++;
r = toku_ltm_get_lt(ltm, &lts[index], dict_ids[db_id_index]);
r = toku_ltm_get_lt(ltm, &lts[index], dict_ids[db_id_index], NULL);
CKERR(r);
assert(lts[index]);
}
......@@ -112,7 +114,7 @@ static void run_test(void) {
}
static void initial_setup(void) {
u_int32_t i;
uint32_t i;
ltm = NULL;
assert(sizeof(dict_ids) / sizeof(dict_ids[0]) == sizeof(lts) / sizeof(lts[0]));
......@@ -127,7 +129,7 @@ static void initial_setup(void) {
}
static void close_test(void) {
u_int32_t i;
uint32_t i;
for (i = 0; i < sizeof(lts) / sizeof(lts[0]); i++) {
assert(lt_refs[i]==0); //The internal reference isn't counted.
assert(dict_ids[i].dictid != DICTIONARY_ID_NONE.dictid);
......@@ -137,7 +139,6 @@ static void close_test(void) {
int main(int argc, const char *argv[]) {
parse_args(argc, argv);
compare_fun = intcmp;
dup_compare = intcmp;
r = system("rm -rf " TESTDIR);
CKERR(r);
......
......@@ -22,6 +22,8 @@ test_setup (void) {
r=db_env_create(&env, 0); CKERR(r);
env->set_errfile(env, stderr);
multiply_locks_for_n_dbs(env, NFILES);
r=env->open(env, ENVDIR, DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_MPOOL|DB_INIT_TXN|DB_CREATE|DB_PRIVATE, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r);
r=env->txn_begin(env, 0, &txn, 0); assert(r==0);
......@@ -64,7 +66,7 @@ doit (void) {
dbt_init(&data, str, 1+strlen(str));
for (i=0; i<NFILES; i++) {
r = dbs[i]->put(dbs[i], txn, &key, &data, DB_YESOVERWRITE);
assert(r==0);
CKERR(r);
}
}
r=txn->commit(txn, 0); assert(r==0);
......
......@@ -287,6 +287,23 @@ toku_hard_crash_on_purpose(void) {
fflush(stderr);
}
static void UU()
multiply_locks_for_n_dbs(DB_ENV *env, int num_dbs) {
int r;
uint32_t current_max_locks;
r = env->get_lk_max_locks(env, &current_max_locks);
CKERR(r);
r = env->set_lk_max_locks(env, current_max_locks * num_dbs);
CKERR(r);
#if defined(USE_TDB)
uint64_t current_max_lock_memory;
r = env->get_lk_max_memory(env, &current_max_lock_memory);
CKERR(r);
r = env->set_lk_max_memory(env, current_max_lock_memory * num_dbs);
CKERR(r);
#endif
}
#if defined(__cilkplusplus) || defined(__cplusplus)
}
#endif
......
......@@ -95,6 +95,7 @@ single_process_unlock(int *lockfd) {
/** The default maximum number of persistent locks in a lock tree */
const u_int32_t __toku_env_default_max_locks = 1000;
const uint64_t __toku_env_default_max_lock_memory = 1000*1024;
static inline DBT*
init_dbt_realloc(DBT *dbt) {
......@@ -1103,7 +1104,7 @@ static int toku_env_set_lk_max_locks(DB_ENV *dbenv, u_int32_t max) {
int r = ENOSYS;
HANDLE_PANICKED_ENV(dbenv);
if (env_opened(dbenv)) { return EINVAL; }
r = toku_ltm_set_max_locks_per_db(dbenv->i->ltm, max);
r = toku_ltm_set_max_locks(dbenv->i->ltm, max);
return r;
}
......@@ -1119,17 +1120,38 @@ static int locked_env_set_lk_max(DB_ENV * env, u_int32_t lk_max) {
static int toku_env_get_lk_max_locks(DB_ENV *dbenv, u_int32_t *lk_maxp) {
HANDLE_PANICKED_ENV(dbenv);
return toku_ltm_get_max_locks_per_db(dbenv->i->ltm, lk_maxp);
return toku_ltm_get_max_locks(dbenv->i->ltm, lk_maxp);
}
static int locked_env_set_lk_max_locks(DB_ENV *dbenv, u_int32_t max) {
toku_ydb_lock(); int r = toku_env_set_lk_max_locks(dbenv, max); toku_ydb_unlock(); return r;
}
static int __attribute__((unused)) locked_env_get_lk_max_locks(DB_ENV *dbenv, u_int32_t *lk_maxp) {
static int locked_env_get_lk_max_locks(DB_ENV *dbenv, u_int32_t *lk_maxp) {
toku_ydb_lock(); int r = toku_env_get_lk_max_locks(dbenv, lk_maxp); toku_ydb_unlock(); return r;
}
static int toku_env_set_lk_max_memory(DB_ENV *dbenv, uint64_t max) {
int r = ENOSYS;
HANDLE_PANICKED_ENV(dbenv);
if (env_opened(dbenv)) { return EINVAL; }
r = toku_ltm_set_max_lock_memory(dbenv->i->ltm, max);
return r;
}
static int toku_env_get_lk_max_memory(DB_ENV *dbenv, uint64_t *lk_maxp) {
HANDLE_PANICKED_ENV(dbenv);
return toku_ltm_get_max_lock_memory(dbenv->i->ltm, lk_maxp);
}
static int locked_env_set_lk_max_memory(DB_ENV *dbenv, uint64_t max) {
toku_ydb_lock(); int r = toku_env_set_lk_max_memory(dbenv, max); toku_ydb_unlock(); return r;
}
static int locked_env_get_lk_max_memory(DB_ENV *dbenv, uint64_t *lk_maxp) {
toku_ydb_lock(); int r = toku_env_get_lk_max_memory(dbenv, lk_maxp); toku_ydb_unlock(); return r;
}
//void toku__env_set_noticecall (DB_ENV *env, void (*noticecall)(DB_ENV *, db_notices)) {
// env->i->noticecall = noticecall;
//}
......@@ -1524,11 +1546,15 @@ env_get_engine_status(DB_ENV * env, ENGINE_STATUS * engstat) {
{
toku_ltm* ltm = env->i->ltm;
LTM_STATUS_S ltmstat;
uint32_t max_locks, curr_locks, max_locks_per_db;
toku_ltm_get_status(ltm, &max_locks, &curr_locks, &max_locks_per_db, &ltmstat);
uint32_t max_locks, curr_locks;
uint64_t max_lock_memory, curr_lock_memory;
toku_ltm_get_status(ltm, &max_locks, &curr_locks,
&max_lock_memory, &curr_lock_memory,
&ltmstat);
engstat->range_locks_max = max_locks;
engstat->range_locks_max_per_index = max_locks_per_db;
engstat->range_locks_curr = curr_locks;
engstat->range_locks_max_memory = max_lock_memory;
engstat->range_locks_curr_memory = curr_lock_memory;
engstat->range_lock_escalation_successes = ltmstat.lock_escalation_successes;
engstat->range_lock_escalation_failures = ltmstat.lock_escalation_failures;
engstat->range_read_locks = ltmstat.read_lock;
......@@ -1657,8 +1683,9 @@ env_get_engine_status_text(DB_ENV * env, char * buff, int bufsiz) {
n += snprintf(buff + n, bufsiz - n, "local_checkpoint_files %"PRId64"\n", engstat.local_checkpoint_files);
n += snprintf(buff + n, bufsiz - n, "local_checkpoint_during_checkpoint %"PRId64"\n", engstat.local_checkpoint_during_checkpoint);
n += snprintf(buff + n, bufsiz - n, "range_locks_max %"PRIu32"\n", engstat.range_locks_max);
n += snprintf(buff + n, bufsiz - n, "range_locks_max_per_index %"PRIu32"\n", engstat.range_locks_max_per_index);
n += snprintf(buff + n, bufsiz - n, "range_locks_curr %"PRIu32"\n", engstat.range_locks_curr);
n += snprintf(buff + n, bufsiz - n, "range_locks_max_memory %"PRIu64"\n", engstat.range_locks_max_memory);
n += snprintf(buff + n, bufsiz - n, "range_locks_curr_memory %"PRIu64"\n", engstat.range_locks_curr_memory);
n += snprintf(buff + n, bufsiz - n, "range_locks_escalation_successes %"PRIu32"\n", engstat.range_lock_escalation_successes);
n += snprintf(buff + n, bufsiz - n, "range_locks_escalation_failures %"PRIu32"\n", engstat.range_lock_escalation_failures);
n += snprintf(buff + n, bufsiz - n, "range_read_locks %"PRIu64"\n", engstat.range_read_locks);
......@@ -1756,6 +1783,8 @@ static int toku_env_create(DB_ENV ** envp, u_int32_t flags) {
SENV(get_lg_max);
SENV(set_lk_max_locks);
SENV(get_lk_max_locks);
SENV(set_lk_max_memory);
SENV(get_lk_max_memory);
SENV(set_cachesize);
#if DB_VERSION_MAJOR == 4 && DB_VERSION_MINOR >= 3
SENV(get_cachesize);
......@@ -1781,7 +1810,8 @@ static int toku_env_create(DB_ENV ** envp, u_int32_t flags) {
env_init_open_txn(result);
env_fs_init(result);
r = toku_ltm_create(&result->i->ltm, __toku_env_default_max_locks,
r = toku_ltm_create(&result->i->ltm,
__toku_env_default_max_locks, __toku_env_default_max_lock_memory,
toku_db_lt_panic,
toku_db_get_compare_fun,
toku_malloc, toku_free, toku_realloc);
......@@ -2176,11 +2206,7 @@ db_close_before_brt(DB *db, u_int32_t UU(flags)) {
assert(error_string==0);
int r2 = 0;
if (db->i->lt) {
r2 = toku_lt_remove_ref(db->i->lt);
if (r2) {
db->dbenv->i->is_panicked = r2; // Panicking the whole environment may be overkill, but I'm not sure what else to do.
db->dbenv->i->panic_string = 0;
}
toku_lt_remove_db_ref(db->i->lt, db);
}
// printf("%s:%d %d=__toku_db_close(%p)\n", __FILE__, __LINE__, r, db);
// Even if panicked, let's close as much as we can.
......@@ -3780,7 +3806,7 @@ db_open_iname(DB * db, DB_TXN * txn, const char *iname_in_env, u_int32_t flags,
db->i->opened = 1;
if (need_locktree) {
db->i->dict_id = toku_brt_get_dictionary_id(db->i->brt);
r = toku_ltm_get_lt(db->dbenv->i->ltm, &db->i->lt, db->i->dict_id);
r = toku_ltm_get_lt(db->dbenv->i->ltm, &db->i->lt, db->i->dict_id, db);
if (r!=0) { goto error_cleanup; }
}
//Add to transaction's list of 'must close' if necessary.
......@@ -3796,7 +3822,7 @@ db_open_iname(DB * db, DB_TXN * txn, const char *iname_in_env, u_int32_t flags,
db->i->dict_id = DICTIONARY_ID_NONE;
db->i->opened = 0;
if (db->i->lt) {
toku_lt_remove_ref(db->i->lt);
toku_lt_remove_db_ref(db->i->lt, db);
db->i->lt = NULL;
}
return r;
......@@ -4754,11 +4780,21 @@ char *db_strerror(int error) {
return errorstr;
}
if (error==DB_BADFORMAT) {
switch (error) {
case DB_BADFORMAT:
return "Database Bad Format (probably a corrupted database)";
}
if (error==DB_NOTFOUND) {
case DB_NOTFOUND:
return "Not found";
case TOKUDB_OUT_OF_LOCKS:
return "Out of locks";
case TOKUDB_DICTIONARY_TOO_OLD:
return "Dictionary too old for this version of TokuDB";
case TOKUDB_DICTIONARY_TOO_NEW:
return "Dictionary too new for this version of TokuDB";
case TOKUDB_CANCELED:
return "User cancelled operation";
case TOKUDB_NO_DATA:
return "Ran out of data (not EOF)";
}
static char unknown_result[100]; // Race condition if two threads call this at the same time. However even in a bad case, it should be some sort of null-terminated string.
......
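With the consolidated db_strerror switch above, the TokuDB-specific return codes now map to readable messages. A tiny illustrative helper (the error codes come from whichever call actually failed):

#include <stdio.h>
#include <db.h>

static void report(const char *op, int r) {
    if (r != 0)
        fprintf(stderr, "%s failed: %d (%s)\n", op, r, db_strerror(r));
}

/* e.g. report("db->put", TOKUDB_OUT_OF_LOCKS) prints "... (Out of locks)". */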