mariadb · Commit 3f4aa5f7
authored Nov 17, 2006 by unknown

Merge bk-internal.mysql.com:/home/bk/mysql-maria
into janus.mylan:/usr/home/serg/Abk/mysql-maria

parents 3becab22 915cebdd

Showing 16 changed files with 471 additions and 301 deletions (+471 −301)
BUILD/SETUP.sh                          +2   −1
include/atomic/nolock.h                 +1   −1
include/lf.h                            +32  −15
include/my_atomic.h                     +0   −8
include/my_global.h                     +10  −1
mysys/lf_alloc-pin.c                    +15  −14
mysys/lf_dynarray.c                     +23  −12
mysys/lf_hash.c                         +60  −59
mysys/my_getsystime.c                   +0   −4
storage/maria/lockman.h                 +4   −3
storage/maria/tablockman.c              +240 −131
storage/maria/tablockman.h              +26  −26
storage/maria/trnman.c                  +2   −1
storage/maria/unittest/lockman-t.c      +4   −0
storage/maria/unittest/lockman1-t.c     +14  −9
storage/maria/unittest/lockman2-t.c     +38  −16
BUILD/SETUP.sh  View file @ 3f4aa5f7

@@ -117,8 +117,9 @@ valgrind_flags="-USAFEMALLOC -UFORCE_INIT_OF_VARS -DHAVE_purify "
 valgrind_flags="$valgrind_flags -DMYSQL_SERVER_SUFFIX=-valgrind-max"
 #
 # Used in -debug builds
-debug_cflags="-DUNIV_MUST_NOT_INLINE -DEXTRA_DEBUG -DFORCE_INIT_OF_VARS "
+debug_cflags="-DUNIV_MUST_NOT_INLINE -DEXTRA_DEBUG -DFORCE_INIT_OF_VARS"
 debug_cflags="$debug_cflags -DSAFEMALLOC -DPEDANTIC_SAFEMALLOC -DSAFE_MUTEX"
+debug_cflags="$debug_cflags -DMY_LF_EXTRA_DEBUG"
 error_inject="--with-error-inject "
 #
 # Base C++ flags for all builds
include/atomic/nolock.h  View file @ 3f4aa5f7

@@ -32,7 +32,7 @@
 #ifdef make_atomic_cas_body
-typedef struct { } my_atomic_rwlock_t;
+typedef struct { } my_atomic_rwlock_t __attribute__ ((unused));
 #define my_atomic_rwlock_destroy(name)
 #define my_atomic_rwlock_init(name)
 #define my_atomic_rwlock_rdlock(name)
include/lf.h  View file @ 3f4aa5f7

@@ -24,7 +24,7 @@
   func() is a _func() protected by my_atomic_rwlock_wrlock()
 */
-#define lock_wrap(f, t,proto_args, args, lock) \
+#define lock_wrap(f, t, proto_args, args, lock) \
 t _ ## f proto_args; \
 static inline t f proto_args \
 { \
@@ -35,7 +35,7 @@ static inline t f proto_args \
   return ret; \
 }
-#define lock_wrap_void(f,proto_args, args, lock) \
+#define lock_wrap_void(f, proto_args, args, lock) \
 void _ ## f proto_args; \
 static inline void f proto_args \
 { \
@@ -44,14 +44,14 @@ static inline void f proto_args \
   my_atomic_rwlock_wrunlock(lock); \
 }
-#define nolock_wrap(f, t,proto_args, args) \
+#define nolock_wrap(f, t, proto_args, args) \
 t _ ## f proto_args; \
 static inline t f proto_args \
 { \
   return _ ## f args; \
 }
-#define nolock_wrap_void(f, proto_args, args) \
+#define nolock_wrap_void(f, proto_args, args) \
 void _ ## f proto_args; \
 static inline void f proto_args \
 { \
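To make the wrapper pattern concrete, here is a hand-expanded sketch (not part of the patch) of what a lock_wrap() invocation generates. The toy_array type and pthread_rwlock_* calls are stand-ins for LF_DYNARRAY and my_atomic_rwlock_* so the sketch compiles on its own; on platforms where make_atomic_cas_body is defined, the rwlock is the empty dummy type from nolock.h and the lock calls compile away.

#include <pthread.h>

/* stand-ins so this sketch is self-contained */
typedef struct { pthread_rwlock_t lock; } toy_array;

/* the real "_func" worker that does the actual job, unlocked */
static void *_toy_lvalue(toy_array *array, unsigned idx)
{ (void)array; (void)idx; return 0; }

/* what lock_wrap(toy_lvalue, void *, (toy_array *array, unsigned idx),
                  (array, idx), &array->lock) expands to, in spirit: */
static inline void *toy_lvalue(toy_array *array, unsigned idx)
{
  void *ret;
  pthread_rwlock_wrlock(&array->lock);  /* my_atomic_rwlock_wrlock(lock)   */
  ret= _toy_lvalue(array, idx);         /* ret= _func args                 */
  pthread_rwlock_unlock(&array->lock);  /* my_atomic_rwlock_wrunlock(lock) */
  return ret;
}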
@@ -80,14 +80,14 @@ void lf_dynarray_destroy(LF_DYNARRAY *array);  (reindentation only)
 nolock_wrap(lf_dynarray_value, void *,
             (LF_DYNARRAY *array, uint idx),
             (array, idx));
 lock_wrap(lf_dynarray_lvalue, void *,
           (LF_DYNARRAY *array, uint idx),
           (array, idx),
           &array->lock);
 nolock_wrap(lf_dynarray_iterate, int,
             (LF_DYNARRAY *array, lf_dynarray_func func, void *arg),
             (array, func, arg));

 /*
   pin manager for memory allocator, lf_alloc-pin.c
@@ -115,9 +115,14 @@ typedef struct {
   uint32 volatile link;
   /* we want sizeof(LF_PINS) to be 128 to avoid false sharing */
   char pad[128-sizeof(uint32)*2
-           -sizeof(void *)*(LF_PINBOX_PINS+2)];
+           -sizeof(LF_PINBOX *)
+           -sizeof(void *)*(LF_PINBOX_PINS+1)];
 } LF_PINS;

+/*
+  shortcut macros to do an atomic_wrlock on a structure that uses pins
+  (e.g. lf_hash).
+*/
 #define lf_rwlock_by_pins(PINS) \
   my_atomic_rwlock_wrlock(&(PINS)->pinbox->pinstack.lock)
 #define lf_rwunlock_by_pins(PINS) \
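The pad keeps sizeof(LF_PINS) at exactly 128 bytes so adjacent LF_PINS never share cache lines; lf_pinbox_init below asserts exactly this. A minimal standalone sketch of the same arithmetic follows — the member names and LF_PINBOX_PINS == 4 are assumptions made for illustration, not the real struct layout:

#include <assert.h>
#include <stdint.h>

#define PINS 4                       /* assumption: LF_PINBOX_PINS == 4 */
typedef struct {
  void * volatile pin[PINS];         /* the pins themselves              */
  void *pinbox;                      /* stands in for LF_PINBOX *pinbox  */
  void *purgatory;                   /* the extra void * of bookkeeping  */
  uint32_t purgatory_count;
  uint32_t volatile link;
  char pad[128 - sizeof(uint32_t)*2
               - sizeof(void *)          /* the LF_PINBOX * member */
               - sizeof(void *)*(PINS+1)];
} toy_pins;

int main(void)
{
  /* mirrors DBUG_ASSERT(sizeof(LF_PINS) == 128) in lf_pinbox_init() */
  assert(sizeof(toy_pins) == 128);
  return 0;
}

The check holds on both 32-bit and 64-bit pointer sizes, which is exactly why the pad is expressed in terms of sizeof() rather than a constant.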
@@ -131,11 +136,11 @@ typedef struct {
 #if defined(__GNUC__) && defined(MY_LF_EXTRA_DEBUG)
 #define LF_REQUIRE_PINS(N) \
   static const char require_pins[LF_PINBOX_PINS-N]; \
-  static const int LF_NUM_PINS_IN_THIS_FILE=N;
+  static const int LF_NUM_PINS_IN_THIS_FILE= N;
 #define _lf_pin(PINS, PIN, ADDR) \
 ( \
-  my_atomic_storeptr(&(PINS)->pin[PIN], (ADDR)), \
-  assert(PIN < LF_NUM_PINS_IN_THIS_FILE) \
+  assert(PIN < LF_NUM_PINS_IN_THIS_FILE), \
+  my_atomic_storeptr(&(PINS)->pin[PIN], (ADDR)) \
 )
 #else
 #define LF_REQUIRE_PINS(N)
@@ -151,7 +156,7 @@ typedef struct {
 } while (0)
 #define lf_unpin(PINS, PIN) lf_pin(PINS, PIN, NULL)
 #define _lf_assert_pin(PINS, PIN) assert((PINS)->pin[PIN] != 0)
-#define _lf_assert_unpin(PINS, PIN) assert((PINS)->pin[PIN]==0)
+#define _lf_assert_unpin(PINS, PIN) assert((PINS)->pin[PIN] == 0)

 void lf_pinbox_init(LF_PINBOX *pinbox, uint free_ptr_offset,
                     lf_pinbox_free_func *free_func, void *free_func_arg);
@@ -167,16 +172,20 @@ lock_wrap_void(lf_pinbox_put_pins,
                &pins->pinbox->pinstack.lock);
 lock_wrap_void(lf_pinbox_free, (LF_PINS *pins, void *addr),
                (pins, addr),
                &pins->pinbox->pinstack.lock);

 /*
   memory allocator, lf_alloc-pin.c
 */
+struct st_lf_alloc_node {
+  struct st_lf_alloc_node *next;
+};
 typedef struct st_lf_allocator {
   LF_PINBOX pinbox;
-  void * volatile top;
+  struct st_lf_alloc_node * volatile top;
   uint element_size;
   uint32 volatile mallocs;
 } LF_ALLOCATOR;
@@ -184,13 +193,17 @@ typedef struct st_lf_allocator {
 void lf_alloc_init(LF_ALLOCATOR *allocator, uint size, uint free_ptr_offset);
 void lf_alloc_destroy(LF_ALLOCATOR *allocator);
 uint lf_alloc_in_pool(LF_ALLOCATOR *allocator);
+/*
+  shortcut macros to access underlying pinbox functions from an LF_ALLOCATOR
+  see _lf_pinbox_get_pins() and _lf_pinbox_put_pins()
+*/
 #define _lf_alloc_free(PINS, PTR) _lf_pinbox_free((PINS), (PTR))
 #define lf_alloc_free(PINS, PTR) lf_pinbox_free((PINS), (PTR))
 #define _lf_alloc_get_pins(ALLOC) _lf_pinbox_get_pins(&(ALLOC)->pinbox)
 #define lf_alloc_get_pins(ALLOC) lf_pinbox_get_pins(&(ALLOC)->pinbox)
 #define _lf_alloc_put_pins(PINS) _lf_pinbox_put_pins(PINS)
 #define lf_alloc_put_pins(PINS) lf_pinbox_put_pins(PINS)
-#define lf_alloc_real_free(ALLOC,ADDR) my_free((gptr)(ADDR), MYF(0))
+#define lf_alloc_real_free(ALLOC, ADDR) my_free((gptr)(ADDR), MYF(0))
 lock_wrap(lf_alloc_new, void *,
           (LF_PINS *pins),
@@ -222,6 +235,10 @@ void lf_hash_destroy(LF_HASH *hash);
 int lf_hash_insert(LF_HASH *hash, LF_PINS *pins, const void *data);
 void *lf_hash_search(LF_HASH *hash, LF_PINS *pins, const void *key, uint keylen);
 int lf_hash_delete(LF_HASH *hash, LF_PINS *pins, const void *key, uint keylen);
+/*
+  shortcut macros to access underlying pinbox functions from an LF_HASH
+  see _lf_pinbox_get_pins() and _lf_pinbox_put_pins()
+*/
 #define _lf_hash_get_pins(HASH) _lf_alloc_get_pins(&(HASH)->alloc)
 #define lf_hash_get_pins(HASH) lf_alloc_get_pins(&(HASH)->alloc)
 #define _lf_hash_put_pins(PINS) _lf_pinbox_put_pins(PINS)
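For orientation, a usage sketch of the API declared above (illustrative only; it assumes the lf.h context and a hash already set up so that the element's first field is its key — initialization arguments are deliberately omitted since lf_hash_init's parameter order is not shown in this hunk):

#include <lf.h>

void example(LF_HASH *hash)
{
  LF_PINS *pins= lf_hash_get_pins(hash);   /* per-thread pin set        */
  struct { unsigned key; int value; } elem= { 1, 10 };
  void *cur;

  lf_hash_insert(hash, pins, &elem);       /* element data is copied in */
  cur= lf_hash_search(hash, pins, &elem.key, sizeof(elem.key));
  if (cur)                                 /* pointer to the found copy */
    lf_hash_delete(hash, pins, &elem.key, sizeof(elem.key));

  lf_hash_put_pins(pins);                  /* release pins when done    */
}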
include/my_atomic.h  View file @ 3f4aa5f7

@@ -173,14 +173,6 @@ make_atomic_fas(ptr)
 #define LF_BACKOFF (1)
 #endif
-#if SIZEOF_CHARP == SIZEOF_INT
-typedef int intptr;
-#elif SIZEOF_CHARP == SIZEOF_LONG
-typedef long intptr;
-#else
-#error
-#endif
 #define MY_ATOMIC_OK 0
 #define MY_ATOMIC_NOT_1CPU 1
 extern int my_atomic_initialize();
include/my_global.h  View file @ 3f4aa5f7

@@ -432,7 +432,8 @@ C_MODE_END
 #define compile_time_assert(X) \
   do \
   { \
-    char compile_time_assert[(X) ? 1 : -1]; \
+    char compile_time_assert[(X) ? 1 : -1] \
+         __attribute__ ((unused)); \
   } while(0)

 /* Go around some bugs in different OS and compilers */
@@ -964,6 +965,14 @@ typedef unsigned __int64 my_ulonglong;
 typedef unsigned long long my_ulonglong;
 #endif

+#if SIZEOF_CHARP == SIZEOF_INT
+typedef int intptr;
+#elif SIZEOF_CHARP == SIZEOF_LONG
+typedef long intptr;
+#else
+#error
+#endif
+
 #ifdef USE_RAID
 /*
   The following is done with a if to not get problems with pre-processors
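As an illustration of why the __attribute__ ((unused)) was added: when the asserted condition is true, the macro declares a char array nobody reads, which gcc flags as an unused variable; when it is false, the array size is -1 and compilation fails. A standalone sketch (gcc-specific attribute):

#include <stdint.h>

#define compile_time_assert(X)                  \
  do                                            \
  {                                             \
    char compile_time_assert[(X) ? 1 : -1]      \
         __attribute__ ((unused));              \
  } while(0)

void sanity_checks(void)
{
  compile_time_assert(sizeof(uint32_t) == 4);   /* compiles             */
  /* compile_time_assert(sizeof(char) == 2); */ /* would fail to build  */
}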
mysys/lf_alloc-pin.c  View file @ 3f4aa5f7

@@ -91,7 +91,7 @@ static void _lf_pinbox_real_free(LF_PINS *pins);  (reindentation only)
   See the latter for details.
 */
 void lf_pinbox_init(LF_PINBOX *pinbox, uint free_ptr_offset,
                     lf_pinbox_free_func *free_func, void *free_func_arg)
 {
   DBUG_ASSERT(sizeof(LF_PINS) == 128);
   DBUG_ASSERT(free_ptr_offset % sizeof(void *) == 0);
@@ -306,7 +306,7 @@ static void _lf_pinbox_real_free(LF_PINS *pins)  (reindentation only)
 {
   if (addr) /* use binary search */
   {
     void **a, **b, **c;
     for (a= addr, b= addr+npins-1, c= a+(b-a)/2; b-a > 1; c= a+(b-a)/2)
       if (cur == *c)
         a= b= c;
@@ -337,13 +337,13 @@ found:
 /*
   callback for _lf_pinbox_real_free to free an unpinned object -
   add it back to the allocator stack
 */
-static void alloc_free(void *node, LF_ALLOCATOR *allocator)
+static void alloc_free(struct st_lf_alloc_node *node, LF_ALLOCATOR *allocator)
 {
-  void *tmp;
+  struct st_lf_alloc_node *tmp;
   tmp= allocator->top;
   do
   {
-    (*(void **)node)= tmp;
+    node->next= tmp;
   } while (!my_atomic_casptr((void **)&allocator->top, (void **)&tmp, node) &&
            LF_BACKOFF);
 }
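alloc_free() is the classic lock-free stack push: link the new node to the current top, then compare-and-swap the top; if another thread moved top in between, the CAS fails and the loop retries. A minimal standalone sketch of the same loop (not MySQL code; the gcc __sync builtin stands in for my_atomic_casptr):

#include <stddef.h>

struct node { struct node *next; };

static struct node *volatile top;        /* shared stack head */

static void push(struct node *n)
{
  struct node *old;
  do
  {
    old= top;          /* snapshot the current head              */
    n->next= old;      /* link the new node in front of it       */
  }                    /* retry if top changed under our feet:   */
  while (!__sync_bool_compare_and_swap(&top, old, n));
}

One difference worth noting: my_atomic_casptr() reloads the expected value into tmp on failure, so the MySQL loop does not re-read allocator->top explicitly the way this sketch re-reads top.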
@@ -379,12 +379,12 @@ void lf_alloc_init(LF_ALLOCATOR *allocator, uint size, uint free_ptr_offset)
 */
 void lf_alloc_destroy(LF_ALLOCATOR *allocator)
 {
-  void *el= allocator->top;
-  while (el)
+  struct st_lf_alloc_node *node= allocator->top;
+  while (node)
   {
-    void *tmp= *(void **)el;
-    my_free(el, MYF(0));
-    el= tmp;
+    struct st_lf_alloc_node *tmp= node->next;
+    my_free((void *)node, MYF(0));
+    node= tmp;
   }
   lf_pinbox_destroy(&allocator->pinbox);
   allocator->top= 0;
@@ -400,7 +400,7 @@ void lf_alloc_destroy(LF_ALLOCATOR *allocator)
 void *_lf_alloc_new(LF_PINS *pins)
 {
   LF_ALLOCATOR *allocator= (LF_ALLOCATOR *)(pins->pinbox->free_func_arg);
-  void *node;
+  struct st_lf_alloc_node *node;
   for (;;)
   {
     do
@@ -410,7 +410,8 @@ void *_lf_alloc_new(LF_PINS *pins)
     } while (node != allocator->top && LF_BACKOFF);
     if (!node)
     {
-      if (!(node= my_malloc(allocator->element_size, MYF(MY_WME|MY_ZEROFILL))))
+      if (!(node= (void *)my_malloc(allocator->element_size,
+                                    MYF(MY_WME|MY_ZEROFILL))))
         break;
 #ifdef MY_LF_EXTRA_DEBUG
       my_atomic_add32(&allocator->mallocs, 1);
@@ -434,8 +435,8 @@ void *_lf_alloc_new(LF_PINS *pins)
 uint lf_alloc_in_pool(LF_ALLOCATOR *allocator)
 {
   uint i;
-  void *node;
-  for (node= allocator->top, i= 0; node; node= *(void **)node, i++)
+  struct st_lf_alloc_node *node;
+  for (node= allocator->top, i= 0; node; node= node->next, i++)
     /* no op */;
   return i;
 }
mysys/lf_dynarray.c  View file @ 3f4aa5f7

@@ -19,9 +19,9 @@
   (so no pointer into the array may ever become invalid).
   Memory is allocated in non-contiguous chunks.
-  This data structure is not space efficient for sparce arrays.
-  The number of elements is limited to 2^16
+  This data structure is not space efficient for sparse arrays.
+  The number of elements is limited to 4311810304
   Every element is aligned to sizeof(element) boundary
   (to avoid false sharing if element is big enough).
@@ -49,7 +49,8 @@ void lf_dynarray_init(LF_DYNARRAY *array, uint element_size)
 static void recursive_free(void **alloc, int level)
 {
-  if (!alloc) return;
+  if (!alloc)
+    return;
+
   if (level)
   {
@@ -68,10 +69,9 @@ void lf_dynarray_destroy(LF_DYNARRAY *array)
   for (i= 0; i < LF_DYNARRAY_LEVELS; i++)
     recursive_free(array->level[i], i);
   my_atomic_rwlock_destroy(&array->lock);
   bzero(array, sizeof(*array));
 }

-static const long dynarray_idxes_in_prev_level[LF_DYNARRAY_LEVELS]=
+static const ulong dynarray_idxes_in_prev_levels[LF_DYNARRAY_LEVELS]=
 {
   0, /* +1 here to to avoid -1's below */
   LF_DYNARRAY_LEVEL_LENGTH,
@@ -82,6 +82,15 @@ static const long dynarray_idxes_in_prev_level[LF_DYNARRAY_LEVELS]=
   LF_DYNARRAY_LEVEL_LENGTH + LF_DYNARRAY_LEVEL_LENGTH
 };

+static const ulong dynarray_idxes_in_prev_level[LF_DYNARRAY_LEVELS]=
+{
+  0, /* +1 here to to avoid -1's below */
+  LF_DYNARRAY_LEVEL_LENGTH,
+  LF_DYNARRAY_LEVEL_LENGTH * LF_DYNARRAY_LEVEL_LENGTH,
+  LF_DYNARRAY_LEVEL_LENGTH * LF_DYNARRAY_LEVEL_LENGTH *
+    LF_DYNARRAY_LEVEL_LENGTH,
+};
+
 /*
   Returns a valid lvalue pointer to the element number 'idx'.
   Allocates memory if necessary.
@@ -91,16 +100,17 @@ void *_lf_dynarray_lvalue(LF_DYNARRAY *array, uint idx)
   void *ptr, * volatile *ptr_ptr= 0;
   int i;
-  for (i= 3; idx < dynarray_idxes_in_prev_level[i]; i--)
+  for (i= LF_DYNARRAY_LEVELS-1; idx < dynarray_idxes_in_prev_levels[i]; i--)
     /* no-op */;
   ptr_ptr= &array->level[i];
-  idx-= dynarray_idxes_in_prev_level[i];
+  idx-= dynarray_idxes_in_prev_levels[i];
   for (; i > 0; i--)
   {
     if (!(ptr= *ptr_ptr))
     {
       void *alloc= my_malloc(LF_DYNARRAY_LEVEL_LENGTH * sizeof(void *),
                              MYF(MY_WME|MY_ZEROFILL));
-      if (!alloc)
+      if (unlikely(!alloc))
         return(NULL);
       if (my_atomic_casptr(ptr_ptr, &ptr, alloc))
         ptr= alloc;
@@ -116,7 +126,7 @@ void *_lf_dynarray_lvalue(LF_DYNARRAY *array, uint idx)
       alloc= my_malloc(LF_DYNARRAY_LEVEL_LENGTH * array->size_of_element +
                        max(array->size_of_element, sizeof(void *)),
                        MYF(MY_WME|MY_ZEROFILL));
-      if (!alloc)
+      if (unlikely(!alloc))
         return(NULL);
       /* reserve the space for free() address */
       data= alloc + sizeof(void *);
@@ -143,9 +153,10 @@ void *_lf_dynarray_value(LF_DYNARRAY *array, uint idx)
   void *ptr, * volatile *ptr_ptr= 0;
   int i;
-  for (i= 3; idx < dynarray_idxes_in_prev_level[i]; i--)
+  for (i= LF_DYNARRAY_LEVELS-1; idx < dynarray_idxes_in_prev_levels[i]; i--)
     /* no-op */;
   ptr_ptr= &array->level[i];
-  idx-= dynarray_idxes_in_prev_level[i];
+  idx-= dynarray_idxes_in_prev_levels[i];
   for (; i > 0; i--)
   {
     if (!(ptr= *ptr_ptr))
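The new element limit quoted in the file comment follows directly from the level tables above: assuming LF_DYNARRAY_LEVEL_LENGTH is 256 and LF_DYNARRAY_LEVELS is 4, capacity is 256 + 256^2 + 256^3 + 256^4 = 4311810304, and dynarray_idxes_in_prev_levels[i] is the running total of elements held by levels below i — exactly what the level-search loops walk backwards over. A standalone arithmetic check:

#include <stdio.h>

#define LEVEL_LENGTH 256ULL   /* assumption: LF_DYNARRAY_LEVEL_LENGTH */
#define LEVELS 4              /* assumption: LF_DYNARRAY_LEVELS       */

int main(void)
{
  unsigned long long idxes_in_prev_levels[LEVELS], cap= 0, l= 1;
  int i;
  for (i= 0; i < LEVELS; i++)
  {
    idxes_in_prev_levels[i]= cap;   /* elements in levels 0..i-1 */
    l*= LEVEL_LENGTH;
    cap+= l;                        /* add LEVEL_LENGTH^(i+1)    */
  }
  /* prints 0 256 65792 16843008, then capacity 4311810304 */
  for (i= 0; i < LEVELS; i++)
    printf("idxes_in_prev_levels[%d]= %llu\n", i, idxes_in_prev_levels[i]);
  printf("capacity= %llu\n", cap);
  return 0;
}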
mysys/lf_hash.c  View file @ 3f4aa5f7

@@ -23,6 +23,7 @@
   (but how to do it in lf_hash_delete ?)
 */
 #include <my_global.h>
+#include <m_string.h>
 #include <my_sys.h>
 #include <my_bit.h>
 #include <lf.h>
@@ -33,7 +34,7 @@ LF_REQUIRE_PINS(3);
 typedef struct {
   intptr volatile link; /* a pointer to the next element in a list and a flag */
   uint32 hashnr;        /* reversed hash number, for sorting */
-  const uchar *key;
+  const byte *key;
   uint keylen;
 } LF_SLIST;
@@ -67,31 +68,31 @@ typedef struct {
   pins[0..2] are used, they are NOT removed on return
 */
 static int lfind(LF_SLIST * volatile *head, CHARSET_INFO *cs, uint32 hashnr,
-                 const uchar *key, uint keylen, CURSOR *cursor, LF_PINS *pins)
+                 const byte *key, uint keylen, CURSOR *cursor, LF_PINS *pins)
 {
   uint32 cur_hashnr;
-  const uchar *cur_key;
+  const byte *cur_key;
   uint cur_keylen;
   intptr link;
 retry:
   cursor->prev= (intptr *)head;
   do {
     cursor->curr= PTR(*cursor->prev);
     _lf_pin(pins, 1, cursor->curr);
   } while (*cursor->prev != (intptr)cursor->curr && LF_BACKOFF);
   for (;;)
   {
     if (!cursor->curr)
       return 0;
     do { /* XXX or goto retry ? */
       link= cursor->curr->link;
       cursor->next= PTR(link);
       _lf_pin(pins, 0, cursor->next);
     } while (link != cursor->curr->link && LF_BACKOFF);
     cur_hashnr= cursor->curr->hashnr;
     cur_key= cursor->curr->key;
     cur_keylen= cursor->curr->keylen;
     if (*cursor->prev != (intptr)cursor->curr)
     {
       LF_BACKOFF;
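The pin-then-revalidate do/while loops in lfind() are the heart of the pin (hazard-pointer) protocol: a pointer must be published in a pin slot before it is dereferenced, and the shared location must be re-read afterwards to prove the node was still reachable when the pin became visible. A standalone skeleton of that one step (illustrative, not MySQL code):

#include <stddef.h>

typedef struct node { struct node *volatile next; } node;

/* publish the pointer so concurrent reclaimers will not free it */
static void pin(node *volatile *hazard_slot, node *p) { *hazard_slot= p; }

static node *acquire(node *volatile *shared, node *volatile *hazard_slot)
{
  node *p;
  do
  {
    p= *shared;            /* candidate                               */
    pin(hazard_slot, p);   /* make the pin visible                    */
  } while (p != *shared);  /* re-validate: still the current pointer? */
  return p;                /* safe to dereference until unpinned      */
}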
@@ -101,12 +102,12 @@ retry:
     {
       if (cur_hashnr >= hashnr)
       {
-        int r=1;
+        int r= 1;
         if (cur_hashnr > hashnr ||
             (r= my_strnncoll(cs, cur_key, cur_keylen, key, keylen)) >= 0)
           return !r;
       }
       cursor->prev= &(cursor->curr->link);
       _lf_pin(pins, 2, cursor->curr);
     }
     else
@@ -120,7 +121,7 @@ retry:  (reindentation only)
         goto retry;
       }
     }
     cursor->curr= cursor->next;
     _lf_pin(pins, 1, cursor->curr);
   }
 }
@@ -141,21 +142,21 @@ static LF_SLIST *linsert(LF_SLIST * volatile *head, CHARSET_INFO *cs,
                          LF_SLIST *node, LF_PINS *pins, uint flags)
 {
   CURSOR cursor;
-  int res=-1;
+  int res= -1;
   do
   {
     if (lfind(head, cs, node->hashnr, node->key, node->keylen,
               &cursor, pins) &&
         (flags & LF_HASH_UNIQUE))
       res= 0; /* duplicate found */
     else
     {
       node->link= (intptr)cursor.curr;
       assert(node->link != (intptr)node);
       assert(cursor.prev != &node->link);
       if (my_atomic_casptr((void **)cursor.prev, (void **)&cursor.curr, node))
         res= 1; /* inserted ok */
     }
   } while (res == -1);
   _lf_unpin(pins, 0);
@@ -177,10 +178,10 @@ static LF_SLIST *linsert(LF_SLIST * volatile *head, CHARSET_INFO *cs,
   it uses pins[0..2], on return all pins are removed.
 */
 static int ldelete(LF_SLIST * volatile *head, CHARSET_INFO *cs, uint32 hashnr,
-                   const uchar *key, uint keylen, LF_PINS *pins)
+                   const byte *key, uint keylen, LF_PINS *pins)
 {
   CURSOR cursor;
-  int res=-1;
+  int res= -1;
   do
   {
@@ -218,30 +219,30 @@ static int ldelete(LF_SLIST * volatile *head, CHARSET_INFO *cs, uint32 hashnr,
   all other pins are removed.
 */
 static LF_SLIST *lsearch(LF_SLIST * volatile *head, CHARSET_INFO *cs,
-                         uint32 hashnr, const uchar *key, uint keylen,
+                         uint32 hashnr, const byte *key, uint keylen,
                          LF_PINS *pins)
 {
   CURSOR cursor;
   int res= lfind(head, cs, hashnr, key, keylen, &cursor, pins);
   if (res)
     _lf_pin(pins, 2, cursor.curr);
   _lf_unpin(pins, 0);
   _lf_unpin(pins, 1);
   return res ? cursor.curr : 0;
 }

-static inline const uchar* hash_key(const LF_HASH *hash,
-                                    const uchar *record, uint *length)
+static inline const byte* hash_key(const LF_HASH *hash,
+                                   const byte *record, uint *length)
 {
   if (hash->get_key)
     return (*hash->get_key)(record, length, 0);
   *length= hash->key_length;
   return record + hash->key_offset;
 }

-static inline uint calc_hash(LF_HASH *hash, const uchar *key, uint keylen)
+static inline uint calc_hash(LF_HASH *hash, const byte *key, uint keylen)
 {
   ulong nr1= 1, nr2= 4;
   hash->charset->coll->hash_sort(hash->charset, key, keylen, &nr1, &nr2);
   return nr1 & INT_MAX32;
 }
@@ -258,28 +259,28 @@ void lf_hash_init(LF_HASH *hash, uint element_size, uint flags,  (reindentation only)
   lf_alloc_init(&hash->alloc, sizeof(LF_SLIST)+element_size,
                 offsetof(LF_SLIST, key));
   lf_dynarray_init(&hash->array, sizeof(LF_SLIST **));
   hash->size= 1;
   hash->count= 0;
   hash->element_size= element_size;
   hash->flags= flags;
   hash->charset= charset ? charset : &my_charset_bin;
   hash->key_offset= key_offset;
   hash->key_length= key_length;
   hash->get_key= get_key;
   DBUG_ASSERT(get_key ? !key_offset && !key_length : key_length);
 }

 void lf_hash_destroy(LF_HASH *hash)
 {
   LF_SLIST *el= *(LF_SLIST **)_lf_dynarray_lvalue(&hash->array, 0);
   while (el)
   {
     intptr next= el->link;
     if (el->hashnr & 1)
       lf_alloc_real_free(&hash->alloc, el);
     else
       my_free((void *)el, MYF(0));
     el= (LF_SLIST *)next;
   }
   lf_alloc_destroy(&hash->alloc);
   lf_dynarray_destroy(&hash->array);
@@ -299,19 +300,19 @@ void lf_hash_destroy(LF_HASH *hash)
 */
 int lf_hash_insert(LF_HASH *hash, LF_PINS *pins, const void *data)
 {
-  uint csize, bucket, hashnr;
+  int csize, bucket, hashnr;
   LF_SLIST *node, * volatile *el;

   lf_rwlock_by_pins(pins);
   node= (LF_SLIST *)_lf_alloc_new(pins);
   memcpy(node+1, data, hash->element_size);
-  node->key= hash_key(hash, (uchar *)(node+1), &node->keylen);
+  node->key= hash_key(hash, (byte *)(node+1), &node->keylen);
   hashnr= calc_hash(hash, node->key, node->keylen);
   bucket= hashnr % hash->size;
   el= _lf_dynarray_lvalue(&hash->array, bucket);
   if (*el == NULL)
     initialize_bucket(hash, el, bucket, pins);
   node->hashnr= my_reverse_bits(hashnr) | 1;
   if (linsert(el, hash->charset, node, pins, hash->flags))
   {
     _lf_alloc_free(pins, node);
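The my_reverse_bits(hashnr) | 1 line is the split-ordered-list trick: real elements store their hash bit-reversed with the low bit forced to 1 (odd), while initialize_bucket() below gives dummy bucket heads my_reverse_bits(bucket) with the low bit 0 (even). In one list sorted by these reversed keys, every dummy lands directly in front of its bucket's elements, and el->hashnr & 1 lets lf_hash_destroy() tell element nodes from dummies. A standalone 8-bit sketch (illustrative; my_reverse_bits operates on the full word):

#include <stdio.h>

static unsigned reverse8(unsigned v)
{
  unsigned r= 0;
  int i;
  for (i= 0; i < 8; i++, v>>= 1)
    r= (r << 1) | (v & 1);       /* mirror the low 8 bits */
  return r;
}

int main(void)
{
  unsigned hash= 0x2B;           /* some element hash            */
  unsigned bucket= hash % 2;     /* as if hash->size == 2        */
  /* dummy key 0x80 (even) sorts before element key 0xd5 (odd),
     and every bucket-1 element key has its top bit set too */
  printf("dummy   key: 0x%02x\n", reverse8(bucket));
  printf("element key: 0x%02x\n", reverse8(hash) | 1);
  return 0;
}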
@@ -335,15 +336,15 @@ int lf_hash_insert(LF_HASH *hash, LF_PINS *pins, const void *data)
 int lf_hash_delete(LF_HASH *hash, LF_PINS *pins, const void *key, uint keylen)
 {
   LF_SLIST * volatile *el;
-  uint bucket, hashnr= calc_hash(hash, (uchar *)key, keylen);
+  uint bucket, hashnr= calc_hash(hash, (byte *)key, keylen);

   bucket= hashnr % hash->size;
   lf_rwlock_by_pins(pins);
   el= _lf_dynarray_lvalue(&hash->array, bucket);
   if (*el == NULL)
     initialize_bucket(hash, el, bucket, pins);
   if (ldelete(el, hash->charset, my_reverse_bits(hashnr) | 1,
-              (uchar *)key, keylen, pins))
+              (byte *)key, keylen, pins))
   {
     lf_rwunlock_by_pins(pins);
     return 1;
@@ -360,33 +361,33 @@ int lf_hash_delete(LF_HASH *hash, LF_PINS *pins, const void *key, uint keylen)
 void *lf_hash_search(LF_HASH *hash, LF_PINS *pins, const void *key, uint keylen)
 {
   LF_SLIST * volatile *el, *found;
-  uint bucket, hashnr= calc_hash(hash, (uchar *)key, keylen);
+  uint bucket, hashnr= calc_hash(hash, (byte *)key, keylen);

   bucket= hashnr % hash->size;
   lf_rwlock_by_pins(pins);
   el= _lf_dynarray_lvalue(&hash->array, bucket);
   if (*el == NULL)
     initialize_bucket(hash, el, bucket, pins);
   found= lsearch(el, hash->charset, my_reverse_bits(hashnr) | 1,
-                 (uchar *)key, keylen, pins);
+                 (byte *)key, keylen, pins);
   lf_rwunlock_by_pins(pins);
   return found ? found+1 : 0;
 }

 static char *dummy_key= "";

 static void initialize_bucket(LF_HASH *hash, LF_SLIST * volatile *node,
                               uint bucket, LF_PINS *pins)
 {
   uint parent= my_clear_highest_bit(bucket);
   LF_SLIST *dummy= (LF_SLIST *)my_malloc(sizeof(LF_SLIST), MYF(MY_WME));
   LF_SLIST **tmp= 0, *cur;
   LF_SLIST * volatile *el= _lf_dynarray_lvalue(&hash->array, parent);
   if (*el == NULL && bucket)
     initialize_bucket(hash, el, parent, pins);
   dummy->hashnr= my_reverse_bits(bucket);
   dummy->key= dummy_key;
   dummy->keylen= 0;
   if ((cur= linsert(el, hash->charset, dummy, pins, 0)))
   {
     my_free((void *)dummy, MYF(0));
mysys/my_getsystime.c  View file @ 3f4aa5f7

@@ -35,10 +35,6 @@ ulonglong my_getsystime()
   LARGE_INTEGER t_cnt;
   if (!offset)
   {
-    /* strictly speaking there should be a mutex to protect
-       initialization section. But my_getsystime() is called from
-       UUID() code, and UUID() calls are serialized with a mutex anyway
-    */
     LARGE_INTEGER li;
     FILETIME ft;
     GetSystemTimeAsFileTime(&ft);
storage/maria/lockman.h  View file @ 3f4aa5f7

@@ -32,7 +32,7 @@
   SLX  - Shared + Loose eXclusive
   LSIX - Loose Shared + Intention eXclusive
 */
-enum lock_type { N, S, X, IS, IX, SIX, LS, LX, SLX, LSIX };
+enum lock_type { N, S, X, IS, IX, SIX, LS, LX, SLX, LSIX, LOCK_TYPE_LAST };

 struct lockman_lock;
@@ -55,9 +55,10 @@ typedef struct {
   uint lock_timeout;
   loid_to_lo_func *loid_to_lo;
 } LOCKMAN;

+#define DIDNT_GET_THE_LOCK 0
 enum lockman_getlock_result {
-  DIDNT_GET_THE_LOCK=0, GOT_THE_LOCK,
+  NO_MEMORY_FOR_LOCK=1, DEADLOCK, LOCK_TIMEOUT,
+  GOT_THE_LOCK,
   GOT_THE_LOCK_NEED_TO_LOCK_A_SUBRESOURCE,
   GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE
 };
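The point of the reordering (a sketch, assuming only the ordering shown in the hunk above): every failure outcome now has a numeric value below GOT_THE_LOCK, so callers can replace per-value checks with a single range comparison — which is exactly the change the unittests below make when they swap res == DIDNT_GET_THE_LOCK for res < GOT_THE_LOCK:

enum lockman_getlock_result {
  NO_MEMORY_FOR_LOCK=1, DEADLOCK, LOCK_TIMEOUT,   /* failures   */
  GOT_THE_LOCK,                                   /* successes  */
  GOT_THE_LOCK_NEED_TO_LOCK_A_SUBRESOURCE,
  GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE
};

static int lock_failed(enum lockman_getlock_result res)
{
  return res < GOT_THE_LOCK;   /* one test covers all failure kinds */
}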
storage/maria/tablockman.c  View file @ 3f4aa5f7

(large diff collapsed on the original page: +240 −131)
storage/maria/tablockman.h  View file @ 3f4aa5f7

@@ -33,45 +33,45 @@
   LSIX - Loose Shared + Intention eXclusive
 */
 #ifndef _lockman_h
-enum lock_type { N, S, X, IS, IX, SIX, LS, LX, SLX, LSIX };
+#warning TODO remove N-locks
+enum lock_type { N, S, X, IS, IX, SIX, LS, LX, SLX, LSIX, LOCK_TYPE_LAST };
+enum lockman_getlock_result {
+  NO_MEMORY_FOR_LOCK=1, DEADLOCK, LOCK_TIMEOUT,
+  GOT_THE_LOCK,
+  GOT_THE_LOCK_NEED_TO_LOCK_A_SUBRESOURCE,
+  GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE
+};
 #endif

-#define LOCK_TYPES LSIX
+#define LOCK_TYPES (LOCK_TYPE_LAST-1)

 typedef struct st_table_lock_owner TABLE_LOCK_OWNER;
 typedef struct st_table_lock TABLE_LOCK;
 typedef struct st_locked_table LOCKED_TABLE;

+typedef struct st_table_lock_owner {
+  TABLE_LOCK *active_locks;        /* list of active locks */
+  TABLE_LOCK *waiting_lock;        /* waiting lock (one lock only) */
+  struct st_table_lock_owner *waiting_for; /* transaction we're waiting for */
+  pthread_cond_t *cond;     /* transactions waiting for us, wait on 'cond' */
+  pthread_mutex_t *mutex;   /* mutex is required to use 'cond' */
+  uint16 loid, waiting_for_loid;   /* Lock Owner IDentifier */
+} TABLE_LOCK_OWNER;
+
+typedef struct st_locked_table {
+  pthread_mutex_t mutex;           /* mutex for everything below */
+  HASH latest_locks;               /* latest locks in a hash */
+  TABLE_LOCK *active_locks[LOCK_TYPES]; /* dl-list of locks per type */
+  TABLE_LOCK *wait_queue_in, *wait_queue_out; /* wait deque (double-end queue) */
+} LOCKED_TABLE;
+
+typedef TABLE_LOCK_OWNER *loid_to_tlo_func(uint16);
+
 typedef struct {
   pthread_mutex_t pool_mutex;
   TABLE_LOCK *pool;                /* lifo pool of free locks */
-  uint lock_timeout;
+  uint lock_timeout;               /* lock timeout in milliseconds */
   loid_to_tlo_func *loid_to_tlo;   /* for mapping loid to TABLE_LOCK_OWNER */
 } TABLOCKMAN;

-struct st_table_lock_owner {
-  TABLE_LOCK *active_locks;        /* list of active locks */
-  TABLE_LOCK *waiting_lock;        /* waiting lock (one lock only) */
-  TABLE_LOCK_OWNER *waiting_for;   /* transaction we're waiting for */
-  pthread_cond_t *cond;     /* transactions waiting for us, wait on 'cond' */
-  pthread_mutex_t *mutex;   /* mutex is required to use 'cond' */
-  uint16 loid;                     /* Lock Owner IDentifier */
-};
-
-struct st_locked_table {
-  pthread_mutex_t mutex;           /* mutex for everything below */
-  HASH active;                     /* active locks in a hash */
-  TABLE_LOCK *active_locks[LOCK_TYPES]; /* dl-list of locks per type */
-  TABLE_LOCK *wait_queue_in, *wait_queue_out; /* wait deque */
-};
-
 void tablockman_init(TABLOCKMAN *, loid_to_tlo_func *, uint);
 void tablockman_destroy(TABLOCKMAN *);
 enum lockman_getlock_result tablockman_getlock(TABLOCKMAN *, TABLE_LOCK_OWNER *,
@@ -81,7 +81,7 @@ void tablockman_init_locked_table(LOCKED_TABLE *, int);
 void tablockman_destroy_locked_table(LOCKED_TABLE *);

 #ifdef EXTRA_DEBUG
-void print_tlo(TABLE_LOCK_OWNER *);
+void tablockman_print_tlo(TABLE_LOCK_OWNER *);
 #endif

 #endif
storage/maria/trnman.c  View file @ 3f4aa5f7

@@ -69,7 +69,8 @@ static TRN *short_trid_to_TRN(uint16 short_trid)
   return (TRN *)trn;
 }

-static byte *trn_get_hash_key(const byte *trn, uint *len, my_bool unused)
+static byte *trn_get_hash_key(const byte *trn, uint *len,
+                              my_bool unused __attribute__ ((unused)))
 {
   *len= sizeof(TrID);
   return (byte *)&((*((TRN **)trn))->trid);
storage/maria/unittest/lockman-t.c  View file @ 3f4aa5f7

@@ -14,6 +14,10 @@
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

+/*
+  lockman for row and table locks
+*/
+
 //#define EXTRA_VERBOSE

 #include <tap.h>
storage/maria/unittest/lockman1-t.c  View file @ 3f4aa5f7

@@ -14,6 +14,10 @@
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

+/*
+  lockman for row locks, tablockman for table locks
+*/
+
 //#define EXTRA_VERBOSE

 #include <tap.h>
@@ -64,7 +68,7 @@ TABLE_LOCK_OWNER *loid2lo1(uint16 loid)
 #define lock_ok_l(O, R, L) \
   test_lock(O, R, L, "", GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE)
 #define lock_conflict(O, R, L) \
-  test_lock(O, R, L, "cannot ", DIDNT_GET_THE_LOCK);
+  test_lock(O, R, L, "cannot ", LOCK_TIMEOUT);

 void test_tablockman_simple()
 {
@@ -164,8 +168,11 @@ int Ntables= 10;
 int table_lock_ratio= 10;
 enum lock_type lock_array[6]= {S, X, LS, LX, IS, IX};
 char *lock2str[6]= {"S", "X", "LS", "LX", "IS", "IX"};
-char *res2str[4]= {
+char *res2str[]= {
   "DIDN'T GET THE LOCK",
+  "OUT OF MEMORY",
+  "DEADLOCK",
+  "LOCK TIMEOUT",
   "GOT THE LOCK",
   "GOT THE LOCK NEED TO LOCK A SUBRESOURCE",
   "GOT THE LOCK NEED TO INSTANT LOCK A SUBRESOURCE"};
@@ -191,7 +198,7 @@ pthread_handler_t test_lockman(void *arg)
       res= tablockman_getlock(&tablockman, lo1, ltarray+table,
                               lock_array[locklevel]);
       DIAG(("loid %2d, table %d, lock %s, res %s", loid, table,
             lock2str[locklevel], res2str[res]));
-      if (res == DIDNT_GET_THE_LOCK)
+      if (res < GOT_THE_LOCK)
       {
         lockman_release_locks(&lockman, lo);
         tablockman_release_locks(&tablockman, lo1);
         DIAG(("loid %2d, release all locks", loid));
@@ -208,11 +215,6 @@ pthread_handler_t test_lockman(void *arg)
             lock2str[locklevel+4], res2str[res]));
       switch (res)
       {
-      case DIDNT_GET_THE_LOCK:
-        lockman_release_locks(&lockman, lo);
-        tablockman_release_locks(&tablockman, lo1);
-        DIAG(("loid %2d, release all locks", loid));
-        timeout++;
-        continue;
       case GOT_THE_LOCK:
         continue;
       case GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE:
@@ -232,7 +234,10 @@ pthread_handler_t test_lockman(void *arg)
         DBUG_ASSERT(res == GOT_THE_LOCK);
         continue;
       default:
-        DBUG_ASSERT(0);
+        lockman_release_locks(&lockman, lo);
+        tablockman_release_locks(&tablockman, lo1);
+        DIAG(("loid %2d, release all locks", loid));
+        timeout++;
+        continue;
       }
     }
   }
storage/maria/unittest/lockman2-t.c  View file @ 3f4aa5f7

@@ -14,6 +14,10 @@
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

+/*
+  tablockman for row and table locks
+*/
+
 //#define EXTRA_VERBOSE

 #include <tap.h>
@@ -57,7 +61,7 @@ TABLE_LOCK_OWNER *loid2lo1(uint16 loid)
 #define lock_ok_l(O, R, L) \
   test_lock(O, R, L, "", GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE)
 #define lock_conflict(O, R, L) \
-  test_lock(O, R, L, "cannot ", DIDNT_GET_THE_LOCK);
+  test_lock(O, R, L, "cannot ", LOCK_TIMEOUT);

 void test_tablockman_simple()
 {
@@ -165,14 +169,34 @@ void run_test(const char *test, pthread_handler handler, int n, int m)
   my_free((void *)threads, MYF(0));
 }

+static void reinit_tlo(TABLOCKMAN *lm, TABLE_LOCK_OWNER *lo)
+{
+  TABLE_LOCK_OWNER backup= *lo;
+
+  tablockman_release_locks(lm, lo);
+/*
+  pthread_mutex_destroy(lo->mutex);
+  pthread_cond_destroy(lo->cond);
+  bzero(lo, sizeof(*lo));
+
+  lo->mutex= backup.mutex;
+  lo->cond= backup.cond;
+  lo->loid= backup.loid;
+  pthread_mutex_init(lo->mutex, MY_MUTEX_INIT_FAST);
+  pthread_cond_init(lo->cond, 0);
+*/
+}
+
 pthread_mutex_t rt_mutex;
 int Nrows= 100;
 int Ntables= 10;
 int table_lock_ratio= 10;
 enum lock_type lock_array[6]= {S, X, LS, LX, IS, IX};
 char *lock2str[6]= {"S", "X", "LS", "LX", "IS", "IX"};
-char *res2str[4]= {
-  "DIDN'T GET THE LOCK",
+char *res2str[]= {
+  0,
+  "OUT OF MEMORY",
+  "DEADLOCK",
+  "LOCK TIMEOUT",
   "GOT THE LOCK",
   "GOT THE LOCK NEED TO LOCK A SUBRESOURCE",
   "GOT THE LOCK NEED TO INSTANT LOCK A SUBRESOURCE"};
@@ -200,9 +224,9 @@ pthread_handler_t test_lockman(void *arg)
       res= tablockman_getlock(&tablockman, lo1, ltarray+table,
                               lock_array[locklevel]);
       DIAG(("loid %2d, table %d, lock %s, res %s", loid, table,
             lock2str[locklevel], res2str[res]));
-      if (res == DIDNT_GET_THE_LOCK)
+      if (res < GOT_THE_LOCK)
       {
-        tablockman_release_locks(&tablockman, lo1);
+        reinit_tlo(&tablockman, lo1);
         DIAG(("loid %2d, release all locks", loid));
         timeout++;
         continue;
@@ -217,11 +241,6 @@ pthread_handler_t test_lockman(void *arg)
             lock2str[locklevel+4], res2str[res]));
       switch (res)
       {
-      case DIDNT_GET_THE_LOCK:
-        tablockman_release_locks(&tablockman, lo1);
-        DIAG(("loid %2d, release all locks", loid));
-        timeout++;
-        continue;
       case GOT_THE_LOCK:
         continue;
       case GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE:
@@ -230,9 +249,9 @@ pthread_handler_t test_lockman(void *arg)
       res= tablockman_getlock(&tablockman, lo1, ltarray+row,
                               lock_array[locklevel]);
       DIAG(("loid %2d, ROW %d, lock %s, res %s", loid, row,
             lock2str[locklevel], res2str[res]));
-      if (res == DIDNT_GET_THE_LOCK)
+      if (res < GOT_THE_LOCK)
      {
-        tablockman_release_locks(&tablockman, lo1);
+        reinit_tlo(&tablockman, lo1);
         DIAG(("loid %2d, release all locks", loid));
         timeout++;
         continue;
@@ -240,12 +259,15 @@ pthread_handler_t test_lockman(void *arg)
         DBUG_ASSERT(res == GOT_THE_LOCK);
         continue;
       default:
-        DBUG_ASSERT(0);
+        reinit_tlo(&tablockman, lo1);
+        DIAG(("loid %2d, release all locks", loid));
+        timeout++;
+        continue;
       }
     }
   }
-  tablockman_release_locks(&tablockman, lo1);
+  reinit_tlo(&tablockman, lo1);

   pthread_mutex_lock(&rt_mutex);
   rt_num_threads--;
@@ -264,7 +286,7 @@ int main()
   my_init();
   pthread_mutex_init(&rt_mutex, 0);
-  plan(39);
+  plan(40);

   if (my_atomic_initialize())
     return exit_status();
@@ -299,7 +321,7 @@ int main()
   Nrows= 100;
   Ntables= 10;
   table_lock_ratio= 10;
-  //run_test("\"random lock\" stress test", test_lockman, THREADS, CYCLES);
+  run_test("\"random lock\" stress test", test_lockman, THREADS, CYCLES);
 #if 0
   /* "real-life" simulation - many rows, no table locks */
   Nrows= 1000000;