nexedi / MariaDB

Commit 3f4aa5f7
authored Nov 17, 2006 by unknown

Merge bk-internal.mysql.com:/home/bk/mysql-maria
into janus.mylan:/usr/home/serg/Abk/mysql-maria

parents 3becab22 915cebdd

Showing 16 changed files with 471 additions and 301 deletions (+471 -301)
BUILD/SETUP.sh                          +2   -1
include/atomic/nolock.h                 +1   -1
include/lf.h                            +32  -15
include/my_atomic.h                     +0   -8
include/my_global.h                     +10  -1
mysys/lf_alloc-pin.c                    +15  -14
mysys/lf_dynarray.c                     +23  -12
mysys/lf_hash.c                         +60  -59
mysys/my_getsystime.c                   +0   -4
storage/maria/lockman.h                 +4   -3
storage/maria/tablockman.c              +240 -131
storage/maria/tablockman.h              +26  -26
storage/maria/trnman.c                  +2   -1
storage/maria/unittest/lockman-t.c      +4   -0
storage/maria/unittest/lockman1-t.c     +14  -9
storage/maria/unittest/lockman2-t.c     +38  -16
BUILD/SETUP.sh

@@ -117,8 +117,9 @@ valgrind_flags="-USAFEMALLOC -UFORCE_INIT_OF_VARS -DHAVE_purify "
 valgrind_flags="$valgrind_flags -DMYSQL_SERVER_SUFFIX=-valgrind-max"
 #
 # Used in -debug builds
-debug_cflags="-DUNIV_MUST_NOT_INLINE -DEXTRA_DEBUG -DFORCE_INIT_OF_VARS "
+debug_cflags="-DUNIV_MUST_NOT_INLINE -DEXTRA_DEBUG -DFORCE_INIT_OF_VARS"
 debug_cflags="$debug_cflags -DSAFEMALLOC -DPEDANTIC_SAFEMALLOC -DSAFE_MUTEX"
+debug_cflags="$debug_cflags -DMY_LF_EXTRA_DEBUG"
 error_inject="--with-error-inject "
 #
 # Base C++ flags for all builds
include/atomic/nolock.h

@@ -32,7 +32,7 @@
 #ifdef make_atomic_cas_body
-typedef struct { } my_atomic_rwlock_t;
+typedef struct { } my_atomic_rwlock_t __attribute__ ((unused));
 #define my_atomic_rwlock_destroy(name)
 #define my_atomic_rwlock_init(name)
 #define my_atomic_rwlock_rdlock(name)
include/lf.h

@@ -24,7 +24,7 @@
   func() is a _func() protected by my_atomic_rwlock_wrlock()
 */
-#define lock_wrap(f, t,proto_args, args, lock) \
+#define lock_wrap(f, t, proto_args, args, lock) \
 t _ ## f proto_args; \
 static inline t f proto_args \
 { \

@@ -35,7 +35,7 @@ static inline t f proto_args \
   return ret; \
 }
-#define lock_wrap_void(f,proto_args, args, lock) \
+#define lock_wrap_void(f, proto_args, args, lock) \
 void _ ## f proto_args; \
 static inline void f proto_args \
 { \

@@ -44,14 +44,14 @@ static inline void f proto_args \
   my_atomic_rwlock_wrunlock(lock); \
 }
-#define nolock_wrap(f, t,proto_args, args) \
+#define nolock_wrap(f, t, proto_args, args) \
 t _ ## f proto_args; \
 static inline t f proto_args \
 { \
   return _ ## f args; \
 }
 #define nolock_wrap_void(f, proto_args, args) \
 void _ ## f proto_args; \
 static inline void f proto_args \
 { \

@@ -80,14 +80,14 @@ void lf_dynarray_destroy(LF_DYNARRAY *array);
 nolock_wrap(lf_dynarray_value, void *,
             (LF_DYNARRAY *array, uint idx),
             (array, idx));
 lock_wrap(lf_dynarray_lvalue, void *,
           (LF_DYNARRAY *array, uint idx),
           (array, idx),
           &array->lock);
 nolock_wrap(lf_dynarray_iterate, int,
             (LF_DYNARRAY *array, lf_dynarray_func func, void *arg),
             (array, func, arg));

 /*
   pin manager for memory allocator, lf_alloc-pin.c

@@ -115,9 +115,14 @@ typedef struct {
   uint32 volatile link;
   /* we want sizeof(LF_PINS) to be 128 to avoid false sharing */
-  char pad[128-sizeof(uint32)*2
-           -sizeof(void *)*(LF_PINBOX_PINS+2)];
+  char pad[128-sizeof(uint32)*2
+           -sizeof(LF_PINBOX *)
+           -sizeof(void *)*(LF_PINBOX_PINS+1)];
 } LF_PINS;

+/*
+  shortcut macros to do an atomic_wrlock on a structure that uses pins
+  (e.g. lf_hash).
+*/
 #define lf_rwlock_by_pins(PINS) \
   my_atomic_rwlock_wrlock(&(PINS)->pinbox->pinstack.lock)
 #define lf_rwunlock_by_pins(PINS) \

@@ -131,11 +136,11 @@ typedef struct {
 #if defined(__GNUC__) && defined(MY_LF_EXTRA_DEBUG)
 #define LF_REQUIRE_PINS(N) \
   static const char require_pins[LF_PINBOX_PINS-N]; \
-  static const int LF_NUM_PINS_IN_THIS_FILE=N;
+  static const int LF_NUM_PINS_IN_THIS_FILE= N;
 #define _lf_pin(PINS, PIN, ADDR) \
   ( \
-    my_atomic_storeptr(&(PINS)->pin[PIN], (ADDR)), \
-    assert(PIN < LF_NUM_PINS_IN_THIS_FILE) \
+    assert(PIN < LF_NUM_PINS_IN_THIS_FILE), \
+    my_atomic_storeptr(&(PINS)->pin[PIN], (ADDR)) \
   )
 #else
 #define LF_REQUIRE_PINS(N)

@@ -151,7 +156,7 @@ typedef struct {
 } while (0)
 #define lf_unpin(PINS, PIN) lf_pin(PINS, PIN, NULL)
 #define _lf_assert_pin(PINS, PIN) assert((PINS)->pin[PIN] != 0)
 #define _lf_assert_unpin(PINS, PIN) assert((PINS)->pin[PIN]==0)
 void lf_pinbox_init(LF_PINBOX *pinbox, uint free_ptr_offset,
                     lf_pinbox_free_func *free_func, void *free_func_arg);

@@ -167,16 +172,20 @@ lock_wrap_void(lf_pinbox_put_pins,
                &pins->pinbox->pinstack.lock);
 lock_wrap_void(lf_pinbox_free,
                (LF_PINS *pins, void *addr),
                (pins, addr),
                &pins->pinbox->pinstack.lock);

 /*
   memory allocator, lf_alloc-pin.c
 */

+struct st_lf_alloc_node {
+  struct st_lf_alloc_node *next;
+};
+
 typedef struct st_lf_allocator {
   LF_PINBOX pinbox;
-  void * volatile top;
+  struct st_lf_alloc_node * volatile top;
   uint element_size;
   uint32 volatile mallocs;
 } LF_ALLOCATOR;

@@ -184,13 +193,17 @@ typedef struct st_lf_allocator {
 void lf_alloc_init(LF_ALLOCATOR *allocator, uint size, uint free_ptr_offset);
 void lf_alloc_destroy(LF_ALLOCATOR *allocator);
 uint lf_alloc_in_pool(LF_ALLOCATOR *allocator);
+/*
+  shortcut macros to access underlying pinbox functions from an LF_ALLOCATOR
+  see _lf_pinbox_get_pins() and _lf_pinbox_put_pins()
+*/
 #define _lf_alloc_free(PINS, PTR) _lf_pinbox_free((PINS), (PTR))
 #define lf_alloc_free(PINS, PTR) lf_pinbox_free((PINS), (PTR))
 #define _lf_alloc_get_pins(ALLOC) _lf_pinbox_get_pins(&(ALLOC)->pinbox)
 #define lf_alloc_get_pins(ALLOC) lf_alloc_get_pins(&(ALLOC)->pinbox)
 #define _lf_alloc_put_pins(PINS) _lf_pinbox_put_pins(PINS)
 #define lf_alloc_put_pins(PINS) lf_pinbox_put_pins(PINS)
-#define lf_alloc_real_free(ALLOC,ADDR) my_free((gptr)(ADDR), MYF(0))
+#define lf_alloc_real_free(ALLOC, ADDR) my_free((gptr)(ADDR), MYF(0))
 lock_wrap(lf_alloc_new, void *,
           (LF_PINS *pins),

@@ -222,6 +235,10 @@ void lf_hash_destroy(LF_HASH *hash);
 int lf_hash_insert(LF_HASH *hash, LF_PINS *pins, const void *data);
 void *lf_hash_search(LF_HASH *hash, LF_PINS *pins, const void *key, uint keylen);
 int lf_hash_delete(LF_HASH *hash, LF_PINS *pins, const void *key, uint keylen);
+/*
+  shortcut macros to access underlying pinbox functions from an LF_HASH
+  see _lf_pinbox_get_pins() and _lf_pinbox_put_pins()
+*/
 #define _lf_hash_get_pins(HASH) _lf_alloc_get_pins(&(HASH)->alloc)
 #define lf_hash_get_pins(HASH) lf_alloc_get_pins(&(HASH)->alloc)
 #define _lf_hash_put_pins(PINS) _lf_pinbox_put_pins(PINS)
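Aside: a minimal, hedged sketch of what the lock_wrap() family of macros in include/lf.h generates - a public f() that wraps the internal _f() call in a write lock. The macro body here is reconstructed from the fragments visible in the diff, the pthread-based my_rwlock type is only a stand-in for my_atomic_rwlock_t so the example is self-contained, and bump_counter/counter_lock are made-up names for illustration.

#include <pthread.h>
#include <stdio.h>

typedef pthread_rwlock_t my_rwlock;                 /* stand-in for my_atomic_rwlock_t */
#define my_rwlock_wrlock(l)   pthread_rwlock_wrlock(l)
#define my_rwlock_wrunlock(l) pthread_rwlock_unlock(l)

/* reconstructed shape of lock_wrap(f, t, proto_args, args, lock) */
#define lock_wrap(f, t, proto_args, args, lock)  \
t _ ## f proto_args;                             \
static inline t f proto_args                     \
{                                                \
  t ret;                                         \
  my_rwlock_wrlock(lock);                        \
  ret= _ ## f args;                              \
  my_rwlock_wrunlock(lock);                      \
  return ret;                                    \
}

static my_rwlock counter_lock;
static int counter;

/* declares _bump_counter() and defines a locked wrapper bump_counter() */
lock_wrap(bump_counter, int, (int by), (by), &counter_lock)

int _bump_counter(int by) { return counter+= by; }

int main(void)
{
  pthread_rwlock_init(&counter_lock, NULL);
  printf("%d\n", bump_counter(3));  /* prints 3 */
  return 0;
}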
include/my_atomic.h

@@ -173,14 +173,6 @@ make_atomic_fas(ptr)
 #define LF_BACKOFF (1)
 #endif

-#if SIZEOF_CHARP == SIZEOF_INT
-typedef int intptr;
-#elif SIZEOF_CHARP == SIZEOF_LONG
-typedef long intptr;
-#else
-#error
-#endif
-
 #define MY_ATOMIC_OK       0
 #define MY_ATOMIC_NOT_1CPU 1
 extern int my_atomic_initialize();
include/my_global.h

@@ -432,7 +432,8 @@ C_MODE_END
 #define compile_time_assert(X) \
   do \
   { \
-    char compile_time_assert[(X) ? 1 : -1]; \
+    char compile_time_assert[(X) ? 1 : -1] \
+                             __attribute__ ((unused)); \
   } while(0)

 /* Go around some bugs in different OS and compilers */

@@ -964,6 +965,14 @@ typedef unsigned __int64 my_ulonglong;
 typedef unsigned long long my_ulonglong;
 #endif

+#if SIZEOF_CHARP == SIZEOF_INT
+typedef int intptr;
+#elif SIZEOF_CHARP == SIZEOF_LONG
+typedef long intptr;
+#else
+#error
+#endif
+
 #ifdef USE_RAID
 /*
   The following is done with a if to not get problems with pre-processors
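Aside: the compile_time_assert() change only adds __attribute__ ((unused)) to silence compiler warnings; its use stays the same. A small stand-alone sketch, assuming GCC, of how such a macro is typically used - the negative array size makes the build fail whenever the condition is false. The macro below mirrors the patched one; main() is illustration only.

#include <stdio.h>

#define compile_time_assert(X)                         \
  do                                                   \
  {                                                    \
    char compile_time_assert[(X) ? 1 : -1]             \
                             __attribute__ ((unused)); \
  } while(0)

int main(void)
{
  compile_time_assert(sizeof(int) >= 4);     /* true: compiles silently */
  /* compile_time_assert(sizeof(int) == 1);     false: char x[-1] -> build error */
  puts("ok");
  return 0;
}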
mysys/lf_alloc-pin.c

@@ -91,7 +91,7 @@ static void _lf_pinbox_real_free(LF_PINS *pins);
   See the latter for details.
 */
 void lf_pinbox_init(LF_PINBOX *pinbox, uint free_ptr_offset,
                     lf_pinbox_free_func *free_func, void *free_func_arg)
 {
   DBUG_ASSERT(sizeof(LF_PINS) == 128);
   DBUG_ASSERT(free_ptr_offset % sizeof(void *) == 0);

@@ -306,7 +306,7 @@ static void _lf_pinbox_real_free(LF_PINS *pins)
   {
     if (addr) /* use binary search */
     {
       void **a, **b, **c;
       for (a= addr, b= addr+npins-1, c= a+(b-a)/2; b-a>1; c= a+(b-a)/2)
         if (cur == *c)
           a= b= c;

@@ -337,13 +337,13 @@ static void _lf_pinbox_real_free(LF_PINS *pins)
   callback for _lf_pinbox_real_free to free an unpinned object -
   add it back to the allocator stack
 */
-static void alloc_free(void *node, LF_ALLOCATOR *allocator)
+static void alloc_free(struct st_lf_alloc_node *node, LF_ALLOCATOR *allocator)
 {
-  void *tmp;
+  struct st_lf_alloc_node *tmp;
   tmp= allocator->top;
   do
   {
-    (*(void **)node)= tmp;
+    node->next= tmp;
   } while (!my_atomic_casptr((void **)&allocator->top, (void **)&tmp, node) &&
            LF_BACKOFF);
 }

@@ -379,12 +379,12 @@ void lf_alloc_init(LF_ALLOCATOR *allocator, uint size, uint free_ptr_offset)
 */
 void lf_alloc_destroy(LF_ALLOCATOR *allocator)
 {
-  void *el= allocator->top;
-  while (el)
+  struct st_lf_alloc_node *node= allocator->top;
+  while (node)
   {
-    void *tmp= *(void **)el;
-    my_free(el, MYF(0));
-    el= tmp;
+    struct st_lf_alloc_node *tmp= node->next;
+    my_free((void *)node, MYF(0));
+    node= tmp;
   }
   lf_pinbox_destroy(&allocator->pinbox);
   allocator->top= 0;

@@ -400,7 +400,7 @@ void lf_alloc_destroy(LF_ALLOCATOR *allocator)
 void *_lf_alloc_new(LF_PINS *pins)
 {
   LF_ALLOCATOR *allocator= (LF_ALLOCATOR *)(pins->pinbox->free_func_arg);
-  void *node;
+  struct st_lf_alloc_node *node;
   for (;;)
   {
     do

@@ -410,7 +410,8 @@ void *_lf_alloc_new(LF_PINS *pins)
     } while (node != allocator->top && LF_BACKOFF);
     if (!node)
     {
-      if (!(node= my_malloc(allocator->element_size, MYF(MY_WME|MY_ZEROFILL))))
+      if (!(node= (void *)my_malloc(allocator->element_size,
+                                    MYF(MY_WME|MY_ZEROFILL))))
         break;
 #ifdef MY_LF_EXTRA_DEBUG
       my_atomic_add32(&allocator->mallocs, 1);

@@ -434,8 +435,8 @@ void *_lf_alloc_new(LF_PINS *pins)
 uint lf_alloc_in_pool(LF_ALLOCATOR *allocator)
 {
   uint i;
-  void *node;
-  for (node= allocator->top, i= 0; node; node= *(void **)node, i++)
+  struct st_lf_alloc_node *node;
+  for (node= allocator->top, i= 0; node; node= node->next, i++)
     /* no op */;
   return i;
 }
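Aside: the alloc_free()/lf_alloc_destroy() changes above replace untyped void ** juggling with the new st_lf_alloc_node type, but the push algorithm itself is unchanged - link the node in front of the current top, then compare-and-swap the top pointer, retrying on contention. A stand-alone sketch of that lock-free LIFO push, using C11 atomics instead of my_atomic_casptr purely for illustration:

#include <stdatomic.h>
#include <stdio.h>

struct node { struct node *next; };

static _Atomic(struct node *) top;     /* the free-list head, like allocator->top */

static void push(struct node *n)
{
  struct node *old= atomic_load(&top);
  do
  {
    n->next= old;                      /* link node in front of the current top */
  } while (!atomic_compare_exchange_weak(&top, &old, n));  /* retry if top moved */
}

int main(void)
{
  static struct node a, b;
  push(&a);
  push(&b);
  printf("top points to b: %d, b.next points to a: %d\n",
         atomic_load(&top) == &b, b.next == &a);
  return 0;
}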
mysys/lf_dynarray.c

@@ -19,9 +19,9 @@
   (so no pointer into the array may ever become invalid).
   Memory is allocated in non-contiguous chunks.
-  This data structure is not space efficient for sparce arrays.
-  The number of elements is limited to 2^16
+  This data structure is not space efficient for sparse arrays.
+  The number of elements is limited to 4311810304
   Every element is aligned to sizeof(element) boundary
   (to avoid false sharing if element is big enough).

@@ -49,7 +49,8 @@ void lf_dynarray_init(LF_DYNARRAY *array, uint element_size)
 static void recursive_free(void **alloc, int level)
 {
-  if (!alloc) return;
+  if (!alloc)
+    return;
   if (level)
   {

@@ -68,10 +69,9 @@ void lf_dynarray_destroy(LF_DYNARRAY *array)
   for (i= 0; i < LF_DYNARRAY_LEVELS; i++)
     recursive_free(array->level[i], i);
   my_atomic_rwlock_destroy(&array->lock);
-  bzero(array, sizeof(*array));
 }

-static const long dynarray_idxes_in_prev_level[LF_DYNARRAY_LEVELS]=
+static const ulong dynarray_idxes_in_prev_levels[LF_DYNARRAY_LEVELS]=
 {
   0, /* +1 here to to avoid -1's below */
   LF_DYNARRAY_LEVEL_LENGTH,

@@ -82,6 +82,15 @@ static const long dynarray_idxes_in_prev_level[LF_DYNARRAY_LEVELS]=
   LF_DYNARRAY_LEVEL_LENGTH +
   LF_DYNARRAY_LEVEL_LENGTH
 };

+static const ulong dynarray_idxes_in_prev_level[LF_DYNARRAY_LEVELS]=
+{
+  0, /* +1 here to to avoid -1's below */
+  LF_DYNARRAY_LEVEL_LENGTH,
+  LF_DYNARRAY_LEVEL_LENGTH * LF_DYNARRAY_LEVEL_LENGTH,
+  LF_DYNARRAY_LEVEL_LENGTH * LF_DYNARRAY_LEVEL_LENGTH * LF_DYNARRAY_LEVEL_LENGTH,
+};
+
 /*
   Returns a valid lvalue pointer to the element number 'idx'.
   Allocates memory if necessary.

@@ -91,16 +100,17 @@ void *_lf_dynarray_lvalue(LF_DYNARRAY *array, uint idx)
   void *ptr, * volatile * ptr_ptr= 0;
   int i;

-  for (i= 3; idx < dynarray_idxes_in_prev_level[i]; i--) /* no-op */;
+  for (i= LF_DYNARRAY_LEVELS-1; idx < dynarray_idxes_in_prev_levels[i]; i--)
+    /* no-op */;
   ptr_ptr= &array->level[i];
-  idx-= dynarray_idxes_in_prev_level[i];
+  idx-= dynarray_idxes_in_prev_levels[i];
   for (; i > 0; i--)
   {
     if (!(ptr= *ptr_ptr))
     {
       void *alloc= my_malloc(LF_DYNARRAY_LEVEL_LENGTH * sizeof(void *),
                              MYF(MY_WME|MY_ZEROFILL));
-      if (!alloc)
+      if (unlikely(!alloc))
         return(NULL);
       if (my_atomic_casptr(ptr_ptr, &ptr, alloc))
         ptr= alloc;

@@ -116,7 +126,7 @@ void *_lf_dynarray_lvalue(LF_DYNARRAY *array, uint idx)
       alloc= my_malloc(LF_DYNARRAY_LEVEL_LENGTH * array->size_of_element +
                        max(array->size_of_element, sizeof(void *)),
                        MYF(MY_WME|MY_ZEROFILL));
-      if (!alloc)
+      if (unlikely(!alloc))
         return(NULL);
       /* reserve the space for free() address */
       data= alloc + sizeof(void *);

@@ -143,9 +153,10 @@ void *_lf_dynarray_value(LF_DYNARRAY *array, uint idx)
   void *ptr, * volatile * ptr_ptr= 0;
   int i;

-  for (i= 3; idx < dynarray_idxes_in_prev_level[i]; i--) /* no-op */;
+  for (i= LF_DYNARRAY_LEVELS-1; idx < dynarray_idxes_in_prev_levels[i]; i--)
+    /* no-op */;
   ptr_ptr= &array->level[i];
-  idx-= dynarray_idxes_in_prev_level[i];
+  idx-= dynarray_idxes_in_prev_levels[i];
   for (; i > 0; i--)
   {
     if (!(ptr= *ptr_ptr))
mysys/lf_hash.c

@@ -23,6 +23,7 @@
   (but how to do it in lf_hash_delete ?)
 */
 #include <my_global.h>
+#include <m_string.h>
 #include <my_sys.h>
 #include <my_bit.h>
 #include <lf.h>

@@ -33,7 +34,7 @@ LF_REQUIRE_PINS(3);
 typedef struct {
   intptr volatile link; /* a pointer to the next element in a listand a flag */
   uint32 hashnr;        /* reversed hash number, for sorting */
-  const uchar *key;
+  const byte *key;
   uint keylen;
 } LF_SLIST;

@@ -67,31 +68,31 @@ typedef struct {
   pins[0..2] are used, they are NOT removed on return
 */
 static int lfind(LF_SLIST * volatile *head, CHARSET_INFO *cs, uint32 hashnr,
-                 const uchar *key, uint keylen, CURSOR *cursor, LF_PINS *pins)
+                 const byte *key, uint keylen, CURSOR *cursor, LF_PINS *pins)
 {
   uint32       cur_hashnr;
-  const uchar *cur_key;
+  const byte  *cur_key;
   uint         cur_keylen;
   intptr       link;
 retry:
   cursor->prev= (intptr *)head;
   do {
     cursor->curr= PTR(*cursor->prev);
     _lf_pin(pins, 1, cursor->curr);
   } while (*cursor->prev != (intptr)cursor->curr && LF_BACKOFF);
   for (;;)
   {
     if (!cursor->curr)
       return 0;
     do { // XXX or goto retry ?
       link= cursor->curr->link;
       cursor->next= PTR(link);
       _lf_pin(pins, 0, cursor->next);
     } while (link != cursor->curr->link && LF_BACKOFF);
     cur_hashnr= cursor->curr->hashnr;
     cur_key= cursor->curr->key;
     cur_keylen= cursor->curr->keylen;
     if (*cursor->prev != (intptr)cursor->curr)
     {
       LF_BACKOFF;

@@ -101,12 +102,12 @@ static int lfind(LF_SLIST * volatile *head, CHARSET_INFO *cs, uint32 hashnr,
     {
       if (cur_hashnr >= hashnr)
       {
         int r= 1;
         if (cur_hashnr > hashnr ||
             (r= my_strnncoll(cs, cur_key, cur_keylen, key, keylen)) >= 0)
           return !r;
       }
-      cursor->prev=&(cursor->curr->link);
+      cursor->prev= &(cursor->curr->link);
       _lf_pin(pins, 2, cursor->curr);
     }
     else

@@ -120,7 +121,7 @@ static int lfind(LF_SLIST * volatile *head, CHARSET_INFO *cs, uint32 hashnr,
       goto retry;
     }
   }
   cursor->curr= cursor->next;
   _lf_pin(pins, 1, cursor->curr);
 }

@@ -141,21 +142,21 @@ static LF_SLIST *linsert(LF_SLIST * volatile *head, CHARSET_INFO *cs,
                          LF_SLIST *node, LF_PINS *pins, uint flags)
 {
   CURSOR         cursor;
-  int            res=-1;
+  int            res= -1;
   do
   {
     if (lfind(head, cs, node->hashnr, node->key, node->keylen,
               &cursor, pins) &&
         (flags & LF_HASH_UNIQUE))
       res= 0; /* duplicate found */
     else
     {
       node->link= (intptr)cursor.curr;
       assert(node->link != (intptr)node);
       assert(cursor.prev != &node->link);
       if (my_atomic_casptr((void **)cursor.prev, (void **)&cursor.curr, node))
         res= 1; /* inserted ok */
     }
   } while (res == -1);
   _lf_unpin(pins, 0);

@@ -177,10 +178,10 @@ static LF_SLIST *linsert(LF_SLIST * volatile *head, CHARSET_INFO *cs,
   it uses pins[0..2], on return all pins are removed.
 */
 static int ldelete(LF_SLIST * volatile *head, CHARSET_INFO *cs, uint32 hashnr,
-                   const uchar *key, uint keylen, LF_PINS *pins)
+                   const byte *key, uint keylen, LF_PINS *pins)
 {
   CURSOR cursor;
-  int res=-1;
+  int res= -1;
   do
   {

@@ -218,30 +219,30 @@ static int ldelete(LF_SLIST * volatile *head, CHARSET_INFO *cs, uint32 hashnr,
   all other pins are removed.
 */
 static LF_SLIST *lsearch(LF_SLIST * volatile *head, CHARSET_INFO *cs,
-                         uint32 hashnr, const uchar *key, uint keylen,
+                         uint32 hashnr, const byte *key, uint keylen,
                          LF_PINS *pins)
 {
   CURSOR cursor;
   int res= lfind(head, cs, hashnr, key, keylen, &cursor, pins);
   if (res) _lf_pin(pins, 2, cursor.curr);
   _lf_unpin(pins, 0);
   _lf_unpin(pins, 1);
   return res ? cursor.curr : 0;
 }

-static inline const uchar* hash_key(const LF_HASH *hash,
-                                    const uchar *record, uint *length)
+static inline const byte* hash_key(const LF_HASH *hash,
+                                   const byte *record, uint *length)
 {
   if (hash->get_key)
     return (*hash->get_key)(record, length, 0);
   *length= hash->key_length;
   return record + hash->key_offset;
 }

-static inline uint calc_hash(LF_HASH *hash, const uchar *key, uint keylen)
+static inline uint calc_hash(LF_HASH *hash, const byte *key, uint keylen)
 {
   ulong nr1= 1, nr2= 4;
   hash->charset->coll->hash_sort(hash->charset, key, keylen, &nr1, &nr2);
   return nr1 & INT_MAX32;
 }

@@ -258,28 +259,28 @@ void lf_hash_init(LF_HASH *hash, uint element_size, uint flags,
   lf_alloc_init(&hash->alloc, sizeof(LF_SLIST)+element_size,
                 offsetof(LF_SLIST, key));
   lf_dynarray_init(&hash->array, sizeof(LF_SLIST **));
   hash->size= 1;
   hash->count= 0;
   hash->element_size= element_size;
   hash->flags= flags;
   hash->charset= charset ? charset : &my_charset_bin;
   hash->key_offset= key_offset;
   hash->key_length= key_length;
   hash->get_key= get_key;
   DBUG_ASSERT(get_key ? !key_offset && !key_length : key_length);
 }

 void lf_hash_destroy(LF_HASH *hash)
 {
-  LF_SLIST *el=*(LF_SLIST **)_lf_dynarray_lvalue(&hash->array, 0);
+  LF_SLIST *el= *(LF_SLIST **)_lf_dynarray_lvalue(&hash->array, 0);
   while (el)
   {
     intptr next= el->link;
     if (el->hashnr & 1)
       lf_alloc_real_free(&hash->alloc, el);
     else
       my_free((void *)el, MYF(0));
     el= (LF_SLIST *)next;
   }
   lf_alloc_destroy(&hash->alloc);
   lf_dynarray_destroy(&hash->array);

@@ -299,19 +300,19 @@ void lf_hash_destroy(LF_HASH *hash)
 */
 int lf_hash_insert(LF_HASH *hash, LF_PINS *pins, const void *data)
 {
-  uint csize, bucket, hashnr;
+  int csize, bucket, hashnr;
   LF_SLIST *node, * volatile *el;
   lf_rwlock_by_pins(pins);
   node= (LF_SLIST *)_lf_alloc_new(pins);
   memcpy(node+1, data, hash->element_size);
-  node->key= hash_key(hash, (uchar *)(node+1), &node->keylen);
+  node->key= hash_key(hash, (byte *)(node+1), &node->keylen);
   hashnr= calc_hash(hash, node->key, node->keylen);
   bucket= hashnr % hash->size;
   el= _lf_dynarray_lvalue(&hash->array, bucket);
   if (*el == NULL)
     initialize_bucket(hash, el, bucket, pins);
   node->hashnr= my_reverse_bits(hashnr) | 1;
   if (linsert(el, hash->charset, node, pins, hash->flags))
   {
     _lf_alloc_free(pins, node);

@@ -335,15 +336,15 @@ int lf_hash_insert(LF_HASH *hash, LF_PINS *pins, const void *data)
 int lf_hash_delete(LF_HASH *hash, LF_PINS *pins, const void *key, uint keylen)
 {
   LF_SLIST * volatile *el;
-  uint bucket, hashnr= calc_hash(hash, (uchar *)key, keylen);
+  uint bucket, hashnr= calc_hash(hash, (byte *)key, keylen);
   bucket= hashnr % hash->size;
   lf_rwlock_by_pins(pins);
   el= _lf_dynarray_lvalue(&hash->array, bucket);
   if (*el == NULL)
     initialize_bucket(hash, el, bucket, pins);
   if (ldelete(el, hash->charset, my_reverse_bits(hashnr) | 1,
-              (uchar *)key, keylen, pins))
+              (byte *)key, keylen, pins))
   {
     lf_rwunlock_by_pins(pins);
     return 1;

@@ -360,33 +361,33 @@ int lf_hash_delete(LF_HASH *hash, LF_PINS *pins, const void *key, uint keylen)
 void *lf_hash_search(LF_HASH *hash, LF_PINS *pins, const void *key, uint keylen)
 {
   LF_SLIST * volatile *el, *found;
-  uint bucket, hashnr= calc_hash(hash, (uchar *)key, keylen);
+  uint bucket, hashnr= calc_hash(hash, (byte *)key, keylen);
   bucket= hashnr % hash->size;
   lf_rwlock_by_pins(pins);
   el= _lf_dynarray_lvalue(&hash->array, bucket);
   if (*el == NULL)
     initialize_bucket(hash, el, bucket, pins);
   found= lsearch(el, hash->charset, my_reverse_bits(hashnr) | 1,
-                 (uchar *)key, keylen, pins);
+                 (byte *)key, keylen, pins);
   lf_rwunlock_by_pins(pins);
   return found ? found+1 : 0;
 }

 static char *dummy_key= "";

 static void initialize_bucket(LF_HASH *hash, LF_SLIST * volatile *node,
                               uint bucket, LF_PINS *pins)
 {
   uint parent= my_clear_highest_bit(bucket);
   LF_SLIST *dummy= (LF_SLIST *)my_malloc(sizeof(LF_SLIST), MYF(MY_WME));
   LF_SLIST **tmp= 0, *cur;
   LF_SLIST * volatile *el= _lf_dynarray_lvalue(&hash->array, parent);
   if (*el == NULL && bucket)
     initialize_bucket(hash, el, parent, pins);
   dummy->hashnr= my_reverse_bits(bucket);
   dummy->key= dummy_key;
   dummy->keylen= 0;
   if ((cur= linsert(el, hash->charset, dummy, pins, 0)))
   {
     my_free((void *)dummy, MYF(0));
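Aside: lf_hash.c keeps all elements in one lock-free ordered list keyed by bit-reversed hashes - the my_reverse_bits(hashnr) | 1 and el->hashnr & 1 expressions above. Real elements get odd reversed keys, bucket dummies get even ones, so each dummy sorts in front of the elements of its bucket (for the power-of-two table sizes the hash uses). A small stand-alone illustration of that keying; reverse_bits() is a plain 32-bit reversal standing in for my_reverse_bits(), and the constants are arbitrary.

#include <stdint.h>
#include <stdio.h>

static uint32_t reverse_bits(uint32_t v)    /* stand-in for my_reverse_bits() */
{
  uint32_t r= 0;
  int i;
  for (i= 0; i < 32; i++, v>>= 1)
    r= (r << 1) | (v & 1);
  return r;
}

int main(void)
{
  uint32_t hashnr= 0x12345678, size= 4, bucket= hashnr % size;
  uint32_t element_key= reverse_bits(hashnr) | 1;   /* odd:  real element */
  uint32_t dummy_key=   reverse_bits(bucket);       /* even: bucket dummy */
  printf("bucket %u: dummy %08x < element %08x : %d\n",
         bucket, dummy_key, element_key, dummy_key < element_key);
  return 0;
}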
mysys/my_getsystime.c

@@ -35,10 +35,6 @@ ulonglong my_getsystime()
   LARGE_INTEGER t_cnt;
   if (!offset)
   {
-    /* strictly speaking there should be a mutex to protect
-       initialization section. But my_getsystime() is called from
-       UUID() code, and UUID() calls are serialized with a mutex anyway
-    */
     LARGE_INTEGER li;
     FILETIME ft;
     GetSystemTimeAsFileTime(&ft);
storage/maria/lockman.h

@@ -32,7 +32,7 @@
   SLX  - Shared + Loose eXclusive
   LSIX - Loose Shared + Intention eXclusive
 */
-enum lock_type { N, S, X, IS, IX, SIX, LS, LX, SLX, LSIX };
+enum lock_type { N, S, X, IS, IX, SIX, LS, LX, SLX, LSIX, LOCK_TYPE_LAST };

 struct lockman_lock;

@@ -55,9 +55,10 @@ typedef struct {
   uint lock_timeout;
   loid_to_lo_func *loid_to_lo;
 } LOCKMAN;

+#define DIDNT_GET_THE_LOCK 0
 enum lockman_getlock_result {
-  DIDNT_GET_THE_LOCK=0, GOT_THE_LOCK,
+  NO_MEMORY_FOR_LOCK=1, DEADLOCK, LOCK_TIMEOUT,
+  GOT_THE_LOCK,
   GOT_THE_LOCK_NEED_TO_LOCK_A_SUBRESOURCE,
   GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE
 };
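Aside: with the reworked lockman_getlock_result, every failure code (out of memory, deadlock, timeout) now sorts below GOT_THE_LOCK, which is what lets the unit tests further down replace res == DIDNT_GET_THE_LOCK with res < GOT_THE_LOCK. A tiny stand-alone sketch of that convention; the enum and #define are copied from the new header, while handle_lock_result() is a made-up helper for illustration.

#include <stdio.h>

#define DIDNT_GET_THE_LOCK 0
enum lockman_getlock_result {
  NO_MEMORY_FOR_LOCK=1, DEADLOCK, LOCK_TIMEOUT,
  GOT_THE_LOCK,
  GOT_THE_LOCK_NEED_TO_LOCK_A_SUBRESOURCE,
  GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE
};

static const char *handle_lock_result(enum lockman_getlock_result res)
{
  if (res < GOT_THE_LOCK)       /* any failure: caller releases all locks and retries */
    return "failed - release all locks";
  return "lock granted";
}

int main(void)
{
  printf("LOCK_TIMEOUT -> %s\n", handle_lock_result(LOCK_TIMEOUT));
  printf("GOT_THE_LOCK -> %s\n", handle_lock_result(GOT_THE_LOCK));
  return 0;
}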
storage/maria/tablockman.c

(diff collapsed in this view; +240 -131, changes not shown)
storage/maria/tablockman.h

@@ -33,44 +33,44 @@
   LSIX - Loose Shared + Intention eXclusive
 */
 #ifndef _lockman_h
-enum lock_type { N, S, X, IS, IX, SIX, LS, LX, SLX, LSIX };
+#warning TODO remove N-locks
+enum lock_type { N, S, X, IS, IX, SIX, LS, LX, SLX, LSIX, LOCK_TYPE_LAST };

 enum lockman_getlock_result {
-  DIDNT_GET_THE_LOCK=0, GOT_THE_LOCK,
+  NO_MEMORY_FOR_LOCK=1, DEADLOCK, LOCK_TIMEOUT,
+  GOT_THE_LOCK,
   GOT_THE_LOCK_NEED_TO_LOCK_A_SUBRESOURCE,
   GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE
 };
 #endif

-#define LOCK_TYPES LSIX
+#define LOCK_TYPES (LOCK_TYPE_LAST-1)

-typedef struct st_table_lock_owner TABLE_LOCK_OWNER;
 typedef struct st_table_lock TABLE_LOCK;
-typedef struct st_locked_table LOCKED_TABLE;
-typedef TABLE_LOCK_OWNER *loid_to_tlo_func(uint16);

-typedef struct {
-  pthread_mutex_t pool_mutex;
-  TABLE_LOCK *pool;              /* lifo pool of free locks */
-  uint lock_timeout;
-  loid_to_tlo_func *loid_to_tlo; /* for mapping loid to TABLE_LOCK_OWNER */
-} TABLOCKMAN;
-
-struct st_table_lock_owner {
+typedef struct st_table_lock_owner {
   TABLE_LOCK *active_locks;      /* list of active locks */
   TABLE_LOCK *waiting_lock;      /* waiting lock (one lock only) */
-  TABLE_LOCK_OWNER *waiting_for; /* transaction we're wating for */
+  struct st_table_lock_owner *waiting_for; /* transaction we're waiting for */
   pthread_cond_t *cond;          /* transactions waiting for us, wait on 'cond' */
   pthread_mutex_t *mutex;        /* mutex is required to use 'cond' */
-  uint16 loid;                   /* Lock Owner IDentifier */
-};
+  uint16 loid, waiting_for_loid; /* Lock Owner IDentifier */
+} TABLE_LOCK_OWNER;

-struct st_locked_table {
+typedef struct st_locked_table {
   pthread_mutex_t mutex;                /* mutex for everything below */
-  HASH active;                          /* active locks ina hash */
+  HASH latest_locks;                    /* latest locks in a hash */
   TABLE_LOCK *active_locks[LOCK_TYPES]; /* dl-list of locks per type */
-  TABLE_LOCK *wait_queue_in, *wait_queue_out; /* wait deque */
-};
+  TABLE_LOCK *wait_queue_in, *wait_queue_out; /* wait deque (double-end queue)*/
+} LOCKED_TABLE;
+
+typedef TABLE_LOCK_OWNER *loid_to_tlo_func(uint16);
+
+typedef struct {
+  pthread_mutex_t pool_mutex;
+  TABLE_LOCK *pool;              /* lifo pool of free locks */
+  uint lock_timeout;             /* lock timeout in milliseconds */
+  loid_to_tlo_func *loid_to_tlo; /* for mapping loid to TABLE_LOCK_OWNER */
+} TABLOCKMAN;

 void tablockman_init(TABLOCKMAN *, loid_to_tlo_func *, uint);
 void tablockman_destroy(TABLOCKMAN *);

@@ -81,7 +81,7 @@ void tablockman_init_locked_table(LOCKED_TABLE *, int);
 void tablockman_destroy_locked_table(LOCKED_TABLE *);

 #ifdef EXTRA_DEBUG
-void print_tlo(TABLE_LOCK_OWNER *);
+void tablockman_print_tlo(TABLE_LOCK_OWNER *);
 #endif

 #endif
storage/maria/trnman.c

@@ -69,7 +69,8 @@ static TRN *short_trid_to_TRN(uint16 short_trid)
   return (TRN *)trn;
 }

-static byte *trn_get_hash_key(const byte *trn, uint* len, my_bool unused)
+static byte *trn_get_hash_key(const byte *trn, uint* len,
+                              my_bool unused __attribute__ ((unused)))
 {
   *len= sizeof(TrID);
   return (byte *) & ((*((TRN **)trn))->trid);
storage/maria/unittest/lockman-t.c

@@ -14,6 +14,10 @@
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

+/*
+  lockman for row and table locks
+*/
+
 //#define EXTRA_VERBOSE

 #include <tap.h>
storage/maria/unittest/lockman1-t.c

@@ -14,6 +14,10 @@
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

+/*
+  lockman for row locks, tablockman for table locks
+*/
+
 //#define EXTRA_VERBOSE

 #include <tap.h>

@@ -64,7 +68,7 @@ TABLE_LOCK_OWNER *loid2lo1(uint16 loid)
 #define lock_ok_l(O, R, L) \
   test_lock(O, R, L, "", GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE)
 #define lock_conflict(O, R, L) \
-  test_lock(O, R, L, "cannot ", DIDNT_GET_THE_LOCK);
+  test_lock(O, R, L, "cannot ", LOCK_TIMEOUT);

 void test_tablockman_simple()
 {

@@ -164,8 +168,11 @@ int Ntables= 10;
 int table_lock_ratio= 10;
 enum lock_type lock_array[6]= {S, X, LS, LX, IS, IX};
 char *lock2str[6]= {"S", "X", "LS", "LX", "IS", "IX"};
-char *res2str[4]= {
+char *res2str[]= {
   "DIDN'T GET THE LOCK",
+  "OUT OF MEMORY",
+  "DEADLOCK",
+  "LOCK TIMEOUT",
   "GOT THE LOCK",
   "GOT THE LOCK NEED TO LOCK A SUBRESOURCE",
   "GOT THE LOCK NEED TO INSTANT LOCK A SUBRESOURCE"};

@@ -191,7 +198,7 @@ pthread_handler_t test_lockman(void *arg)
       res= tablockman_getlock(&tablockman, lo1, ltarray+table, lock_array[locklevel]);
       DIAG(("loid %2d, table %d, lock %s, res %s", loid, table,
             lock2str[locklevel], res2str[res]));
-      if (res == DIDNT_GET_THE_LOCK)
+      if (res < GOT_THE_LOCK)
      {
        lockman_release_locks(&lockman, lo); tablockman_release_locks(&tablockman, lo1);
        DIAG(("loid %2d, release all locks", loid));

@@ -208,11 +215,6 @@ pthread_handler_t test_lockman(void *arg)
             lock2str[locklevel+4], res2str[res]));
       switch (res)
       {
-      case DIDNT_GET_THE_LOCK:
-        lockman_release_locks(&lockman, lo); tablockman_release_locks(&tablockman, lo1);
-        DIAG(("loid %2d, release all locks", loid));
-        timeout++;
-        continue;
       case GOT_THE_LOCK:
         continue;
       case GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE:

@@ -232,7 +234,10 @@ pthread_handler_t test_lockman(void *arg)
         DBUG_ASSERT(res == GOT_THE_LOCK);
         continue;
       default:
-        DBUG_ASSERT(0);
+        lockman_release_locks(&lockman, lo); tablockman_release_locks(&tablockman, lo1);
+        DIAG(("loid %2d, release all locks", loid));
+        timeout++;
+        continue;
       }
     }
   }
storage/maria/unittest/lockman2-t.c

@@ -14,6 +14,10 @@
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

+/*
+  tablockman for row and table locks
+*/
+
 //#define EXTRA_VERBOSE

 #include <tap.h>

@@ -57,7 +61,7 @@ TABLE_LOCK_OWNER *loid2lo1(uint16 loid)
 #define lock_ok_l(O, R, L) \
   test_lock(O, R, L, "", GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE)
 #define lock_conflict(O, R, L) \
-  test_lock(O, R, L, "cannot ", DIDNT_GET_THE_LOCK);
+  test_lock(O, R, L, "cannot ", LOCK_TIMEOUT);

 void test_tablockman_simple()
 {

@@ -165,14 +169,34 @@ void run_test(const char *test, pthread_handler handler, int n, int m)
   my_free((void*)threads, MYF(0));
 }

+static void reinit_tlo(TABLOCKMAN *lm, TABLE_LOCK_OWNER *lo)
+{
+  TABLE_LOCK_OWNER backup= *lo;
+
+  tablockman_release_locks(lm, lo);
+/*
+  pthread_mutex_destroy(lo->mutex);
+  pthread_cond_destroy(lo->cond);
+  bzero(lo, sizeof(*lo));
+
+  lo->mutex= backup.mutex;
+  lo->cond= backup.cond;
+  lo->loid= backup.loid;
+  pthread_mutex_init(lo->mutex, MY_MUTEX_INIT_FAST);
+  pthread_cond_init(lo->cond, 0);*/
+}
+
 pthread_mutex_t rt_mutex;
 int Nrows= 100;
 int Ntables= 10;
 int table_lock_ratio= 10;
 enum lock_type lock_array[6]= {S, X, LS, LX, IS, IX};
 char *lock2str[6]= {"S", "X", "LS", "LX", "IS", "IX"};
-char *res2str[4]= {
-  "DIDN'T GET THE LOCK",
+char *res2str[]= {
+  0,
+  "OUT OF MEMORY",
+  "DEADLOCK",
+  "LOCK TIMEOUT",
   "GOT THE LOCK",
   "GOT THE LOCK NEED TO LOCK A SUBRESOURCE",
   "GOT THE LOCK NEED TO INSTANT LOCK A SUBRESOURCE"};

@@ -200,9 +224,9 @@ pthread_handler_t test_lockman(void *arg)
       res= tablockman_getlock(&tablockman, lo1, ltarray+table, lock_array[locklevel]);
       DIAG(("loid %2d, table %d, lock %s, res %s", loid, table,
             lock2str[locklevel], res2str[res]));
-      if (res == DIDNT_GET_THE_LOCK)
+      if (res < GOT_THE_LOCK)
       {
-        tablockman_release_locks(&tablockman, lo1);
+        reinit_tlo(&tablockman, lo1);
         DIAG(("loid %2d, release all locks", loid));
         timeout++;
         continue;

@@ -217,11 +241,6 @@ pthread_handler_t test_lockman(void *arg)
             lock2str[locklevel+4], res2str[res]));
       switch (res)
       {
-      case DIDNT_GET_THE_LOCK:
-        tablockman_release_locks(&tablockman, lo1);
-        DIAG(("loid %2d, release all locks", loid));
-        timeout++;
-        continue;
       case GOT_THE_LOCK:
         continue;
       case GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE:

@@ -230,9 +249,9 @@ pthread_handler_t test_lockman(void *arg)
       res= tablockman_getlock(&tablockman, lo1, ltarray+row, lock_array[locklevel]);
       DIAG(("loid %2d, ROW %d, lock %s, res %s", loid, row,
             lock2str[locklevel], res2str[res]));
-      if (res == DIDNT_GET_THE_LOCK)
+      if (res < GOT_THE_LOCK)
       {
-        tablockman_release_locks(&tablockman, lo1);
+        reinit_tlo(&tablockman, lo1);
         DIAG(("loid %2d, release all locks", loid));
         timeout++;
         continue;

@@ -240,12 +259,15 @@ pthread_handler_t test_lockman(void *arg)
         DBUG_ASSERT(res == GOT_THE_LOCK);
         continue;
       default:
+        reinit_tlo(&tablockman, lo1);
+        DIAG(("loid %2d, release all locks", loid));
+        timeout++;
+        continue;
       }
     }
   }
-  tablockman_release_locks(&tablockman, lo1);
+  reinit_tlo(&tablockman, lo1);

   pthread_mutex_lock(&rt_mutex);
   rt_num_threads--;

@@ -264,7 +286,7 @@ int main()
   my_init();
   pthread_mutex_init(&rt_mutex, 0);

-  plan(39);
+  plan(40);

   if (my_atomic_initialize())
     return exit_status();

@@ -299,7 +321,7 @@ int main()
   Nrows= 100;
   Ntables= 10;
   table_lock_ratio= 10;
-  //run_test("\"random lock\" stress test", test_lockman, THREADS, CYCLES);
+  run_test("\"random lock\" stress test", test_lockman, THREADS, CYCLES);
 #if 0
   /* "real-life" simulation - many rows, no table locks */
   Nrows= 1000000;