Commit 45109713 authored by unknown

Merge bk-internal.mysql.com:/home/bk/mysql-maria

into  janus.mylan:/usr/home/serg/Abk/mysql-maria

parents e39aae7c aad040fd
......@@ -71,13 +71,15 @@ void lf_dynarray_destroy(LF_DYNARRAY *array)
bzero(array, sizeof(*array));
}
static const int dynarray_idxes_in_prev_level[LF_DYNARRAY_LEVELS]=
static const long dynarray_idxes_in_prev_level[LF_DYNARRAY_LEVELS]=
{
0, /* +1 here to avoid -1's below */
LF_DYNARRAY_LEVEL_LENGTH,
LF_DYNARRAY_LEVEL_LENGTH * LF_DYNARRAY_LEVEL_LENGTH,
LF_DYNARRAY_LEVEL_LENGTH * LF_DYNARRAY_LEVEL_LENGTH +
LF_DYNARRAY_LEVEL_LENGTH,
LF_DYNARRAY_LEVEL_LENGTH * LF_DYNARRAY_LEVEL_LENGTH *
LF_DYNARRAY_LEVEL_LENGTH
LF_DYNARRAY_LEVEL_LENGTH + LF_DYNARRAY_LEVEL_LENGTH *
LF_DYNARRAY_LEVEL_LENGTH + LF_DYNARRAY_LEVEL_LENGTH
};
/*
......
......@@ -53,7 +53,7 @@ maria_pack_LDADD= @CLIENT_EXTRA_LDFLAGS@ libmaria.a \
noinst_PROGRAMS = ma_test1 ma_test2 ma_test3 ma_rt_test ma_sp_test
noinst_HEADERS = maria_def.h ma_rt_index.h ma_rt_key.h ma_rt_mbr.h \
ma_sp_defs.h ma_fulltext.h ma_ftdefs.h ma_ft_test1.h \
ma_ft_eval.h trnman.h lockman.h \
ma_ft_eval.h trnman.h lockman.h tablockman.h \
ma_control_file.h ha_maria.h
ma_test1_DEPENDENCIES= $(LIBRARIES)
ma_test1_LDADD= @CLIENT_EXTRA_LDFLAGS@ libmaria.a \
......@@ -108,7 +108,7 @@ libmaria_a_SOURCES = ma_init.c ma_open.c ma_extra.c ma_info.c ma_rkey.c \
ma_keycache.c ma_preload.c ma_ft_parser.c \
ma_ft_update.c ma_ft_boolean_search.c \
ma_ft_nlq_search.c ft_maria.c ma_sort.c \
ha_maria.cc trnman.c lockman.c \
ha_maria.cc trnman.c lockman.c tablockman.c \
ma_rt_index.c ma_rt_key.c ma_rt_mbr.c ma_rt_split.c \
ma_sp_key.c ma_control_file.c
CLEANFILES = test?.MA? FT?.MA? isam.log ma_test_all ma_rt_test.MA? sp_test.MA?
......
// TODO - allocate everything from dynarrays !!! (benchmark)
// TODO instant duration locks
// automatically place S instead of LS if possible
/*
TODO optimization: table locks - they have completely
different characteristics. long lists, few distinct resources -
slow to scan, [possibly] high retry rate
*/
/* Copyright (C) 2006 MySQL AB
This program is free software; you can redistribute it and/or modify
......@@ -68,9 +64,9 @@
it will wait for other locks. Here's an exception to "locks are added
to the end" rule - upgraded locks are added after the last active lock
but before all waiting locks. Old lock (the one we upgraded from) is
not removed from the list, indeed we may need to return to it later if
the new lock was in a savepoint that gets rolled back. So old lock is
marked as "ignored" (IGNORE_ME flag). New lock gets an UPGRADED flag.
not removed from the list, indeed it may be needed if the new lock was
in a savepoint that gets rolled back. So old lock is marked as "ignored"
(IGNORE_ME flag). New lock gets an UPGRADED flag.
Loose locks add an important exception to the above. Loose locks do not
always commute with other locks. In the list IX-LS both locks are active,
......@@ -90,12 +86,12 @@
variable a conflicting lock is returned and the calling thread waits on a
pthread condition in the LOCK_OWNER structure of the owner of the
conflicting lock. Or a new lock is compatible with all locks, but some
existing locks are not compatible with previous locks (example: request IS,
existing locks are not compatible with each other (example: request IS,
when the list is S-IX) - that is, not all locks are active. In this case a
first waiting lock is returned in the 'blocker' variable,
lockman_getlock() notices that a "blocker" does not conflict with the
requested lock, and "dereferences" it, to find the lock that it's waiting
on. The calling thread then begins to wait on the same lock.
first waiting lock is returned in the 'blocker' variable, lockman_getlock()
notices that a "blocker" does not conflict with the requested lock, and
"dereferences" it, to find the lock that it's waiting on. The calling
thread then begins to wait on the same lock.
To better support table-row relations where one needs to lock the table
with an intention lock before locking the row, extended diagnostics is
......@@ -107,6 +103,10 @@
whether it's possible to lock the row, but no need to lock it - perhaps
the thread has a loose lock on this table). This is defined by
getlock_result[] table.
TODO optimization: table locks - they have completely
different characteristics. long lists, few distinct resources -
slow to scan, [possibly] high retry rate
*/
#include <my_global.h>
......@@ -316,7 +316,7 @@ static int lockfind(LOCK * volatile *head, LOCK *node,
DBUG_ASSERT(prev_active == TRUE);
else
cur_active&= lock_compatibility_matrix[prev_lock][cur_lock];
if (upgrading && !cur_active)
if (upgrading && !cur_active /*&& !(cur_flags & UPGRADED)*/)
break;
if (prev_active && !cur_active)
{
......@@ -327,7 +327,7 @@ static int lockfind(LOCK * volatile *head, LOCK *node,
{
/* we already have a lock on this resource */
DBUG_ASSERT(lock_combining_matrix[cur_lock][lock] != N);
DBUG_ASSERT(!upgrading); /* can happen only once */
DBUG_ASSERT(!upgrading || (flags & IGNORE_ME));
if (lock_combining_matrix[cur_lock][lock] == cur_lock)
{
/* new lock is compatible */
......@@ -380,7 +380,7 @@ static int lockfind(LOCK * volatile *head, LOCK *node,
*/
if (upgrading)
{
if (compatible)
if (compatible /*&& prev_active*/)
return PLACE_NEW_DISABLE_OLD;
else
return REQUEST_NEW_DISABLE_OLD;
......@@ -431,6 +431,9 @@ static int lockinsert(LOCK * volatile *head, LOCK *node, LF_PINS *pins,
}
if (res & LOCK_UPGRADE)
cursor.upgrade_from->flags|= IGNORE_ME;
#warning is this OK ? if a reader has already read upgrade_from, \
it may find it conflicting with node :(
//#error another bug - see the last test from test_lockman_simple()
}
} while (res == REPEAT_ONCE_MORE);
......@@ -439,8 +442,8 @@ static int lockinsert(LOCK * volatile *head, LOCK *node, LF_PINS *pins,
_lf_unpin(pins, 2);
/*
note that blocker is not necessarily pinned here (when it's == curr).
this is ok as it's either a dummy node then for initialize_bucket
and dummy nodes don't need pinning,
this is ok as in such a case it's either a dummy node for
initialize_bucket() and dummy nodes don't need pinning,
or it's a lock of the same transaction for lockman_getlock,
and it cannot be removed by another thread
*/
......@@ -484,9 +487,15 @@ static int lockdelete(LOCK * volatile *head, LOCK *node, LF_PINS *pins)
res= lockfind(head, node, &cursor, pins);
DBUG_ASSERT(res & ALREADY_HAVE);
if (cursor.upgrade_from)
cursor.upgrade_from->flags&= ~IGNORE_ME;
/*
XXX this does not work with savepoints, as the old lock is left ignored.
It cannot be un-ignored, as that would basically mean moving the lock back
in the lock chain (from upgraded), and the latter is not allowed -
because it breaks list scanning. So the old ignored lock must be deleted,
a new - identical - lock must be installed right after the lock we're
deleting, and only then can we delete. The good news is that this is only
required when rolling back a savepoint.
*/
if (my_atomic_casptr((void **)&(cursor.curr->link),
(void **)&cursor.next, 1+(char *)cursor.next))
{
......@@ -497,11 +506,7 @@ static int lockdelete(LOCK * volatile *head, LOCK *node, LF_PINS *pins)
lockfind(head, node, &cursor, pins);
}
else
{
res= REPEAT_ONCE_MORE;
if (cursor.upgrade_from) /* to satisfy the assert in lockfind */
cursor.upgrade_from->flags|= IGNORE_ME;
}
} while (res == REPEAT_ONCE_MORE);
_lf_unpin(pins, 0);
_lf_unpin(pins, 1);
......
// TODO - allocate everything from dynarrays !!! (benchmark)
// automatically place S instead of LS if possible
/* Copyright (C) 2006 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <my_global.h>
#include <my_sys.h>
#include <my_bit.h>
#include <lf.h>
#include "tablockman.h"
/*
Lock Manager for Table Locks
The code below handles locks on resources - but it is optimized for the
case when the number of resources is not very large and there are many
locks per resource - that is, a resource is likely to be a table or a
database, but hardly a row in a table.
Locks belong to "lock owners". A Lock Owner is uniquely identified by a
16-bit number - loid (lock owner identifier). The application must provide
a function, loid_to_tlo(), that takes such a number as an argument and
returns a TABLE_LOCK_OWNER structure (a minimal sketch of such a mapping
follows this comment).
Lock levels are completely defined by three tables. The lock compatibility
matrix specifies which locks can be held at the same time on a resource.
The lock combining matrix specifies what lock level has the same behaviour
as a pair of two locks of given levels. The getlock_result matrix
simplifies intention locking and lock escalation for an application:
basically it defines which locks are intention locks and which locks are
"loose" locks. It is only used to provide better diagnostics for the
application; the lock manager itself does not differentiate between
normal, intention, and loose locks.
The assumptions are: few distinct resources, many locks are held at the
same time on one resource. Thus: a lock structure _per resource_ can be
rather large; a lock structure _per lock_ does not need to be very small
either; we need to optimize for _speed_. Operations we need are: place a
lock, check if a particular transaction already has a lock on this
resource, check if a conflicting lock exists, if yes - find who owns it.
Solution: every resource has a structure with
1. Hash of "active" (see below for the description of "active") granted
locks with loid as a key. Thus, checking if a given transaction has a
lock on this resource is O(1) operation.
2. Doubly-linked lists of all granted locks - one list for every lock
type. Thus, checking if a conflicting lock exists is a check whether
an appropriate list head pointer is not null, also O(1).
3. Every lock has a loid of the owner, thus checking who owns a
conflicting lock is also O(1).
4. Deque of waiting locks. It's a deque not a fifo, because for lock
upgrades requests are added to the queue head, not tail. There's never
a need to scan the queue.
Result: adding or removing a lock is always an O(1) operation; it does not
depend on the number of locks on the resource, or number of transactions,
or number of resources. It _does_ depend on the number of different lock
levels - O(number_of_lock_levels) - but it's a constant.
Waiting: if there is a conflicting lock or if the wait queue is not empty,
a requested lock cannot be granted at once. It is added to the end of the
wait queue. If there is a conflicting lock, the "blocker" transaction is
the owner of this lock. If there's no conflict but the queue was not empty,
then the "blocker" is the transaction that the owner of the lock at the
end of the queue is waiting for (in other words, our lock is added to the
end of the wait queue, and our blocker is the same as that of the lock
right before us).
Lock upgrades: when a thread that has a lock on a given resource requests
a new lock on the same resource and the old lock is not enough to satisfy
the new lock requirements (which is defined by
lock_combining_matrix[old_lock][new_lock] != old_lock), a new lock
(defined by lock_combining_matrix as above) is placed. Depending on
other granted locks it is immediately active or it has to wait. Here the
lock is added to the start of the waiting queue, not to the end. The old
lock is removed from the hash, but not from the doubly-linked lists
(indeed, a transaction checks "do I have a lock on this resource?" by
looking in the hash, and it should find the latest lock, so old locks must
be removed from there; but a transaction checks "are there conflicting
locks?" by scanning the doubly-linked lists, and there it doesn't matter
if it finds an old lock - even if the old lock were removed, the new lock
would be a conflict anyway).
To better support table-row relations where one needs to lock the table
with an intention lock before locking the row, extended diagnostics is
provided. When an intention lock (presumably on a table) is granted,
lockman_getlock() returns one of GOT_THE_LOCK (no need to lock the row,
perhaps the thread already has a normal lock on this table),
GOT_THE_LOCK_NEED_TO_LOCK_A_SUBRESOURCE (need to lock the row, as usual),
GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE (only need to check
whether it's possible to lock the row, but no need to lock it - perhaps
the thread has a loose lock on this table). This is defined by
getlock_result[] table.
Instant duration locks are not supported. Though they're trivial to add,
they are normally only used on rows, not on tables. So, presumably,
they are not needed here.
*/
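A minimal sketch (an assumption for illustration, not part of this file) of the loid_to_tlo() mapping mentioned in the comment above, modeled on what the unit tests below do with a static array of owners; owner_array, MAX_OWNERS, and example_loid_to_tlo are placeholder names:

#include <my_global.h>
#include "tablockman.h"

#define MAX_OWNERS 100                        /* assumed application-side limit */
static TABLE_LOCK_OWNER owner_array[MAX_OWNERS];

static TABLE_LOCK_OWNER *example_loid_to_tlo(uint16 loid)
{
  /* loids are 1-based in the unit tests, hence the -1 */
  return owner_array + loid - 1;
}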
/*
Lock compatibility matrix.
It's asymmetric. Read it as "Somebody has the lock <value in the row
label>, can I set the lock <value in the column label> ?"
') Though you can take LS lock while somebody has S lock, it makes no
sense - it's simpler to take S lock too.
1 - compatible
0 - incompatible
-1 - "impossible", so that we can assert the impossibility.
*/
static int lock_compatibility_matrix[10][10]=
{ /* N S X IS IX SIX LS LX SLX LSIX */
{ -1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* N */
{ -1, 1, 0, 1, 0, 0, 1, 0, 0, 0 }, /* S */
{ -1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, /* X */
{ -1, 1, 0, 1, 1, 1, 1, 1, 1, 1 }, /* IS */
{ -1, 0, 0, 1, 1, 0, 1, 1, 0, 1 }, /* IX */
{ -1, 0, 0, 1, 0, 0, 1, 0, 0, 0 }, /* SIX */
{ -1, 1, 0, 1, 0, 0, 1, 0, 0, 0 }, /* LS */
{ -1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, /* LX */
{ -1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, /* SLX */
{ -1, 0, 0, 1, 0, 0, 1, 0, 0, 0 } /* LSIX */
};
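/*
  Example, reading the matrix above: if somebody holds IX, a new IS request
  is compatible (lock_compatibility_matrix[IX][IS] == 1), but if somebody
  holds S, an IX request must wait (lock_compatibility_matrix[S][IX] == 0).
*/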
/*
Lock combining matrix.
It's symmetric. Read it as "what lock level L is identical to the
set of two locks A and B"
One should never get N from it, we assert the impossibility
*/
static enum lock_type lock_combining_matrix[10][10]=
{/* N S X IS IX SIX LS LX SLX LSIX */
{ N, S, X, IS, IX, SIX, S, SLX, SLX, SIX}, /* N */
{ S, S, X, S, SIX, SIX, S, SLX, SLX, SIX}, /* S */
{ X, X, X, X, X, X, X, X, X, X}, /* X */
{ IS, S, X, IS, IX, SIX, LS, LX, SLX, LSIX}, /* IS */
{ IX, SIX, X, IX, IX, SIX, LSIX, LX, SLX, LSIX}, /* IX */
{ SIX, SIX, X, SIX, SIX, SIX, SIX, SLX, SLX, SIX}, /* SIX */
{ LS, S, X, LS, LSIX, SIX, LS, LX, SLX, LSIX}, /* LS */
{ LX, SLX, X, LX, LX, SLX, LX, LX, SLX, LX}, /* LX */
{ SLX, SLX, X, SLX, SLX, SLX, SLX, SLX, SLX, SLX}, /* SLX */
{ LSIX, SIX, X, LSIX, LSIX, SIX, LSIX, LX, SLX, LSIX} /* LSIX */
};
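/*
  Example, reading the matrix above: a transaction that holds IX and then
  requests S ends up holding lock_combining_matrix[IX][S] == SIX.
*/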
/*
the return codes for lockman_getlock
It's asymmetric. Read it as "I have the lock <value in the row label>,
what value should be returned for <value in the column label> ?"
0 means impossible combination (assert!)
Defines below help to preserve the table structure.
I/L/A values are self explanatory
x means the combination is possible (assert should not crash)
but it cannot happen in row locks, only in table locks (S,X),
or lock escalations (LS,LX)
*/
#define I GOT_THE_LOCK_NEED_TO_LOCK_A_SUBRESOURCE
#define L GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE
#define A GOT_THE_LOCK
#define x GOT_THE_LOCK
static enum lockman_getlock_result getlock_result[10][10]=
{/* N S X IS IX SIX LS LX SLX LSIX */
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* N */
{ 0, x, 0, A, 0, 0, x, 0, 0, 0}, /* S */
{ 0, x, x, A, A, 0, x, x, 0, 0}, /* X */
{ 0, 0, 0, I, 0, 0, 0, 0, 0, 0}, /* IS */
{ 0, 0, 0, I, I, 0, 0, 0, 0, 0}, /* IX */
{ 0, x, 0, A, I, 0, x, 0, 0, 0}, /* SIX */
{ 0, 0, 0, L, 0, 0, x, 0, 0, 0}, /* LS */
{ 0, 0, 0, L, L, 0, x, x, 0, 0}, /* LX */
{ 0, x, 0, A, L, 0, x, x, 0, 0}, /* SLX */
{ 0, 0, 0, L, I, 0, x, 0, 0, 0} /* LSIX */
};
#undef I
#undef L
#undef A
#undef x
/*
this structure is optimized for the case when there are many locks
on the same resource - e.g. a table
*/
struct st_table_lock {
  struct st_table_lock *next_in_lo;    /* next lock of the same lock owner */
  struct st_table_lock *upgraded_from; /* the lock this one was upgraded from, if any */
  struct st_table_lock *next, *prev;   /* per-type active list or the wait deque */
  struct st_locked_table *table;       /* the resource this lock is on */
  uint16 loid;                         /* lock owner identifier */
  char lock_type;                      /* enum lock_type value */
};
#define hash_insert my_hash_insert /* for consistency :) */
#define remove_from_wait_queue(LOCK, TABLE) \
do \
{ \
if ((LOCK)->prev) \
{ \
DBUG_ASSERT((TABLE)->wait_queue_out != (LOCK)); \
(LOCK)->prev->next= (LOCK)->next; \
} \
else \
{ \
DBUG_ASSERT((TABLE)->wait_queue_out == (LOCK)); \
(TABLE)->wait_queue_out= (LOCK)->next; \
} \
if ((LOCK)->next) \
{ \
DBUG_ASSERT((TABLE)->wait_queue_in != (LOCK)); \
(LOCK)->next->prev= (LOCK)->prev; \
} \
else \
{ \
DBUG_ASSERT((TABLE)->wait_queue_in == (LOCK)); \
(TABLE)->wait_queue_in= (LOCK)->prev; \
} \
} while (0)
/*
DESCRIPTION
tries to lock a resource 'table' with a lock level 'lock'.
RETURN
see enum lockman_getlock_result
*/
enum lockman_getlock_result
tablockman_getlock(TABLOCKMAN *lm, TABLE_LOCK_OWNER *lo,
LOCKED_TABLE *table, enum lock_type lock)
{
TABLE_LOCK *old, *new, *blocker;
TABLE_LOCK_OWNER *wait_for;
ulonglong deadline;
struct timespec timeout;
enum lock_type new_lock;
int i;
pthread_mutex_lock(& table->mutex);
/* do we already have a lock on this resource ? */
old= (TABLE_LOCK *)hash_search(& table->active, (byte *)&lo->loid,
sizeof(lo->loid));
/* and if yes, is it enough to satisfy the new request */
if (old && lock_combining_matrix[old->lock_type][lock] == old->lock_type)
{
/* yes */
pthread_mutex_unlock(& table->mutex);
return getlock_result[old->lock_type][lock];
}
/* no, placing a new lock. first - take a free lock structure from the pool */
pthread_mutex_lock(& lm->pool_mutex);
new= lm->pool;
if (new)
{
lm->pool= new->next;
pthread_mutex_unlock(& lm->pool_mutex);
}
else
{
pthread_mutex_unlock(& lm->pool_mutex);
new= (TABLE_LOCK *)my_malloc(sizeof(*new), MYF(MY_WME));
if (!new)
{
pthread_mutex_unlock(& table->mutex);
return DIDNT_GET_THE_LOCK;
}
}
/* calculate the level of the upgraded lock */
new_lock= old ? lock_combining_matrix[old->lock_type][lock] : lock;
new->loid= lo->loid;
new->lock_type= new_lock;
new->table= table;
/* and try to place it */
for (new->prev= table->wait_queue_in ; ; )
{
/* waiting queue is not empty and we're not upgrading */
if (!old && new->prev)
{
/* need to wait */
DBUG_ASSERT(table->wait_queue_out);
DBUG_ASSERT(table->wait_queue_in);
blocker= new->prev;
/* wait for a previous lock in the queue or for a lock it's waiting for */
if (lock_compatibility_matrix[blocker->lock_type][lock])
wait_for= lm->loid_to_tlo(blocker->loid)->waiting_for;
else
wait_for= lm->loid_to_tlo(blocker->loid);
}
else
{
/* checking for compatibility with existing locks */
for (blocker= 0, i= 0; i < LOCK_TYPES; i++)
{
if (table->active_locks[i] && !lock_compatibility_matrix[i+1][lock])
{
/* the first lock in the list may be our own - skip it */
for (blocker= table->active_locks[i];
blocker && blocker->loid == lo->loid;
blocker= blocker->next) /* no-op */;
if (blocker)
break;
}
}
if (!blocker) /* free to go */
break;
wait_for= lm->loid_to_tlo(blocker->loid);
}
/* ok, we're here - the wait is inevitable */
lo->waiting_for= wait_for;
if (!lo->waiting_lock) /* first iteration of the for() loop */
{
/* lock upgrade or new lock request ? */
if (old)
{
/* upgrade - add the lock to the _start_ of the wait queue */
new->prev= 0;
if ((new->next= table->wait_queue_out))
new->next->prev= new;
table->wait_queue_out= new;
if (!table->wait_queue_in)
table->wait_queue_in=table->wait_queue_out;
}
else
{
/* new lock - add the lock to the _end_ of the wait queue */
new->next= 0;
if ((new->prev= table->wait_queue_in))
new->prev->next= new;
table->wait_queue_in= new;
if (!table->wait_queue_out)
table->wait_queue_out=table->wait_queue_in;
}
lo->waiting_lock= new;
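/*
  Presumably lock_timeout is in milliseconds: my_getsystime() counts
  100 ns ticks, so multiplying by 10000 converts the timeout to the same
  scale, matching the tv_sec/tv_nsec conversion below.
*/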
deadline= my_getsystime() + lm->lock_timeout * 10000;
timeout.tv_sec= deadline/10000000;
timeout.tv_nsec= (deadline % 10000000) * 100;
}
else
{
if (my_getsystime() > deadline)
{
pthread_mutex_unlock(& table->mutex);
return DIDNT_GET_THE_LOCK;
}
}
/* now really wait */
pthread_mutex_lock(wait_for->mutex);
pthread_mutex_unlock(& table->mutex);
pthread_cond_timedwait(wait_for->cond, wait_for->mutex, &timeout);
pthread_mutex_unlock(wait_for->mutex);
pthread_mutex_lock(& table->mutex);
/* ... and repeat from the beginning */
}
/* yeah! we can place the lock now */
/* remove the lock from the wait queue, if it was there */
if (lo->waiting_lock)
{
remove_from_wait_queue(new, table);
lo->waiting_lock= 0;
lo->waiting_for= 0;
}
/* add it to the list of all locks of this lock owner */
new->next_in_lo= lo->active_locks;
lo->active_locks= new;
/* and to the list of active locks of this lock type */
new->prev= 0;
if ((new->next= table->active_locks[new_lock-1]))
new->next->prev= new;
table->active_locks[new_lock-1]= new;
/* remove the old lock from the hash, if upgrading */
if (old)
{
new->upgraded_from= old;
hash_delete(& table->active, (byte *)old);
}
else
new->upgraded_from= 0;
/* and add a new lock to the hash, voila */
hash_insert(& table->active, (byte *)new);
pthread_mutex_unlock(& table->mutex);
return getlock_result[lock][lock];
}
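For context, a hedged caller-side sketch (not part of this commit) of how an engine might act on the return codes above; lock_the_row() and lock_table_then_row() are hypothetical helper names standing in for the engine's row-level locking:

#include "tablockman.h"

/* hypothetical helper standing in for the engine's row-level locking */
extern int lock_the_row(TABLE_LOCK_OWNER *lo, void *row);

static int lock_table_then_row(TABLOCKMAN *lm, TABLE_LOCK_OWNER *lo,
                               LOCKED_TABLE *table, void *row)
{
  switch (tablockman_getlock(lm, lo, table, IX))
  {
  case GOT_THE_LOCK:
    return 0;                          /* the table lock already covers the row */
  case GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE:
    /*
      only need to check that the row is lockable; the unit tests simply
      take a regular row lock here, so this sketch does the same
    */
    /* fall through */
  case GOT_THE_LOCK_NEED_TO_LOCK_A_SUBRESOURCE:
    return lock_the_row(lo, row);      /* the usual case: lock the row too */
  case DIDNT_GET_THE_LOCK:
  default:
    tablockman_release_locks(lm, lo);  /* timed out: drop all locks and retry */
    return 1;
  }
}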
/*
DESCRIPTION
release all locks belonging to a transaction.
signal waiters to continue
*/
void tablockman_release_locks(TABLOCKMAN *lm, TABLE_LOCK_OWNER *lo)
{
TABLE_LOCK *lock, *local_pool= 0, *local_pool_end;
/*
instead of adding released locks to a pool one by one, we'll link
them in a list and add to a pool in one short action (under a mutex)
*/
local_pool_end= lo->waiting_lock ? lo->waiting_lock : lo->active_locks;
if (!local_pool_end)
return;
/* release a waiting lock, if any */
if ((lock= lo->waiting_lock))
{
DBUG_ASSERT(lock->loid == lo->loid);
pthread_mutex_lock(& lock->table->mutex);
remove_from_wait_queue(lock, lock->table);
/*
a special case: if this lock was not the last in the wait queue
and it's compatible with the next lock, then the next lock
is waiting for our blocker though really it is waiting for us, indirectly.
Broadcast on our blocker's condition so that the owner of that next lock
re-checks and can proceed (after we have removed our lock from the wait
queue, of course).
*/
if (lock->prev &&
lock_compatibility_matrix[lock->prev->lock_type][lock->lock_type])
{
pthread_mutex_lock(lo->waiting_for->mutex);
pthread_cond_broadcast(lo->waiting_for->cond);
pthread_mutex_unlock(lo->waiting_for->mutex);
}
lo->waiting_for= 0;
pthread_mutex_unlock(& lock->table->mutex);
lock->next= local_pool;
local_pool= lock;
}
/* now release granted locks */
lock= lo->active_locks;
while (lock)
{
TABLE_LOCK *cur= lock;
pthread_mutex_t *mutex= & lock->table->mutex;
DBUG_ASSERT(cur->loid == lo->loid);
lock= lock->next_in_lo;
/* TODO ? group locks by table to reduce the number of mutex locks */
pthread_mutex_lock(mutex);
hash_delete(& cur->table->active, (byte *)cur);
if (cur->prev)
cur->prev->next= cur->next;
if (cur->next)
cur->next->prev= cur->prev;
if (cur->table->active_locks[cur->lock_type-1] == cur)
cur->table->active_locks[cur->lock_type-1]= cur->next;
cur->next= local_pool;
local_pool= cur;
pthread_mutex_unlock(mutex);
}
lo->waiting_lock= lo->active_locks= 0;
/*
okay, all locks released. now signal that we're leaving,
in case somebody's waiting for it
*/
pthread_mutex_lock(lo->mutex);
pthread_cond_broadcast(lo->cond);
pthread_mutex_unlock(lo->mutex);
/* and push all freed locks to the lockman's pool */
pthread_mutex_lock(& lm->pool_mutex);
local_pool_end->next= lm->pool;
lm->pool= local_pool;
pthread_mutex_unlock(& lm->pool_mutex);
}
void tablockman_init(TABLOCKMAN *lm, loid_to_tlo_func *func, uint timeout)
{
lm->pool= 0;
lm->loid_to_tlo= func;
lm->lock_timeout= timeout;
pthread_mutex_init(&lm->pool_mutex, MY_MUTEX_INIT_FAST);
}
void tablockman_destroy(TABLOCKMAN *lm)
{
while (lm->pool)
{
TABLE_LOCK *tmp= lm->pool;
lm->pool= tmp->next;
my_free((void *)tmp, MYF(0));
}
pthread_mutex_destroy(&lm->pool_mutex);
}
void tablockman_init_locked_table(LOCKED_TABLE *lt, int initial_hash_size)
{
TABLE_LOCK *unused;
bzero(lt, sizeof(*lt));
pthread_mutex_init(& lt->mutex, MY_MUTEX_INIT_FAST);
hash_init(& lt->active, &my_charset_bin, initial_hash_size,
offsetof(TABLE_LOCK, loid), sizeof(unused->loid), 0, 0, 0);
}
void tablockman_destroy_locked_table(LOCKED_TABLE *lt)
{
hash_free(& lt->active);
pthread_mutex_destroy(& lt->mutex);
}
#ifdef EXTRA_DEBUG
static char *lock2str[LOCK_TYPES+1]= {"N", "S", "X", "IS", "IX", "SIX",
"LS", "LX", "SLX", "LSIX"};
void print_tlo(TABLE_LOCK_OWNER *lo)
{
TABLE_LOCK *lock;
printf("lo%d>", lo->loid);
if ((lock= lo->waiting_lock))
printf(" (%s.%p)", lock2str[lock->lock_type], lock->table);
for (lock= lo->active_locks; lock && lock != lock->next_in_lo; lock= lock->next_in_lo)
printf(" %s.%p", lock2str[lock->lock_type], lock->table);
if (lock && lock == lock->next_in_lo)
printf("!");
printf("\n");
}
#endif
/* Copyright (C) 2006 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#ifndef _tablockman_h
#define _tablockman_h
/*
Lock levels:
^^^^^^^^^^^
N - "no lock", not a lock, used sometimes internally to simplify the code
S - Shared
X - eXclusive
IS - Intention Shared
IX - Intention eXclusive
SIX - Shared + Intention eXclusive
LS - Loose Shared
LX - Loose eXclusive
SLX - Shared + Loose eXclusive
LSIX - Loose Shared + Intention eXclusive
*/
#ifndef _lockman_h
enum lock_type { N, S, X, IS, IX, SIX, LS, LX, SLX, LSIX };
enum lockman_getlock_result {
DIDNT_GET_THE_LOCK=0, GOT_THE_LOCK,
GOT_THE_LOCK_NEED_TO_LOCK_A_SUBRESOURCE,
GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE
};
#endif
#define LOCK_TYPES LSIX
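/* LOCK_TYPES == LSIX == 9: all real lock levels, N excluded; e.g. LOCKED_TABLE::active_locks is indexed by lock_type-1 */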
typedef struct st_table_lock_owner TABLE_LOCK_OWNER;
typedef struct st_table_lock TABLE_LOCK;
typedef struct st_locked_table LOCKED_TABLE;
typedef TABLE_LOCK_OWNER *loid_to_tlo_func(uint16);
typedef struct {
pthread_mutex_t pool_mutex;
TABLE_LOCK *pool; /* lifo pool of free locks */
uint lock_timeout;
loid_to_tlo_func *loid_to_tlo; /* for mapping loid to TABLE_LOCK_OWNER */
} TABLOCKMAN;
struct st_table_lock_owner {
TABLE_LOCK *active_locks; /* list of active locks */
TABLE_LOCK *waiting_lock; /* waiting lock (one lock only) */
TABLE_LOCK_OWNER *waiting_for; /* transaction we're waiting for */
pthread_cond_t *cond; /* transactions waiting for us, wait on 'cond' */
pthread_mutex_t *mutex; /* mutex is required to use 'cond' */
uint16 loid; /* Lock Owner IDentifier */
};
struct st_locked_table {
pthread_mutex_t mutex; /* mutex for everything below */
HASH active; /* active locks in a hash */
TABLE_LOCK *active_locks[LOCK_TYPES]; /* dl-list of locks per type */
TABLE_LOCK *wait_queue_in, *wait_queue_out; /* wait deque */
};
void tablockman_init(TABLOCKMAN *, loid_to_tlo_func *, uint);
void tablockman_destroy(TABLOCKMAN *);
enum lockman_getlock_result tablockman_getlock(TABLOCKMAN *, TABLE_LOCK_OWNER *,
LOCKED_TABLE *, enum lock_type);
void tablockman_release_locks(TABLOCKMAN *, TABLE_LOCK_OWNER *);
void tablockman_init_locked_table(LOCKED_TABLE *, int);
void tablockman_destroy_locked_table(LOCKED_TABLE *);
#ifdef EXTRA_DEBUG
void print_tlo(TABLE_LOCK_OWNER *);
#endif
#endif
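A minimal usage sketch of the API declared above, under stated assumptions: a single owner with loid 1, a trivial loid-to-owner mapping, and a lock timeout assumed to be in milliseconds; my_owner, my_loid_to_tlo, and example are illustrative names, not part of the commit:

#include <my_global.h>
#include <my_sys.h>
#include "tablockman.h"

static TABLE_LOCK_OWNER my_owner;                 /* single owner, loid 1 */
static pthread_mutex_t my_mutex;
static pthread_cond_t my_cond;

static TABLE_LOCK_OWNER *my_loid_to_tlo(uint16 loid)
{
  return &my_owner;                               /* only loid 1 exists here */
}

int example(void)
{
  TABLOCKMAN lm;
  LOCKED_TABLE table;

  my_init();
  tablockman_init(&lm, &my_loid_to_tlo, 50);      /* timeout assumed in ms */
  tablockman_init_locked_table(&table, 10);

  pthread_mutex_init(&my_mutex, MY_MUTEX_INIT_FAST);
  pthread_cond_init(&my_cond, 0);
  my_owner.active_locks= 0;
  my_owner.waiting_lock= 0;
  my_owner.waiting_for= 0;
  my_owner.mutex= &my_mutex;
  my_owner.cond= &my_cond;
  my_owner.loid= 1;

  if (tablockman_getlock(&lm, &my_owner, &table, IX) != DIDNT_GET_THE_LOCK)
  {
    /* ... lock rows, do the work ... */
  }
  tablockman_release_locks(&lm, &my_owner);

  tablockman_destroy_locked_table(&table);
  tablockman_destroy(&lm);
  pthread_cond_destroy(&my_cond);
  pthread_mutex_destroy(&my_mutex);
  my_end(0);
  return 0;
}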
......@@ -25,5 +25,5 @@ LDADD= $(top_builddir)/unittest/mytap/libmytap.a \
$(top_builddir)/mysys/libmysys.a \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/strings/libmystrings.a @ZLIB_LIBS@
noinst_PROGRAMS = ma_control_file-t trnman-t lockman-t
noinst_PROGRAMS = ma_control_file-t trnman-t lockman-t lockman1-t lockman2-t
CLEANFILES = maria_control
......@@ -268,7 +268,7 @@ int main()
test_lockman_simple();
#define CYCLES 1000
#define CYCLES 10000
#define THREADS Nlos /* don't change this line */
/* mixed load, stress-test with random locks */
......
/* Copyright (C) 2006 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
//#define EXTRA_VERBOSE
#include <tap.h>
#include <my_global.h>
#include <my_sys.h>
#include <my_atomic.h>
#include <lf.h>
#include "../lockman.h"
#include "../tablockman.h"
#define Nlos 100
#define Ntbls 10
LOCK_OWNER loarray[Nlos];
TABLE_LOCK_OWNER loarray1[Nlos];
pthread_mutex_t mutexes[Nlos];
pthread_cond_t conds[Nlos];
LOCKED_TABLE ltarray[Ntbls];
LOCKMAN lockman;
TABLOCKMAN tablockman;
#ifndef EXTRA_VERBOSE
#define print_lo1(X) /* no-op */
#define DIAG(X) /* no-op */
#else
#define DIAG(X) diag X
#endif
LOCK_OWNER *loid2lo(uint16 loid)
{
return loarray+loid-1;
}
TABLE_LOCK_OWNER *loid2lo1(uint16 loid)
{
return loarray1+loid-1;
}
#define unlock_all(O) diag("lo" #O "> release all locks"); \
tablockman_release_locks(&tablockman, loid2lo1(O));
#define test_lock(O, R, L, S, RES) \
ok(tablockman_getlock(&tablockman, loid2lo1(O), &ltarray[R], L) == RES, \
"lo" #O "> " S "lock resource " #R " with " #L "-lock"); \
print_lo1(loid2lo1(O));
#define lock_ok_a(O, R, L) \
test_lock(O, R, L, "", GOT_THE_LOCK)
#define lock_ok_i(O, R, L) \
test_lock(O, R, L, "", GOT_THE_LOCK_NEED_TO_LOCK_A_SUBRESOURCE)
#define lock_ok_l(O, R, L) \
test_lock(O, R, L, "", GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE)
#define lock_conflict(O, R, L) \
test_lock(O, R, L, "cannot ", DIDNT_GET_THE_LOCK);
void test_tablockman_simple()
{
/* simple */
lock_ok_a(1, 1, S);
lock_ok_i(2, 2, IS);
lock_ok_i(1, 2, IX);
/* lock escalation */
lock_ok_a(1, 1, X);
lock_ok_i(2, 2, IX);
/* failures */
lock_conflict(2, 1, X);
unlock_all(2);
lock_ok_a(1, 2, S);
lock_ok_a(1, 2, IS);
lock_ok_a(1, 2, LS);
lock_ok_i(1, 3, IX);
lock_ok_a(2, 3, LS);
lock_ok_i(1, 3, IX);
lock_ok_l(2, 3, IS);
unlock_all(1);
unlock_all(2);
lock_ok_i(1, 1, IX);
lock_conflict(2, 1, S);
lock_ok_a(1, 1, LS);
unlock_all(1);
unlock_all(2);
lock_ok_i(1, 1, IX);
lock_ok_a(2, 1, LS);
lock_ok_a(1, 1, LS);
lock_ok_i(1, 1, IX);
lock_ok_i(3, 1, IS);
unlock_all(1);
unlock_all(2);
unlock_all(3);
lock_ok_i(1, 4, IS);
lock_ok_i(2, 4, IS);
lock_ok_i(3, 4, IS);
lock_ok_a(3, 4, LS);
lock_ok_i(4, 4, IS);
lock_conflict(4, 4, IX);
lock_conflict(2, 4, IX);
lock_ok_a(1, 4, LS);
unlock_all(1);
unlock_all(2);
unlock_all(3);
unlock_all(4);
lock_ok_i(1, 1, IX);
lock_ok_i(2, 1, IX);
lock_conflict(1, 1, S);
lock_conflict(2, 1, X);
unlock_all(1);
unlock_all(2);
}
int rt_num_threads;
int litmus;
int thread_number= 0, timeouts= 0;
void run_test(const char *test, pthread_handler handler, int n, int m)
{
pthread_t *threads;
ulonglong now= my_getsystime();
int i;
thread_number= timeouts= 0;
litmus= 0;
threads= (pthread_t *)my_malloc(sizeof(void *)*n, MYF(0));
if (!threads)
{
diag("Out of memory");
abort();
}
diag("Running %s with %d threads, %d iterations... ", test, n, m);
rt_num_threads= n;
for (i= 0; i < n ; i++)
if (pthread_create(threads+i, 0, handler, &m))
{
diag("Could not create thread");
abort();
}
for (i= 0 ; i < n ; i++)
pthread_join(threads[i], 0);
now= my_getsystime()-now;
ok(litmus == 0, "Finished %s in %g secs (%d)", test, ((double)now)/1e7, litmus);
my_free((void*)threads, MYF(0));
}
pthread_mutex_t rt_mutex;
int Nrows= 100;
int Ntables= 10;
int table_lock_ratio= 10;
enum lock_type lock_array[6]= {S, X, LS, LX, IS, IX};
char *lock2str[6]= {"S", "X", "LS", "LX", "IS", "IX"};
char *res2str[4]= {
"DIDN'T GET THE LOCK",
"GOT THE LOCK",
"GOT THE LOCK NEED TO LOCK A SUBRESOURCE",
"GOT THE LOCK NEED TO INSTANT LOCK A SUBRESOURCE"};
pthread_handler_t test_lockman(void *arg)
{
int m= (*(int *)arg);
uint x, loid, row, table, res, locklevel, timeout= 0;
LOCK_OWNER *lo; TABLE_LOCK_OWNER *lo1; DBUG_ASSERT(Ntables <= Ntbls);
pthread_mutex_lock(&rt_mutex);
loid= ++thread_number;
pthread_mutex_unlock(&rt_mutex);
lo= loid2lo(loid); lo1= loid2lo1(loid);
for (x= ((int)(intptr)(&m)); m > 0; m--)
{
x= (x*3628273133 + 1500450271) % 9576890767; /* three prime numbers */
row= x % Nrows + Ntables;
table= row % Ntables;
locklevel= (x/Nrows) & 3;
if (table_lock_ratio && (x/Nrows/4) % table_lock_ratio == 0)
{ /* table lock */
res= tablockman_getlock(&tablockman, lo1, ltarray+table, lock_array[locklevel]);
DIAG(("loid %2d, table %d, lock %s, res %s", loid, table,
lock2str[locklevel], res2str[res]));
if (res == DIDNT_GET_THE_LOCK)
{
lockman_release_locks(&lockman, lo); tablockman_release_locks(&tablockman, lo1);
DIAG(("loid %2d, release all locks", loid));
timeout++;
continue;
}
DBUG_ASSERT(res == GOT_THE_LOCK);
}
else
{ /* row lock */
locklevel&= 1;
res= tablockman_getlock(&tablockman, lo1, ltarray+table, lock_array[locklevel + 4]);
DIAG(("loid %2d, row %d, lock %s, res %s", loid, row,
lock2str[locklevel+4], res2str[res]));
switch (res)
{
case DIDNT_GET_THE_LOCK:
lockman_release_locks(&lockman, lo); tablockman_release_locks(&tablockman, lo1);
DIAG(("loid %2d, release all locks", loid));
timeout++;
continue;
case GOT_THE_LOCK:
continue;
case GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE:
/* not implemented, so take a regular lock */
case GOT_THE_LOCK_NEED_TO_LOCK_A_SUBRESOURCE:
res= lockman_getlock(&lockman, lo, row, lock_array[locklevel]);
DIAG(("loid %2d, ROW %d, lock %s, res %s", loid, row,
lock2str[locklevel], res2str[res]));
if (res == DIDNT_GET_THE_LOCK)
{
lockman_release_locks(&lockman, lo);
tablockman_release_locks(&tablockman, lo1);
DIAG(("loid %2d, release all locks", loid));
timeout++;
continue;
}
DBUG_ASSERT(res == GOT_THE_LOCK);
continue;
default:
DBUG_ASSERT(0);
}
}
}
lockman_release_locks(&lockman, lo);
tablockman_release_locks(&tablockman, lo1);
pthread_mutex_lock(&rt_mutex);
rt_num_threads--;
timeouts+= timeout;
if (!rt_num_threads)
diag("number of timeouts: %d", timeouts);
pthread_mutex_unlock(&rt_mutex);
return 0;
}
int main()
{
int i;
my_init();
pthread_mutex_init(&rt_mutex, 0);
plan(35);
if (my_atomic_initialize())
return exit_status();
lockman_init(&lockman, &loid2lo, 50);
tablockman_init(&tablockman, &loid2lo1, 50);
for (i= 0; i < Nlos; i++)
{
pthread_mutex_init(&mutexes[i], MY_MUTEX_INIT_FAST);
pthread_cond_init (&conds[i], 0);
loarray[i].pins= lf_alloc_get_pins(&lockman.alloc);
loarray[i].all_locks= 0;
loarray[i].waiting_for= 0;
loarray[i].mutex= &mutexes[i];
loarray[i].cond= &conds[i];
loarray[i].loid= i+1;
loarray1[i].active_locks= 0;
loarray1[i].waiting_lock= 0;
loarray1[i].waiting_for= 0;
loarray1[i].mutex= &mutexes[i];
loarray1[i].cond= &conds[i];
loarray1[i].loid= i+1;
}
for (i= 0; i < Ntbls; i++)
{
tablockman_init_locked_table(ltarray+i, Nlos);
}
test_tablockman_simple();
#define CYCLES 10000
#define THREADS Nlos /* don't change this line */
/* mixed load, stress-test with random locks */
Nrows= 100;
Ntables= 10;
table_lock_ratio= 10;
run_test("\"random lock\" stress test", test_lockman, THREADS, CYCLES);
/* "real-life" simulation - many rows, no table locks */
Nrows= 1000000;
Ntables= 10;
table_lock_ratio= 0;
run_test("\"real-life\" simulation test", test_lockman, THREADS, CYCLES*10);
for (i= 0; i < Nlos; i++)
{
lockman_release_locks(&lockman, &loarray[i]);
pthread_mutex_destroy(loarray[i].mutex);
pthread_cond_destroy(loarray[i].cond);
lf_pinbox_put_pins(loarray[i].pins);
}
{
ulonglong now= my_getsystime();
lockman_destroy(&lockman);
now= my_getsystime()-now;
diag("lockman_destroy: %g secs", ((double)now)/1e7);
}
pthread_mutex_destroy(&rt_mutex);
my_end(0);
return exit_status();
}
/* Copyright (C) 2006 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
//#define EXTRA_VERBOSE
#include <tap.h>
#include <my_global.h>
#include <my_sys.h>
#include <my_atomic.h>
#include <lf.h>
#include "../tablockman.h"
#define Nlos 100
#define Ntbls 110
TABLE_LOCK_OWNER loarray1[Nlos];
pthread_mutex_t mutexes[Nlos];
pthread_cond_t conds[Nlos];
LOCKED_TABLE ltarray[Ntbls];
TABLOCKMAN tablockman;
#ifndef EXTRA_VERBOSE
#define print_lo1(X) /* no-op */
#define DIAG(X) /* no-op */
#else
#define DIAG(X) diag X
#endif
TABLE_LOCK_OWNER *loid2lo1(uint16 loid)
{
return loarray1+loid-1;
}
#define unlock_all(O) diag("lo" #O "> release all locks"); \
tablockman_release_locks(&tablockman, loid2lo1(O));
#define test_lock(O, R, L, S, RES) \
ok(tablockman_getlock(&tablockman, loid2lo1(O), &ltarray[R], L) == RES, \
"lo" #O "> " S "lock resource " #R " with " #L "-lock"); \
print_lo1(loid2lo1(O));
#define lock_ok_a(O, R, L) \
test_lock(O, R, L, "", GOT_THE_LOCK)
#define lock_ok_i(O, R, L) \
test_lock(O, R, L, "", GOT_THE_LOCK_NEED_TO_LOCK_A_SUBRESOURCE)
#define lock_ok_l(O, R, L) \
test_lock(O, R, L, "", GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE)
#define lock_conflict(O, R, L) \
test_lock(O, R, L, "cannot ", DIDNT_GET_THE_LOCK);
void test_tablockman_simple()
{
/* simple */
lock_ok_a(1, 1, S);
lock_ok_i(2, 2, IS);
lock_ok_i(1, 2, IX);
/* lock escalation */
lock_ok_a(1, 1, X);
lock_ok_i(2, 2, IX);
/* failures */
lock_conflict(2, 1, X);
unlock_all(2);
lock_ok_a(1, 2, S);
lock_ok_a(1, 2, IS);
lock_ok_a(1, 2, LS);
lock_ok_i(1, 3, IX);
lock_ok_a(2, 3, LS);
lock_ok_i(1, 3, IX);
lock_ok_l(2, 3, IS);
unlock_all(1);
unlock_all(2);
lock_ok_i(1, 1, IX);
lock_conflict(2, 1, S);
lock_ok_a(1, 1, LS);
unlock_all(1);
unlock_all(2);
lock_ok_i(1, 1, IX);
lock_ok_a(2, 1, LS);
lock_ok_a(1, 1, LS);
lock_ok_i(1, 1, IX);
lock_ok_i(3, 1, IS);
unlock_all(1);
unlock_all(2);
unlock_all(3);
lock_ok_i(1, 4, IS);
lock_ok_i(2, 4, IS);
lock_ok_i(3, 4, IS);
lock_ok_a(3, 4, LS);
lock_ok_i(4, 4, IS);
lock_conflict(4, 4, IX);
lock_conflict(2, 4, IX);
lock_ok_a(1, 4, LS);
unlock_all(1);
unlock_all(2);
unlock_all(3);
unlock_all(4);
lock_ok_i(1, 1, IX);
lock_ok_i(2, 1, IX);
lock_conflict(1, 1, S);
lock_conflict(2, 1, X);
unlock_all(1);
unlock_all(2);
lock_ok_i(1, 1, IS);
lock_conflict(2, 1, X);
lock_conflict(3, 1, IS);
unlock_all(1);
unlock_all(2);
unlock_all(3);
lock_ok_a(1, 1, S);
lock_conflict(2, 1, IX);
lock_conflict(3, 1, IS);
unlock_all(1);
unlock_all(2);
unlock_all(3);
}
int rt_num_threads;
int litmus;
int thread_number= 0, timeouts= 0;
void run_test(const char *test, pthread_handler handler, int n, int m)
{
pthread_t *threads;
ulonglong now= my_getsystime();
int i;
thread_number= timeouts= 0;
litmus= 0;
threads= (pthread_t *)my_malloc(sizeof(void *)*n, MYF(0));
if (!threads)
{
diag("Out of memory");
abort();
}
diag("Running %s with %d threads, %d iterations... ", test, n, m);
rt_num_threads= n;
for (i= 0; i < n ; i++)
if (pthread_create(threads+i, 0, handler, &m))
{
diag("Could not create thread");
abort();
}
for (i= 0 ; i < n ; i++)
pthread_join(threads[i], 0);
now= my_getsystime()-now;
ok(litmus == 0, "Finished %s in %g secs (%d)", test, ((double)now)/1e7, litmus);
my_free((void*)threads, MYF(0));
}
pthread_mutex_t rt_mutex;
int Nrows= 100;
int Ntables= 10;
int table_lock_ratio= 10;
enum lock_type lock_array[6]= {S, X, LS, LX, IS, IX};
char *lock2str[6]= {"S", "X", "LS", "LX", "IS", "IX"};
char *res2str[4]= {
"DIDN'T GET THE LOCK",
"GOT THE LOCK",
"GOT THE LOCK NEED TO LOCK A SUBRESOURCE",
"GOT THE LOCK NEED TO INSTANT LOCK A SUBRESOURCE"};
pthread_handler_t test_lockman(void *arg)
{
int m= (*(int *)arg);
uint x, loid, row, table, res, locklevel, timeout= 0;
TABLE_LOCK_OWNER *lo1;
DBUG_ASSERT(Ntables <= Ntbls);
DBUG_ASSERT(Nrows + Ntables <= Ntbls);
pthread_mutex_lock(&rt_mutex);
loid= ++thread_number;
pthread_mutex_unlock(&rt_mutex);
lo1= loid2lo1(loid);
for (x= ((int)(intptr)(&m)); m > 0; m--)
{
x= (x*3628273133 + 1500450271) % 9576890767; /* three prime numbers */
row= x % Nrows + Ntables;
table= row % Ntables;
locklevel= (x/Nrows) & 3;
if (table_lock_ratio && (x/Nrows/4) % table_lock_ratio == 0)
{ /* table lock */
res= tablockman_getlock(&tablockman, lo1, ltarray+table, lock_array[locklevel]);
DIAG(("loid %2d, table %d, lock %s, res %s", loid, table,
lock2str[locklevel], res2str[res]));
if (res == DIDNT_GET_THE_LOCK)
{
tablockman_release_locks(&tablockman, lo1);
DIAG(("loid %2d, release all locks", loid));
timeout++;
continue;
}
DBUG_ASSERT(res == GOT_THE_LOCK);
}
else
{ /* row lock */
locklevel&= 1;
res= tablockman_getlock(&tablockman, lo1, ltarray+table, lock_array[locklevel + 4]);
DIAG(("loid %2d, row %d, lock %s, res %s", loid, row,
lock2str[locklevel+4], res2str[res]));
switch (res)
{
case DIDNT_GET_THE_LOCK:
tablockman_release_locks(&tablockman, lo1);
DIAG(("loid %2d, release all locks", loid));
timeout++;
continue;
case GOT_THE_LOCK:
continue;
case GOT_THE_LOCK_NEED_TO_INSTANT_LOCK_A_SUBRESOURCE:
/* not implemented, so take a regular lock */
case GOT_THE_LOCK_NEED_TO_LOCK_A_SUBRESOURCE:
res= tablockman_getlock(&tablockman, lo1, ltarray+row, lock_array[locklevel]);
DIAG(("loid %2d, ROW %d, lock %s, res %s", loid, row,
lock2str[locklevel], res2str[res]));
if (res == DIDNT_GET_THE_LOCK)
{
tablockman_release_locks(&tablockman, lo1);
DIAG(("loid %2d, release all locks", loid));
timeout++;
continue;
}
DBUG_ASSERT(res == GOT_THE_LOCK);
continue;
default:
DBUG_ASSERT(0);
}
}
}
tablockman_release_locks(&tablockman, lo1);
pthread_mutex_lock(&rt_mutex);
rt_num_threads--;
timeouts+= timeout;
if (!rt_num_threads)
diag("number of timeouts: %d", timeouts);
pthread_mutex_unlock(&rt_mutex);
return 0;
}
int main()
{
int i;
my_init();
pthread_mutex_init(&rt_mutex, 0);
plan(35);
if (my_atomic_initialize())
return exit_status();
tablockman_init(&tablockman, &loid2lo1, 50);
for (i= 0; i < Nlos; i++)
{
pthread_mutex_init(&mutexes[i], MY_MUTEX_INIT_FAST);
pthread_cond_init (&conds[i], 0);
loarray1[i].active_locks= 0;
loarray1[i].waiting_lock= 0;
loarray1[i].waiting_for= 0;
loarray1[i].mutex= &mutexes[i];
loarray1[i].cond= &conds[i];
loarray1[i].loid= i+1;
}
for (i= 0; i < Ntbls; i++)
{
tablockman_init_locked_table(ltarray+i, Nlos);
}
test_tablockman_simple();
#define CYCLES 10000
#define THREADS Nlos /* don't change this line */
/* mixed load, stress-test with random locks */
Nrows= 100;
Ntables= 10;
table_lock_ratio= 10;
//run_test("\"random lock\" stress test", test_lockman, THREADS, CYCLES);
#if 0
/* "real-life" simulation - many rows, no table locks */
Nrows= 1000000;
Ntables= 10;
table_lock_ratio= 0;
run_test("\"real-life\" simulation test", test_lockman, THREADS, CYCLES*10);
#endif
for (i= 0; i < Nlos; i++)
{
tablockman_release_locks(&tablockman, &loarray1[i]);
pthread_mutex_destroy(loarray1[i].mutex);
pthread_cond_destroy(loarray1[i].cond);
}
{
ulonglong now= my_getsystime();
for (i= 0; i < Ntbls; i++)
{
tablockman_destroy_locked_table(ltarray+i);
}
tablockman_destroy(&tablockman);
now= my_getsystime()-now;
diag("lockman_destroy: %g secs", ((double)now)/1e7);
}
pthread_mutex_destroy(&rt_mutex);
my_end(0);
return exit_status();
}