nexedi / MariaDB / Commits

Commit 34666ecb
authored Mar 02, 2014 by John Esmet
fixes #190 Use the locktree escalator lambda function API, remove
conditional compilation.
parent 433e2bab
Showing 2 changed files with 9 additions and 54 deletions (+9 -54)
locktree/locktree.h    +3  -10
locktree/manager.cc    +6  -44
locktree/locktree.h
...
@@ -92,6 +92,8 @@ PATENT RIGHTS GRANT:
 #ifndef TOKU_LOCKTREE_H
 #define TOKU_LOCKTREE_H
 
+#include <functional>
+
 #include <db.h>
 #include <toku_time.h>
 #include <toku_pthread.h>
...
@@ -105,11 +107,6 @@ PATENT RIGHTS GRANT:
 #include "wfg.h"
 #include "range_buffer.h"
 
-#define TOKU_LOCKTREE_ESCALATOR_LAMBDA 0
-#if TOKU_LOCKTREE_ESCALATOR_LAMBDA
-#include <functional>
-#endif
-
 enum {
     LTM_SIZE_CURRENT = 0,
     LTM_SIZE_LIMIT,
...
@@ -225,11 +222,7 @@ class locktree {
     public:
         void create(void);
         void destroy(void);
-#if TOKU_LOCKTREE_ESCALATOR_LAMBDA
         void run(manager *mgr, std::function<void (void)> escalate_locktrees_fun);
-#else
-        void run(manager *mgr, void (*escalate_locktrees_fun)(void *extra), void *extra);
-#endif
     private:
         toku_mutex_t m_escalator_mutex;
         toku_cond_t m_escalator_done;
...
@@ -342,7 +335,7 @@ class locktree {
     void escalate_all_locktrees(void);
 
     // Escalate a set of locktrees
-    void escalate_locktrees(locktree **locktrees, int num_locktrees);
+    void escalate_locktrees(locktree *const *locktrees, int num_locktrees);
 
     // Add time t to the escalator's wait time statistics
     void add_escalator_wait_time(uint64_t t);
...
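The two run() declarations above differ only in callback style, and the commit keeps the std::function form. A minimal standalone sketch (hypothetical names, not the TokuFT code) of why a std::function parameter needs no trampoline function or void * cookie, while a C-style function pointer does:

// Sketch with hypothetical types; contrasts the two callback styles above.
#include <cstdio>
#include <functional>

// Old style: a plain function pointer plus an untyped "extra" cookie.
static void run_with_fnptr(void (*fun)(void *extra), void *extra) {
    fun(extra);
}

// New style: std::function can hold a capturing lambda directly.
static void run_with_std_function(std::function<void (void)> fun) {
    fun();
}

struct widget {
    int value;
    void print(void) { printf("value = %d\n", value); }
};

// Trampoline required only by the function-pointer style.
static void print_widget_trampoline(void *extra) {
    widget *w = (widget *) extra;
    w->print();
}

int main(void) {
    widget w = { 42 };
    run_with_fnptr(print_widget_trampoline, &w);            // old: state goes through void *
    run_with_std_function([&w] () -> void { w.print(); });  // new: the lambda captures state itself
    return 0;
}

The function-pointer style forces every caller to package its state behind void *, which is exactly what the removed manager_run_escalation_fun helper and escalate_args struct in manager.cc did.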
locktree/manager.cc
...
@@ -304,27 +304,15 @@ void locktree::manager::release_lt(locktree *lt) {
 }
 
 // test-only version of lock escalation
-#if TOKU_LOCKTREE_ESCALATOR_LAMBDA
 void locktree::manager::run_escalation(void) {
     m_escalator.run(this, [this] () -> void { escalate_all_locktrees(); });
 }
-#else
-static void manager_run_escalation_fun(void *extra) {
-    locktree::manager *thismanager = (locktree::manager *) extra;
-    thismanager->escalate_all_locktrees();
-}
-
-void locktree::manager::run_escalation(void) {
-    m_escalator.run(this, manager_run_escalation_fun, this);
-}
-#endif
 
 void locktree::manager::run_escalation_for_test(void) {
     run_escalation();
 }
 
 void locktree::manager::escalate_all_locktrees(void) {
-    if (0) fprintf(stderr, "%d %s:%u\n", toku_os_gettid(), __PRETTY_FUNCTION__, __LINE__);
     uint64_t t0 = toku_current_time_microsec();
 
     // get all locktrees
...
@@ -458,8 +446,7 @@ void locktree::manager::add_escalator_wait_time(uint64_t t) {
     toku_mutex_unlock(&m_escalation_mutex);
 }
 
-void locktree::manager::escalate_locktrees(locktree **locktrees, int num_locktrees) {
-    if (0) fprintf(stderr, "%d %s:%u %d\n", toku_os_gettid(), __PRETTY_FUNCTION__, __LINE__, num_locktrees);
+void locktree::manager::escalate_locktrees(locktree *const *locktrees, int num_locktrees) {
     // there are too many row locks in the system and we need to tidy up.
     //
     // a simple implementation of escalation does not attempt
...
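Loosening the parameter from locktree ** to locktree *const * promises that the function will not overwrite the caller's pointer array, and it also lets an array that a lambda captured by value (which is const inside a non-mutable lambda) bind to the parameter. A small standalone illustration of those conversions, with hypothetical types rather than the locktree classes:

// Hypothetical types; illustrates the locktree ** -> locktree *const * change above.
struct node { };

// Accepts any array of node pointers but may not overwrite its elements.
static int count_nonnull(node *const *arr, int n) {
    int count = 0;
    for (int i = 0; i < n; i++)
        if (arr[i] != nullptr) count++;
    return count;
}

int main(void) {
    node a, b;
    node *arr[2] = { &a, &b };
    int on_stack = count_nonnull(arr, 2);   // node *[2] converts implicitly

    auto from_lambda = [arr] () -> int {
        // The by-value capture is const inside the lambda, so it decays to
        // node *const *; a node ** parameter would not accept it.
        return count_nonnull(arr, 2);
    };
    return (on_stack == 2 && from_lambda() == 2) ? 0 : 1;
}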
@@ -481,34 +468,17 @@ void locktree::manager::escalate_locktrees(locktree **locktrees, int num_locktre
     toku_mutex_unlock(&m_escalation_mutex);
 }
 
-#if !TOKU_LOCKTREE_ESCALATOR_LAMBDA
-struct escalate_args {
-    locktree::manager *mgr;
-    locktree **locktrees;
-    int num_locktrees;
-};
-
-static void manager_escalate_locktrees(void *extra) {
-    escalate_args *args = (escalate_args *) extra;
-    args->mgr->escalate_locktrees(args->locktrees, args->num_locktrees);
-}
-#endif
-
 void locktree::manager::escalate_lock_trees_for_txn(TXNID txnid UU(), locktree *lt UU()) {
     // get lock trees for txnid
-    const int num_locktrees = 1;
-    locktree *locktrees[1] = { lt };
     reference_lt(lt);
 
     // escalate these lock trees
     locktree::escalator this_escalator;
     this_escalator.create();
-#if TOKU_LOCKTREE_ESCALATOR_LAMBDA
-    this_escalator.run(this, [this, locktrees, num_locktrees] () -> void { escalate_locktrees(locktrees, num_locktrees); });
-#else
-    escalate_args args = { this, locktrees, num_locktrees };
-    this_escalator.run(this, manager_escalate_locktrees, &args);
-#endif
+    this_escalator.run(this, [this, lt] () -> void { locktree *locktrees[1] = { lt }; escalate_locktrees(locktrees, 1); });
     this_escalator.destroy();
 }
...
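With the escalate_args struct gone, escalate_lock_trees_for_txn captures only this and lt and builds the one-element array inside the lambda body, where it lives just for the duration of the synchronous run() call. A rough sketch of that shape, with hypothetical names:

// Sketch (hypothetical names): build the temporary array inside the lambda body
// so only the single pointer has to be captured.
#include <cstdio>

struct item { int id; };

static void process_all(item *const *items, int n) {
    for (int i = 0; i < n; i++) printf("item %d\n", items[i]->id);
}

int main(void) {
    item x = { 7 };
    item *px = &x;
    auto escalate_one = [px] () -> void {
        item *items[1] = { px };   // array exists only while the lambda runs
        process_all(items, 1);
    };
    escalate_one();                // invoked synchronously, so px is still valid
    return 0;
}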
@@ -524,22 +494,14 @@ void locktree::escalator::destroy(void) {
     toku_mutex_destroy(&m_escalator_mutex);
 }
 
-#if TOKU_LOCKTREE_ESCALATOR_LAMBDA
 void locktree::escalator::run(locktree::manager *mgr, std::function<void (void)> escalate_locktrees_fun) {
-#else
-void locktree::escalator::run(locktree::manager *mgr, void (*escalate_locktrees_fun)(void *extra), void *extra) {
-#endif
     uint64_t t0 = toku_current_time_microsec();
     toku_mutex_lock(&m_escalator_mutex);
     if (!m_escalator_running) {
         // run escalation on this thread
         m_escalator_running = true;
         toku_mutex_unlock(&m_escalator_mutex);
-#if TOKU_LOCKTREE_ESCALATOR_LAMBDA
         escalate_locktrees_fun();
-#else
-        escalate_locktrees_fun(extra);
-#endif
         toku_mutex_lock(&m_escalator_mutex);
         m_escalator_running = false;
         toku_cond_broadcast(&m_escalator_done);
...
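escalator::run guards escalation with a mutex and a running flag: the first caller performs the escalation itself, outside the lock, and broadcasts m_escalator_done when finished; the elided branch presumably makes later callers wait for the in-flight escalation rather than start another. A rough standalone sketch of that run-or-wait shape using standard library primitives (an illustrative assumption, not the TokuFT implementation):

#include <condition_variable>
#include <functional>
#include <mutex>

class escalator_like {                       // hypothetical stand-in for locktree::escalator
    std::mutex m_mutex;
    std::condition_variable m_done;
    bool m_running = false;

  public:
    void run(std::function<void (void)> escalate_fun) {
        std::unique_lock<std::mutex> lock(m_mutex);
        if (!m_running) {
            // First caller does the work itself, with the lock released.
            m_running = true;
            lock.unlock();
            escalate_fun();
            lock.lock();
            m_running = false;
            m_done.notify_all();             // analogous to toku_cond_broadcast
        } else {
            // Later callers wait for the in-flight escalation to finish.
            m_done.wait(lock, [this] { return !m_running; });
        }
    }
};

int main(void) {
    escalator_like e;
    e.run([] { /* pretend to escalate locks */ });
    return 0;
}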