Commit 4d4ce59d authored by Sergei Golubchik

compilation fixes for WITH_ATOMIC_OPS=rwlocks

parent efc93eb3
@@ -41,13 +41,6 @@ typedef char my_atomic_rwlock_t;
typedef struct {pthread_mutex_t rw;} my_atomic_rwlock_t;
#ifndef SAFE_MUTEX
/*
We use read-write lock macros but map them to mutex locks, because plain
mutexes are faster. Still, since the API is semantically rich, we can
change the underlying implementation if necessary.
*/
#define my_atomic_rwlock_destroy(name) pthread_mutex_destroy(& (name)->rw)
#define my_atomic_rwlock_init(name) pthread_mutex_init(& (name)->rw, 0)
#define my_atomic_rwlock_rdlock(name) pthread_mutex_lock(& (name)->rw)
@@ -55,37 +48,6 @@ typedef struct {pthread_mutex_t rw;} my_atomic_rwlock_t;
#define my_atomic_rwlock_rdunlock(name) pthread_mutex_unlock(& (name)->rw)
#define my_atomic_rwlock_wrunlock(name) pthread_mutex_unlock(& (name)->rw)
#else /* SAFE_MUTEX */
/*
SAFE_MUTEX pollutes the compilation namespace with macros
that redefine pthread_mutex_t, pthread_mutex_init, etc.
Atomic operations should never use the safe mutex wrappers.
Unfortunately, there is no way to have both:
- safe mutex macros expanding pthread_mutex_lock to safe_mutex_lock
- my_atomic macros expanding to unmodified pthread_mutex_lock
inlined in the same compilation unit.
So, in case of SAFE_MUTEX, a function call is required.
Given that SAFE_MUTEX is a debugging facility,
this extra function call is not a performance concern for
production builds.
*/
C_MODE_START
extern void plain_pthread_mutex_init(safe_mutex_t *);
extern void plain_pthread_mutex_destroy(safe_mutex_t *);
extern void plain_pthread_mutex_lock(safe_mutex_t *);
extern void plain_pthread_mutex_unlock(safe_mutex_t *);
C_MODE_END
#define my_atomic_rwlock_destroy(name) plain_pthread_mutex_destroy(&(name)->rw)
#define my_atomic_rwlock_init(name) plain_pthread_mutex_init(&(name)->rw)
#define my_atomic_rwlock_rdlock(name) plain_pthread_mutex_lock(&(name)->rw)
#define my_atomic_rwlock_wrlock(name) plain_pthread_mutex_lock(&(name)->rw)
#define my_atomic_rwlock_rdunlock(name) plain_pthread_mutex_unlock(&(name)->rw)
#define my_atomic_rwlock_wrunlock(name) plain_pthread_mutex_unlock(&(name)->rw)
#endif /* SAFE_MUTEX */
#define MY_ATOMIC_MODE "mutex"
#ifndef MY_ATOMIC_MODE_RWLOCKS
#define MY_ATOMIC_MODE_RWLOCKS 1
......
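
The plain_pthread_mutex_* wrappers declared in the SAFE_MUTEX branch above exist so that the atomic-ops macros can still reach the real pthread primitives after SAFE_MUTEX has remapped them. A minimal sketch of how such wrappers can be defined in a separate compilation unit follows; the safe_mutex_t member name `mutex` is an assumption here, not something this commit shows:

```c
/* Sketch only: one compilation unit that undoes the SAFE_MUTEX
   remapping so these calls hit the real pthread primitives.
   Assumes safe_mutex_t embeds the underlying lock in a member
   named 'mutex' (hypothetical name). */
#include "my_pthread.h"   /* safe_mutex_t, SAFE_MUTEX macro remapping */

#undef pthread_mutex_init
#undef pthread_mutex_destroy
#undef pthread_mutex_lock
#undef pthread_mutex_unlock

void plain_pthread_mutex_init(safe_mutex_t *m)
{ pthread_mutex_init(&m->mutex, NULL); }

void plain_pthread_mutex_destroy(safe_mutex_t *m)
{ pthread_mutex_destroy(&m->mutex); }

void plain_pthread_mutex_lock(safe_mutex_t *m)
{ pthread_mutex_lock(&m->mutex); }

void plain_pthread_mutex_unlock(safe_mutex_t *m)
{ pthread_mutex_unlock(&m->mutex); }
```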
@@ -4128,6 +4128,7 @@ int MYSQL_BIN_LOG::purge_first_log(Relay_log_info* rli, bool included)
included= 1;
to_purge_if_included= my_strdup(ir->name, MYF(0));
}
my_atomic_rwlock_destroy(&ir->inuse_relaylog_atomic_lock);
my_free(ir);
ir= next;
}
......
@@ -4,7 +4,6 @@
#include "rpl_mi.h"
#include "debug_sync.h"
/*
Code for optional parallel execution of replicated events on the slave.
*/
@@ -844,9 +843,9 @@ handle_rpl_parallel_thread(void *arg)
{
if (last_ir)
{
my_atomic_rwlock_wrlock(&rli->inuse_relaylog_atomic_lock);
my_atomic_rwlock_wrlock(&last_ir->inuse_relaylog_atomic_lock);
my_atomic_add64(&last_ir->dequeued_count, accumulated_ir_count);
my_atomic_rwlock_wrunlock(&rli->inuse_relaylog_atomic_lock);
my_atomic_rwlock_wrunlock(&last_ir->inuse_relaylog_atomic_lock);
accumulated_ir_count= 0;
}
last_ir= ir;
@@ -857,9 +856,9 @@ handle_rpl_parallel_thread(void *arg)
}
if (last_ir)
{
my_atomic_rwlock_wrlock(&rli->inuse_relaylog_atomic_lock);
my_atomic_rwlock_wrlock(&last_ir->inuse_relaylog_atomic_lock);
my_atomic_add64(&last_ir->dequeued_count, accumulated_ir_count);
my_atomic_rwlock_wrunlock(&rli->inuse_relaylog_atomic_lock);
my_atomic_rwlock_wrunlock(&last_ir->inuse_relaylog_atomic_lock);
}
if ((events= rpt->event_queue) != NULL)
......
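
The change above is the core of the fix: in WITH_ATOMIC_OPS=rwlocks builds, my_atomic_add64() is not natively atomic, so every reader and writer of dequeued_count must bracket the access with the same my_atomic_rwlock_t — which now lives in the inuse_relaylog being updated (last_ir), not in the Relay_log_info. The general pattern, sketched with hypothetical names (counter_lock, counter):

```c
/* Pattern sketch for my_atomic use in rwlock-fallback mode.
   'counter_lock' and 'counter' are hypothetical; each shared
   counter is paired with one lock used by all of its accessors. */
#include "my_atomic.h"

my_atomic_rwlock_t counter_lock;    /* protects 'counter' */
volatile int64 counter;

void counter_add(int64 n)
{
  my_atomic_rwlock_wrlock(&counter_lock);   /* no-op with native atomics */
  my_atomic_add64(&counter, n);
  my_atomic_rwlock_wrunlock(&counter_lock);
}

int64 counter_read(void)
{
  int64 v;
  my_atomic_rwlock_rdlock(&counter_lock);
  v= my_atomic_load64(&counter);
  my_atomic_rwlock_rdunlock(&counter_lock);
  return v;
}
```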
@@ -92,7 +92,6 @@ Relay_log_info::Relay_log_info(bool is_slave_recovery)
mysql_cond_init(key_relay_log_info_start_cond, &start_cond, NULL);
mysql_cond_init(key_relay_log_info_stop_cond, &stop_cond, NULL);
mysql_cond_init(key_relay_log_info_log_space_cond, &log_space_cond, NULL);
my_atomic_rwlock_init(&inuse_relaylog_atomic_lock);
relay_log.init_pthread_objects();
DBUG_VOID_RETURN;
}
@@ -108,6 +107,7 @@ Relay_log_info::~Relay_log_info()
{
DBUG_ASSERT(cur->queued_count == cur->dequeued_count);
inuse_relaylog *next= cur->next;
my_atomic_rwlock_destroy(&cur->inuse_relaylog_atomic_lock);
my_free(cur);
cur= next;
}
@@ -118,7 +118,6 @@ Relay_log_info::~Relay_log_info()
mysql_cond_destroy(&start_cond);
mysql_cond_destroy(&stop_cond);
mysql_cond_destroy(&log_space_cond);
my_atomic_rwlock_destroy(&inuse_relaylog_atomic_lock);
relay_log.cleanup();
DBUG_VOID_RETURN;
}
@@ -1371,6 +1370,7 @@ Relay_log_info::alloc_inuse_relaylog(const char *name)
last_inuse_relaylog->next= ir;
}
last_inuse_relaylog= ir;
my_atomic_rwlock_init(&ir->inuse_relaylog_atomic_lock);
return 0;
}
......
@@ -170,8 +170,6 @@ class Relay_log_info : public Slave_reporting_capability
*/
inuse_relaylog *inuse_relaylog_list;
inuse_relaylog *last_inuse_relaylog;
/* Lock used to protect inuse_relaylog::dequeued_count */
my_atomic_rwlock_t inuse_relaylog_atomic_lock;
/*
Needed to deal properly with cur_log getting closed and re-opened with
@@ -504,6 +502,8 @@ struct inuse_relaylog {
/* Set when all events have been read from a relaylog. */
bool completed;
char name[FN_REFLEN];
/* Lock used to protect inuse_relaylog::dequeued_count */
my_atomic_rwlock_t inuse_relaylog_atomic_lock;
};
......
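
Since the lock is now a member of struct inuse_relaylog rather than of Relay_log_info, its lifetime must follow each list entry: the commit initializes it in alloc_inuse_relaylog() and destroys it at both places an entry is freed (MYSQL_BIN_LOG::purge_first_log() and ~Relay_log_info()). A condensed lifecycle sketch, with unrelated fields elided and names simplified:

```c
/* Condensed sketch of the per-entry lock lifecycle; most fields of
   inuse_relaylog are elided and 'entry' is a simplified stand-in. */
#include "my_global.h"
#include "my_sys.h"      /* my_malloc, my_free, MYF */
#include "my_atomic.h"

struct entry
{
  struct entry *next;
  volatile int64 dequeued_count;
  my_atomic_rwlock_t inuse_relaylog_atomic_lock; /* protects dequeued_count */
};

struct entry *entry_alloc(void)
{
  struct entry *ir=
    (struct entry *) my_malloc(sizeof(*ir), MYF(MY_WME | MY_ZEROFILL));
  if (ir)
    my_atomic_rwlock_init(&ir->inuse_relaylog_atomic_lock);
  return ir;
}

void entry_free(struct entry *ir)
{
  /* destroy the embedded lock before freeing the memory that holds it */
  my_atomic_rwlock_destroy(&ir->inuse_relaylog_atomic_lock);
  my_free(ir);
}
```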