Commit 34d024f8 authored by Mark Fasheh

ocfs2: Remove mount/unmount votes

The node maps that are set/unset by these votes are no longer relevant, thus
we can remove the mount and umount votes. Since those are the last two
remaining votes, we can also remove the entire vote infrastructure.

The vote thread has been renamed to the downconvert thread, and the small
amount of functionality related to managing it has been moved into
fs/ocfs2/dlmglue.c. All references to votes have been removed or updated.
Signed-off-by: Mark Fasheh <mark.fasheh@oracle.com>
parent 6f7b056e
......@@ -27,8 +27,7 @@ ocfs2-objs := \
symlink.o \
sysfile.o \
uptodate.o \
ver.o \
vote.o
ver.o
obj-$(CONFIG_OCFS2_FS) += cluster/
obj-$(CONFIG_OCFS2_FS) += dlm/
......@@ -38,6 +38,9 @@
* locking semantics of the file system using the protocol. It should
* be somewhere else, I'm sure, but right now it isn't.
*
* New in version 9:
* - All votes removed
*
* New in version 8:
* - Replace delete inode votes with a cluster lock
*
......@@ -60,7 +63,7 @@
* - full 64 bit i_size in the metadata lock lvbs
* - introduction of "rw" lock and pushing meta/data locking down
*/
#define O2NET_PROTOCOL_VERSION 8ULL
#define O2NET_PROTOCOL_VERSION 9ULL
struct o2net_handshake {
__be64 protocol_version;
__be64 connector_id;
......
......@@ -128,9 +128,9 @@ static int ocfs2_match_dentry(struct dentry *dentry,
/*
* Walk the inode alias list, and find a dentry which has a given
* parent. ocfs2_dentry_attach_lock() wants to find _any_ alias as it
* is looking for a dentry_lock reference. The vote thread is looking
* to unhash aliases, so we allow it to skip any that already have
* that property.
* is looking for a dentry_lock reference. The downconvert thread is
* looking to unhash aliases, so we allow it to skip any that already
* have that property.
*/
struct dentry *ocfs2_find_local_alias(struct inode *inode,
u64 parent_blkno,
......@@ -266,7 +266,7 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry,
dl->dl_count = 0;
/*
* Does this have to happen below, for all attaches, in case
* the struct inode gets blown away by votes?
* the struct inode gets blown away by the downconvert thread?
*/
dl->dl_inode = igrab(inode);
dl->dl_parent_blkno = parent_blkno;
......
......@@ -55,7 +55,6 @@
#include "slot_map.h"
#include "super.h"
#include "uptodate.h"
#include "vote.h"
#include "buffer_head_io.h"
......@@ -153,10 +152,10 @@ struct ocfs2_lock_res_ops {
struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);
/*
* Optionally called in the downconvert (or "vote") thread
* after a successful downconvert. The lockres will not be
* referenced after this callback is called, so it is safe to
* free memory, etc.
* Optionally called in the downconvert thread after a
* successful downconvert. The lockres will not be referenced
* after this callback is called, so it is safe to free
* memory, etc.
*
* The exact semantics of when this is called are controlled
* by ->downconvert_worker()
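The contract spelled out in this comment (the callback runs once in the downconvert thread and the lockres is never referenced again) is what allows a lock type to tear down private state there. Below is a hedged sketch of what such a callback could look like; the member name post_unlock, the use of l_priv, and all example_* identifiers are assumptions made for illustration and are not taken from this patch.

/* Hedged illustration only -- member and field names are assumed. */
static void example_post_unlock(struct ocfs2_super *osb,
				struct ocfs2_lock_res *lockres)
{
	/* Called from the downconvert thread after a successful
	 * downconvert; dlmglue will not touch this lockres again,
	 * so freeing whatever private object backs it is safe. */
	kfree(lockres->l_priv);
}

static struct ocfs2_lock_res_ops example_lops = {
	.post_unlock	= example_post_unlock,	/* member name assumed */
};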
......@@ -310,7 +309,8 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
"resource %s: %s\n", dlm_errname(_stat), _func, \
_lockres->l_name, dlm_errmsg(_stat)); \
} while (0)
static void ocfs2_vote_on_unlock(struct ocfs2_super *osb,
static int ocfs2_downconvert_thread(void *arg);
static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres);
static int ocfs2_meta_lock_update(struct inode *inode,
struct buffer_head **bh);
......@@ -732,7 +732,7 @@ static void ocfs2_blocking_ast(void *opaque, int level)
wake_up(&lockres->l_event);
ocfs2_kick_vote_thread(osb);
ocfs2_wake_downconvert_thread(osb);
}
static void ocfs2_locking_ast(void *opaque)
......@@ -1089,7 +1089,7 @@ static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
mlog_entry_void();
spin_lock_irqsave(&lockres->l_lock, flags);
ocfs2_dec_holders(lockres, level);
ocfs2_vote_on_unlock(osb, lockres);
ocfs2_downconvert_on_unlock(osb, lockres);
spin_unlock_irqrestore(&lockres->l_lock, flags);
mlog_exit_void();
}
......@@ -1372,7 +1372,7 @@ int ocfs2_data_lock_with_page(struct inode *inode,
return ret;
}
static void ocfs2_vote_on_unlock(struct ocfs2_super *osb,
static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
int kick = 0;
......@@ -1380,7 +1380,7 @@ static void ocfs2_vote_on_unlock(struct ocfs2_super *osb,
mlog_entry_void();
/* If we know that another node is waiting on our lock, kick
* the vote thread * pre-emptively when we reach a release
* the downconvert thread pre-emptively when we reach a release
* condition. */
if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
switch(lockres->l_blocking) {
......@@ -1398,7 +1398,7 @@ static void ocfs2_vote_on_unlock(struct ocfs2_super *osb,
}
if (kick)
ocfs2_kick_vote_thread(osb);
ocfs2_wake_downconvert_thread(osb);
mlog_exit_void();
}
......@@ -1832,19 +1832,20 @@ int ocfs2_meta_lock_full(struct inode *inode,
}
/*
* This is working around a lock inversion between tasks acquiring DLM locks
* while holding a page lock and the vote thread which blocks dlm lock acquiry
* while acquiring page locks.
* This is working around a lock inversion between tasks acquiring DLM
* locks while holding a page lock and the downconvert thread which
* blocks dlm lock acquiry while acquiring page locks.
*
* ** These _with_page variants are only intended to be called from aop
* methods that hold page locks and return a very specific *positive* error
* code that aop methods pass up to the VFS -- test for errors with != 0. **
*
* The DLM is called such that it returns -EAGAIN if it would have blocked
* waiting for the vote thread. In that case we unlock our page so the vote
* thread can make progress. Once we've done this we have to return
* AOP_TRUNCATED_PAGE so the aop method that called us can bubble that back up
* into the VFS who will then immediately retry the aop call.
* The DLM is called such that it returns -EAGAIN if it would have
* blocked waiting for the downconvert thread. In that case we unlock
* our page so the downconvert thread can make progress. Once we've
* done this we have to return AOP_TRUNCATED_PAGE so the aop method
* that called us can bubble that back up into the VFS who will then
* immediately retry the aop call.
*
* We do a blocking lock and immediate unlock before returning, though, so that
* the lock has a great chance of being cached on this node by the time the VFS
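For readers unfamiliar with the convention described in that comment, here is a minimal sketch of how an address_space operation might consume a _with_page lock variant and its positive AOP_TRUNCATED_PAGE return. The readpage shape and the ocfs2_data_lock_with_page()/ocfs2_data_unlock() signatures are assumptions for illustration, not something this patch defines.

/* Hedged sketch, not from this patch: helper signatures are assumed. */
static int example_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret;

	/* Non-blocking cluster lock; a *positive* AOP_TRUNCATED_PAGE
	 * return means our page was unlocked so the downconvert thread
	 * could make progress -- test for trouble with != 0. */
	ret = ocfs2_data_lock_with_page(inode, 0, page);
	if (ret != 0) {
		if (ret < 0)
			mlog_errno(ret);
		/* AOP_TRUNCATED_PAGE bubbles up; the VFS retries the aop. */
		return ret;
	}

	/* ... read the page while holding the cluster lock ... */

	ocfs2_data_unlock(inode, 0);
	return 0;
}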
......@@ -2320,11 +2321,11 @@ int ocfs2_dlm_init(struct ocfs2_super *osb)
goto bail;
}
/* launch vote thread */
osb->vote_task = kthread_run(ocfs2_vote_thread, osb, "ocfs2vote");
if (IS_ERR(osb->vote_task)) {
status = PTR_ERR(osb->vote_task);
osb->vote_task = NULL;
/* launch downconvert thread */
osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc");
if (IS_ERR(osb->dc_task)) {
status = PTR_ERR(osb->dc_task);
osb->dc_task = NULL;
mlog_errno(status);
goto bail;
}
......@@ -2353,8 +2354,8 @@ int ocfs2_dlm_init(struct ocfs2_super *osb)
bail:
if (status < 0) {
ocfs2_dlm_shutdown_debug(osb);
if (osb->vote_task)
kthread_stop(osb->vote_task);
if (osb->dc_task)
kthread_stop(osb->dc_task);
}
mlog_exit(status);
......@@ -2369,9 +2370,9 @@ void ocfs2_dlm_shutdown(struct ocfs2_super *osb)
ocfs2_drop_osb_locks(osb);
if (osb->vote_task) {
kthread_stop(osb->vote_task);
osb->vote_task = NULL;
if (osb->dc_task) {
kthread_stop(osb->dc_task);
osb->dc_task = NULL;
}
ocfs2_lock_res_free(&osb->osb_super_lockres);
......@@ -2527,7 +2528,7 @@ static int ocfs2_drop_lock(struct ocfs2_super *osb,
/* Mark the lockres as being dropped. It will no longer be
* queued if blocking, but we still may have to wait on it
* being dequeued from the vote thread before we can consider
* being dequeued from the downconvert thread before we can consider
* it safe to drop.
*
* You can *not* attempt to call cluster_lock on this lockres anymore. */
......@@ -2903,7 +2904,7 @@ static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
/*
* Does the final reference drop on our dentry lock. Right now this
* happens in the vote thread, but we could choose to simplify the
* happens in the downconvert thread, but we could choose to simplify the
* dlmglue API and push these off to the ocfs2_wq in the future.
*/
static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
......@@ -3042,7 +3043,7 @@ void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
mlog(0, "lockres %s blocked.\n", lockres->l_name);
/* Detect whether a lock has been marked as going away while
* the vote thread was processing other things. A lock can
* the downconvert thread was processing other things. A lock can
* still be marked with OCFS2_LOCK_FREEING after this check,
* but short circuiting here will still save us some
* performance. */
......@@ -3091,13 +3092,104 @@ static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
spin_lock(&osb->vote_task_lock);
spin_lock(&osb->dc_task_lock);
if (list_empty(&lockres->l_blocked_list)) {
list_add_tail(&lockres->l_blocked_list,
&osb->blocked_lock_list);
osb->blocked_lock_count++;
}
spin_unlock(&osb->vote_task_lock);
spin_unlock(&osb->dc_task_lock);
mlog_exit_void();
}
static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
{
unsigned long processed;
struct ocfs2_lock_res *lockres;
mlog_entry_void();
spin_lock(&osb->dc_task_lock);
/* grab this early so we know to try again if a state change and
* wake happens part-way through our work */
osb->dc_work_sequence = osb->dc_wake_sequence;
processed = osb->blocked_lock_count;
while (processed) {
BUG_ON(list_empty(&osb->blocked_lock_list));
lockres = list_entry(osb->blocked_lock_list.next,
struct ocfs2_lock_res, l_blocked_list);
list_del_init(&lockres->l_blocked_list);
osb->blocked_lock_count--;
spin_unlock(&osb->dc_task_lock);
BUG_ON(!processed);
processed--;
ocfs2_process_blocked_lock(osb, lockres);
spin_lock(&osb->dc_task_lock);
}
spin_unlock(&osb->dc_task_lock);
mlog_exit_void();
}
static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
{
int empty = 0;
spin_lock(&osb->dc_task_lock);
if (list_empty(&osb->blocked_lock_list))
empty = 1;
spin_unlock(&osb->dc_task_lock);
return empty;
}
static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
{
int should_wake = 0;
spin_lock(&osb->dc_task_lock);
if (osb->dc_work_sequence != osb->dc_wake_sequence)
should_wake = 1;
spin_unlock(&osb->dc_task_lock);
return should_wake;
}
int ocfs2_downconvert_thread(void *arg)
{
int status = 0;
struct ocfs2_super *osb = arg;
/* only quit once we've been asked to stop and there is no more
* work available */
while (!(kthread_should_stop() &&
ocfs2_downconvert_thread_lists_empty(osb))) {
wait_event_interruptible(osb->dc_event,
ocfs2_downconvert_thread_should_wake(osb) ||
kthread_should_stop());
mlog(0, "downconvert_thread: awoken\n");
ocfs2_downconvert_thread_do_work(osb);
}
osb->dc_task = NULL;
return status;
}
void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
{
spin_lock(&osb->dc_task_lock);
/* make sure the downconvert thread gets a swipe at whatever changes
* the caller may have made to the lock state */
osb->dc_wake_sequence++;
spin_unlock(&osb->dc_task_lock);
wake_up(&osb->dc_event);
}
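As an aside, the dc_wake_sequence/dc_work_sequence pair above is a lost-wakeup guard: the thread latches the wake sequence before draining the blocked-lock list, so a wake that arrives mid-pass leaves the counters unequal and forces another pass. A minimal, self-contained userspace analogue of the same pattern (plain C with pthreads, not ocfs2 code, all names invented for illustration):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  event = PTHREAD_COND_INITIALIZER;
static unsigned long wake_sequence;	/* bumped by producers */
static unsigned long work_sequence;	/* latched by the worker */
static bool stop;

static void kick_worker(void)
{
	pthread_mutex_lock(&lock);
	wake_sequence++;		/* record that state changed */
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&event);	/* wake the worker if it sleeps */
}

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!stop) {
		while (work_sequence == wake_sequence && !stop)
			pthread_cond_wait(&event, &lock);
		work_sequence = wake_sequence;	/* latch before working */
		pthread_mutex_unlock(&lock);
		/* ... drain the pending work here, outside the lock ... */
		printf("worker: pass after wake %lu\n", work_sequence);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);
	kick_worker();
	kick_worker();

	pthread_mutex_lock(&lock);
	stop = true;
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&event);
	pthread_join(tid, NULL);
	return 0;
}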
......@@ -54,7 +54,7 @@ struct ocfs2_meta_lvb {
#define OCFS2_META_LOCK_RECOVERY (0x01)
/* Instruct the dlm not to queue ourselves on the other node. */
#define OCFS2_META_LOCK_NOQUEUE (0x02)
/* don't block waiting for the vote thread, instead return -EAGAIN */
/* don't block waiting for the downconvert thread, instead return -EAGAIN */
#define OCFS2_LOCK_NONBLOCK (0x04)
int ocfs2_dlm_init(struct ocfs2_super *osb);
......@@ -112,9 +112,10 @@ void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres);
void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres);
/* for the vote thread */
/* for the downconvert thread */
void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres);
void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb);
struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void);
void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug);
......
......@@ -41,7 +41,6 @@
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "vote.h"
#include "buffer_head_io.h"
......@@ -58,9 +57,7 @@ static void __ocfs2_node_map_set(struct ocfs2_node_map *target,
void ocfs2_init_node_maps(struct ocfs2_super *osb)
{
spin_lock_init(&osb->node_map_lock);
ocfs2_node_map_init(&osb->mounted_map);
ocfs2_node_map_init(&osb->recovery_map);
ocfs2_node_map_init(&osb->umount_map);
ocfs2_node_map_init(&osb->osb_recovering_orphan_dirs);
}
......@@ -82,8 +79,6 @@ static void ocfs2_do_node_down(int node_num,
}
ocfs2_recovery_thread(osb, node_num);
ocfs2_remove_node_from_vote_queues(osb, node_num);
}
/* Called from the dlm when it's about to evict a node. We may also
......@@ -268,8 +263,6 @@ int ocfs2_recovery_map_set(struct ocfs2_super *osb,
spin_lock(&osb->node_map_lock);
__ocfs2_node_map_clear_bit(&osb->mounted_map, num);
if (!test_bit(num, osb->recovery_map.map)) {
__ocfs2_node_map_set_bit(&osb->recovery_map, num);
set = 1;
......
......@@ -49,7 +49,6 @@
#include "symlink.h"
#include "sysfile.h"
#include "uptodate.h"
#include "vote.h"
#include "buffer_head_io.h"
......@@ -718,8 +717,8 @@ static int ocfs2_wipe_inode(struct inode *inode,
}
/* we do this while holding the orphan dir lock because we
* don't want recovery being run from another node to vote for
* an inode delete on us -- this will result in two nodes
* don't want recovery being run from another node to try an
* inode delete underneath us -- this will result in two nodes
* truncating the same file! */
status = ocfs2_truncate_for_delete(osb, inode, di_bh);
if (status < 0) {
......@@ -744,7 +743,7 @@ static int ocfs2_wipe_inode(struct inode *inode,
}
/* There is a series of simple checks that should be done before a
* vote is even considered. Encapsulate those in this function. */
* trylock is even considered. Encapsulate those in this function. */
static int ocfs2_inode_is_valid_to_delete(struct inode *inode)
{
int ret = 0;
......@@ -758,14 +757,14 @@ static int ocfs2_inode_is_valid_to_delete(struct inode *inode)
goto bail;
}
/* If we're coming from process_vote we can't go into our own
/* If we're coming from downconvert_thread we can't go into our own
* voting [hello, deadlock city!], so unfortunately we just
* have to skip deleting this guy. That's OK though because
* the node who's doing the actual deleting should handle it
* anyway. */
if (current == osb->vote_task) {
if (current == osb->dc_task) {
mlog(0, "Skipping delete of %lu because we're currently "
"in process_vote\n", inode->i_ino);
"in downconvert\n", inode->i_ino);
goto bail;
}
......@@ -779,10 +778,9 @@ static int ocfs2_inode_is_valid_to_delete(struct inode *inode)
goto bail_unlock;
}
/* If we have voted "yes" on the wipe of this inode for
* another node, it will be marked here so we can safely skip
* it. Recovery will cleanup any inodes we might inadvertantly
* skip here. */
/* If we have allowed a wipe of this inode for another node, it
* will be marked here so we can safely skip it. Recovery will
* clean up any inodes we might inadvertently skip here. */
if (oi->ip_flags & OCFS2_INODE_SKIP_DELETE) {
mlog(0, "Skipping delete of %lu because another node "
"has done this for us.\n", inode->i_ino);
......@@ -929,7 +927,7 @@ void ocfs2_delete_inode(struct inode *inode)
/* Lock down the inode. This gives us an up to date view of
* it's metadata (for verification), and allows us to
* serialize delete_inode votes.
* serialize delete_inode on multiple nodes.
*
* Even though we might be doing a truncate, we don't take the
* allocation lock here as it won't be needed - nobody will
......@@ -947,15 +945,15 @@ void ocfs2_delete_inode(struct inode *inode)
* before we go ahead and wipe the inode. */
status = ocfs2_query_inode_wipe(inode, di_bh, &wipe);
if (!wipe || status < 0) {
/* Error and inode busy vote both mean we won't be
/* Error and remote inode busy both mean we won't be
* removing the inode, so they take almost the same
* path. */
if (status < 0)
mlog_errno(status);
/* Someone in the cluster has voted to not wipe this
* inode, or it was never completely orphaned. Write
* out the pages and exit now. */
/* Someone in the cluster has disallowed a wipe of
* this inode, or it was never completely
* orphaned. Write out the pages and exit now. */
ocfs2_cleanup_delete_inode(inode, 1);
goto bail_unlock_inode;
}
......@@ -1008,12 +1006,12 @@ void ocfs2_clear_inode(struct inode *inode)
mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL,
"Inode=%lu\n", inode->i_ino);
/* For remove delete_inode vote, we hold open lock before,
* now it is time to unlock PR and EX open locks. */
/* To prevent remote deletes we hold the open lock before; now it
* is time to unlock PR and EX open locks. */
ocfs2_open_unlock(inode);
/* Do these before all the other work so that we don't bounce
* the vote thread while waiting to destroy the locks. */
* the downconvert thread while waiting to destroy the locks. */
ocfs2_mark_lockres_freeing(&oi->ip_rw_lockres);
ocfs2_mark_lockres_freeing(&oi->ip_meta_lockres);
ocfs2_mark_lockres_freeing(&oi->ip_data_lockres);
......
......@@ -44,7 +44,6 @@
#include "localalloc.h"
#include "slot_map.h"
#include "super.h"
#include "vote.h"
#include "sysfile.h"
#include "buffer_head_io.h"
......@@ -103,7 +102,7 @@ static int ocfs2_commit_cache(struct ocfs2_super *osb)
mlog(0, "commit_thread: flushed transaction %lu (%u handles)\n",
journal->j_trans_id, flushed);
ocfs2_kick_vote_thread(osb);
ocfs2_wake_downconvert_thread(osb);
wake_up(&journal->j_checkpointed);
finally:
mlog_exit(status);
......@@ -883,8 +882,8 @@ static int __ocfs2_recovery_thread(void *arg)
ocfs2_super_unlock(osb, 1);
/* We always run recovery on our own orphan dir - the dead
* node(s) may have voted "no" on an inode delete earlier. A
* revote is therefore required. */
* node(s) may have disallowed a previous inode delete. Re-processing
* is therefore required. */
ocfs2_queue_recovery_completion(osb->journal, osb->slot_num, NULL,
NULL);
......@@ -1380,10 +1379,10 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
iter = oi->ip_next_orphan;
spin_lock(&oi->ip_lock);
/* Delete voting may have set these on the assumption
* that the other node would wipe them successfully.
* If they are still in the node's orphan dir, we need
* to reset that state. */
/* The remote delete code may have set these on the
* assumption that the other node would wipe them
* successfully. If they are still in the node's
* orphan dir, we need to reset that state. */
oi->ip_flags &= ~(OCFS2_INODE_DELETED|OCFS2_INODE_SKIP_DELETE);
/* Set the proper information to get us going into
......
......@@ -60,7 +60,6 @@
#include "symlink.h"
#include "sysfile.h"
#include "uptodate.h"
#include "vote.h"
#include "buffer_head_io.h"
......@@ -176,7 +175,7 @@ static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry,
/* Don't drop the cluster lock until *after* the d_add --
* unlink on another node will message us to remove that
* dentry under this lock so otherwise we can race this with
* the vote thread and have a stale dentry. */
* the downconvert thread and have a stale dentry. */
ocfs2_meta_unlock(dir, 0);
bail:
......@@ -765,7 +764,7 @@ static int ocfs2_unlink(struct inode *dir,
status = ocfs2_remote_dentry_delete(dentry);
if (status < 0) {
/* This vote should succeed under all normal
/* This remote delete should succeed under all normal
* circumstances. */
mlog_errno(status);
goto leave;
......@@ -1031,8 +1030,9 @@ static int ocfs2_rename(struct inode *old_dir,
/*
* Aside from allowing a meta data update, the locking here
* also ensures that the vote thread on other nodes won't have
* to concurrently downconvert the inode and the dentry locks.
* also ensures that the downconvert thread on other nodes
* won't have to concurrently downconvert the inode and the
* dentry locks.
*/
status = ocfs2_meta_lock(old_inode, &old_inode_bh, 1);
if (status < 0) {
......
......@@ -189,9 +189,7 @@ struct ocfs2_super
struct ocfs2_slot_info *slot_info;
spinlock_t node_map_lock;
struct ocfs2_node_map mounted_map;
struct ocfs2_node_map recovery_map;
struct ocfs2_node_map umount_map;
u64 root_blkno;
u64 system_dir_blkno;
......@@ -254,28 +252,15 @@ struct ocfs2_super
wait_queue_head_t recovery_event;
spinlock_t vote_task_lock;
struct task_struct *vote_task;
wait_queue_head_t vote_event;
unsigned long vote_wake_sequence;
unsigned long vote_work_sequence;
spinlock_t dc_task_lock;
struct task_struct *dc_task;
wait_queue_head_t dc_event;
unsigned long dc_wake_sequence;
unsigned long dc_work_sequence;
struct list_head blocked_lock_list;
unsigned long blocked_lock_count;
struct list_head vote_list;
int vote_count;
u32 net_key;
spinlock_t net_response_lock;
unsigned int net_response_ids;
struct list_head net_response_list;
struct o2hb_callback_func osb_hb_up;
struct o2hb_callback_func osb_hb_down;
struct list_head osb_net_handlers;
wait_queue_head_t osb_mount_event;
/* Truncate log info */
......
......@@ -48,25 +48,6 @@ static void __ocfs2_fill_slot(struct ocfs2_slot_info *si,
s16 slot_num,
s16 node_num);
/* Use the slot information we've collected to create a map of mounted
* nodes. Should be holding an EX on super block. assumes slot info is
* up to date. Note that we call this *after* we find a slot, so our
* own node should be set in the map too... */
void ocfs2_populate_mounted_map(struct ocfs2_super *osb)
{
int i;
struct ocfs2_slot_info *si = osb->slot_info;
spin_lock(&si->si_lock);
for (i = 0; i < si->si_size; i++)
if (si->si_global_node_nums[i] != OCFS2_INVALID_SLOT)
ocfs2_node_map_set_bit(osb, &osb->mounted_map,
si->si_global_node_nums[i]);
spin_unlock(&si->si_lock);
}
/* post the slot information on disk into our slot_info struct. */
void ocfs2_update_slot_info(struct ocfs2_slot_info *si)
{
......
......@@ -52,8 +52,6 @@ s16 ocfs2_node_num_to_slot(struct ocfs2_slot_info *si,
void ocfs2_clear_slot(struct ocfs2_slot_info *si,
s16 slot_num);
void ocfs2_populate_mounted_map(struct ocfs2_super *osb);
static inline int ocfs2_is_empty_slot(struct ocfs2_slot_info *si,
int slot_num)
{
......
......@@ -65,7 +65,6 @@
#include "sysfile.h"
#include "uptodate.h"
#include "ver.h"
#include "vote.h"
#include "buffer_head_io.h"
......@@ -1123,13 +1122,6 @@ static int ocfs2_mount_volume(struct super_block *sb)
goto leave;
}
/* requires vote_thread to be running. */
status = ocfs2_register_net_handlers(osb);
if (status < 0) {
mlog_errno(status);
goto leave;
}
status = ocfs2_super_lock(osb, 1);
if (status < 0) {
mlog_errno(status);
......@@ -1144,8 +1136,6 @@ static int ocfs2_mount_volume(struct super_block *sb)
goto leave;
}
ocfs2_populate_mounted_map(osb);
/* load all node-local system inodes */
status = ocfs2_init_local_system_inodes(osb);
if (status < 0) {
......@@ -1168,15 +1158,6 @@ static int ocfs2_mount_volume(struct super_block *sb)
if (ocfs2_mount_local(osb))
goto leave;
/* This should be sent *after* we recovered our journal as it
* will cause other nodes to unmark us as needing
* recovery. However, we need to send it *before* dropping the
* super block lock as otherwise their recovery threads might
* try to clean us up while we're live! */
status = ocfs2_request_mount_vote(osb);
if (status < 0)
mlog_errno(status);
leave:
if (unlock_super)
ocfs2_super_unlock(osb, 1);
......@@ -1234,10 +1215,6 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
mlog_errno(tmp);
return;
}
tmp = ocfs2_request_umount_vote(osb);
if (tmp < 0)
mlog_errno(tmp);
}
if (osb->slot_num != OCFS2_INVALID_SLOT)
......@@ -1248,11 +1225,8 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
ocfs2_release_system_inodes(osb);
if (osb->dlm) {
ocfs2_unregister_net_handlers(osb);
if (osb->dlm)
ocfs2_dlm_shutdown(osb);
}
debugfs_remove(osb->osb_debug_root);
......@@ -1336,19 +1310,13 @@ static int ocfs2_initialize_super(struct super_block *sb,
osb->s_sectsize_bits = blksize_bits(sector_size);
BUG_ON(!osb->s_sectsize_bits);
osb->net_response_ids = 0;
spin_lock_init(&osb->net_response_lock);
INIT_LIST_HEAD(&osb->net_response_list);
INIT_LIST_HEAD(&osb->osb_net_handlers);
init_waitqueue_head(&osb->recovery_event);
spin_lock_init(&osb->vote_task_lock);
init_waitqueue_head(&osb->vote_event);
osb->vote_work_sequence = 0;
osb->vote_wake_sequence = 0;
spin_lock_init(&osb->dc_task_lock);
init_waitqueue_head(&osb->dc_event);
osb->dc_work_sequence = 0;
osb->dc_wake_sequence = 0;
INIT_LIST_HEAD(&osb->blocked_lock_list);
osb->blocked_lock_count = 0;
INIT_LIST_HEAD(&osb->vote_list);
spin_lock_init(&osb->osb_lock);
atomic_set(&osb->alloc_stats.moves, 0);
......@@ -1488,7 +1456,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
}
memcpy(&uuid_net_key, di->id2.i_super.s_uuid, sizeof(uuid_net_key));
osb->net_key = le32_to_cpu(uuid_net_key);
strncpy(osb->vol_label, di->id2.i_super.s_label, 63);
osb->vol_label[63] = '\0';
......
/* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* vote.h
*
* description here
*
* Copyright (C) 2002, 2004 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#ifndef VOTE_H
#define VOTE_H
int ocfs2_vote_thread(void *arg);
static inline void ocfs2_kick_vote_thread(struct ocfs2_super *osb)
{
spin_lock(&osb->vote_task_lock);
/* make sure the voting thread gets a swipe at whatever changes
* the caller may have made to the voting state */
osb->vote_wake_sequence++;
spin_unlock(&osb->vote_task_lock);
wake_up(&osb->vote_event);
}
int ocfs2_request_mount_vote(struct ocfs2_super *osb);
int ocfs2_request_umount_vote(struct ocfs2_super *osb);
int ocfs2_register_net_handlers(struct ocfs2_super *osb);
void ocfs2_unregister_net_handlers(struct ocfs2_super *osb);
void ocfs2_remove_node_from_vote_queues(struct ocfs2_super *osb,
int node_num);
#endif