Commit eccba068 authored by Pavel Emelyanov, committed by Linus Torvalds

gfs2: make gfs2_glock.gl_owner_pid be a struct pid *

The gl_owner_pid field is used to look up the lock-owning task by its pid, so do this
properly, i.e. store a struct pid pointer and resolve it with the pid_task() function.

pid_task() is now exported so that the gfs2 module can use it.
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Acked-by: Steven Whitehouse <swhiteho@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4cbc76ea
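
For context, the change follows the usual struct pid ownership pattern: take a counted reference with get_pid(task_pid(current)) when recording the lock owner, drop it with put_pid() when the field is cleared, and resolve it back to a task only for reporting via pid_task()/pid_nr(). The sketch below illustrates that pattern in isolation; the demo_lock structure and function names are invented for the example and are not GFS2 code.

/*
 * Illustrative sketch (not GFS2 code): tracking a lock owner as a
 * struct pid * instead of a raw pid_t, as this patch does for
 * gfs2_glock.gl_owner_pid.
 */
#include <linux/pid.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct demo_lock {			/* hypothetical example structure */
	spinlock_t	lock;
	struct pid	*owner_pid;	/* counted reference, or NULL */
};

static void demo_lock_acquire(struct demo_lock *dl)
{
	spin_lock(&dl->lock);
	/* take a reference so the struct pid stays valid even if the task exits */
	dl->owner_pid = get_pid(task_pid(current));
	spin_unlock(&dl->lock);
}

static void demo_lock_release(struct demo_lock *dl)
{
	struct pid *pid;

	spin_lock(&dl->lock);
	pid = dl->owner_pid;
	dl->owner_pid = NULL;
	spin_unlock(&dl->lock);
	put_pid(pid);			/* drop the reference outside the spinlock */
}

static void demo_lock_report(struct demo_lock *dl)
{
	struct task_struct *owner = NULL;

	rcu_read_lock();		/* pid_task() must run under rcu_read_lock() */
	spin_lock(&dl->lock);		/* keep owner_pid stable while we use it */
	if (dl->owner_pid)
		owner = pid_task(dl->owner_pid, PIDTYPE_PID);
	if (owner)
		pr_info("owner = pid %d (%s)\n",
			pid_nr(dl->owner_pid), owner->comm);
	else
		pr_info("owner = none or exited\n");
	spin_unlock(&dl->lock);
	rcu_read_unlock();
}

Keeping a struct pid * rather than a raw pid_t means the number cannot be recycled to an unrelated task while the lock is held, and pid_task() simply returns NULL once the owner has exited, which is what the dump_glock() "(ended)" case relies on.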
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -334,7 +334,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	gl->gl_state = LM_ST_UNLOCKED;
 	gl->gl_demote_state = LM_ST_EXCLUSIVE;
 	gl->gl_hash = hash;
-	gl->gl_owner_pid = 0;
+	gl->gl_owner_pid = NULL;
 	gl->gl_ip = 0;
 	gl->gl_ops = glops;
 	gl->gl_req_gh = NULL;
@@ -632,7 +632,7 @@ static void gfs2_glmutex_lock(struct gfs2_glock *gl)
 		wait_on_holder(&gh);
 		gfs2_holder_uninit(&gh);
 	} else {
-		gl->gl_owner_pid = current->pid;
+		gl->gl_owner_pid = get_pid(task_pid(current));
 		gl->gl_ip = (unsigned long)__builtin_return_address(0);
 		spin_unlock(&gl->gl_spin);
 	}
@@ -653,7 +653,7 @@ static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
 	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
 		acquired = 0;
 	} else {
-		gl->gl_owner_pid = current->pid;
+		gl->gl_owner_pid = get_pid(task_pid(current));
 		gl->gl_ip = (unsigned long)__builtin_return_address(0);
 	}
 	spin_unlock(&gl->gl_spin);
@@ -669,12 +669,17 @@ static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
 static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
 {
+	struct pid *pid;
+
 	spin_lock(&gl->gl_spin);
 	clear_bit(GLF_LOCK, &gl->gl_flags);
-	gl->gl_owner_pid = 0;
+	pid = gl->gl_owner_pid;
+	gl->gl_owner_pid = NULL;
 	gl->gl_ip = 0;
 	run_queue(gl);
 	spin_unlock(&gl->gl_spin);
+	put_pid(pid);
 }
 /**
@@ -1881,13 +1886,13 @@ static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
 	print_dbg(gi, " gl_ref = %d\n", atomic_read(&gl->gl_ref));
 	print_dbg(gi, " gl_state = %u\n", gl->gl_state);
 	if (gl->gl_owner_pid) {
-		gl_owner = find_task_by_pid(gl->gl_owner_pid);
+		gl_owner = pid_task(gl->gl_owner_pid, PIDTYPE_PID);
 		if (gl_owner)
 			print_dbg(gi, " gl_owner = pid %d (%s)\n",
-				  gl->gl_owner_pid, gl_owner->comm);
+				  pid_nr(gl->gl_owner_pid), gl_owner->comm);
 		else
 			print_dbg(gi, " gl_owner = %d (ended)\n",
-				  gl->gl_owner_pid);
+				  pid_nr(gl->gl_owner_pid));
 	} else
 		print_dbg(gi, " gl_owner = -1\n");
 	print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip);
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -182,7 +182,7 @@ struct gfs2_glock {
 	unsigned int gl_hash;
 	unsigned int gl_demote_state; /* state requested by remote node */
 	unsigned long gl_demote_time; /* time of first demote request */
-	pid_t gl_owner_pid;
+	struct pid *gl_owner_pid;
 	unsigned long gl_ip;
 	struct list_head gl_holders;
 	struct list_head gl_waiters1;	/* HIF_MUTEX */
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -368,6 +368,7 @@ struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
 	}
 	return result;
 }
+EXPORT_SYMBOL(pid_task);
 
 /*
  * Must be called under rcu_read_lock() or with tasklist_lock read-held.