Commit 61e0d47c authored by Miklos Szeredi, committed by Jens Axboe

splice: add helpers for locking pipe inode

There are lots of sequences like this, especially in splice code:

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);
	/* do something */
	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

so introduce helpers which do the conditional locking and unlocking.
Also replace the inode_double_lock() call with a pipe_double_lock()
helper to avoid spreading the use of this functionality beyond the
pipe code.

This patch is just a cleanup, and should cause no behavioral changes.
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent f8cc774c
...@@ -1470,42 +1470,6 @@ static void __wait_on_freeing_inode(struct inode *inode) ...@@ -1470,42 +1470,6 @@ static void __wait_on_freeing_inode(struct inode *inode)
spin_lock(&inode_lock); spin_lock(&inode_lock);
} }
/*
* We rarely want to lock two inodes that do not have a parent/child
* relationship (such as directory, child inode) simultaneously. The
* vast majority of file systems should be able to get along fine
* without this. Do not use these functions except as a last resort.
*/
void inode_double_lock(struct inode *inode1, struct inode *inode2)
{
	struct inode *first, *second;

	/*
	 * Degenerate cases: at most one distinct inode was supplied,
	 * so plain (non-nested) locking suffices.
	 */
	if (inode1 == NULL || inode2 == NULL || inode1 == inode2) {
		struct inode *only = inode1 ? inode1 : inode2;

		if (only)
			mutex_lock(&only->i_mutex);
		return;
	}

	/*
	 * Two distinct inodes: impose a global order by address so that
	 * concurrent callers cannot deadlock ABBA.  The lower-addressed
	 * inode is always taken first, annotated as the "parent" for
	 * lockdep's nesting checks.
	 */
	if (inode1 < inode2) {
		first = inode1;
		second = inode2;
	} else {
		first = inode2;
		second = inode1;
	}
	mutex_lock_nested(&first->i_mutex, I_MUTEX_PARENT);
	mutex_lock_nested(&second->i_mutex, I_MUTEX_CHILD);
}
EXPORT_SYMBOL(inode_double_lock);
void inode_double_unlock(struct inode *inode1, struct inode *inode2)
{
if (inode1)
mutex_unlock(&inode1->i_mutex);
if (inode2 && inode2 != inode1)
mutex_unlock(&inode2->i_mutex);
}
EXPORT_SYMBOL(inode_double_unlock);
static __initdata unsigned long ihash_entries; static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str) static int __init set_ihash_entries(char *str)
{ {
......
...@@ -37,6 +37,42 @@ ...@@ -37,6 +37,42 @@
* -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
*/ */
static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
if (pipe->inode)
mutex_lock_nested(&pipe->inode->i_mutex, subclass);
}
void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * Take the pipe's inode mutex (if any) with the I_MUTEX_PARENT
	 * subclass: pipe_lock() nests non-pipe inode locks (for writing
	 * to a file).
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);
void pipe_unlock(struct pipe_inode_info *pipe)
{
if (pipe->inode)
mutex_unlock(&pipe->inode->i_mutex);
}
EXPORT_SYMBOL(pipe_unlock);
void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	/* Same pipe on both sides would self-deadlock on i_mutex. */
	BUG_ON(pipe1 == pipe2);

	/*
	 * Lock the two pipes in address order so concurrent callers
	 * cannot deadlock ABBA (e.g. tee A -> B racing tee B -> A).
	 *
	 * The lower-addressed pipe is always taken first and annotated
	 * I_MUTEX_PARENT for lockdep; the second one is I_MUTEX_CHILD.
	 * (The original else-branch took the CHILD subclass before the
	 * PARENT one, inverting the nesting annotation and disagreeing
	 * with the if-branch and with inode_double_lock().)
	 */
	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}
/* Drop the inode semaphore and wait for a pipe event, atomically */ /* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe) void pipe_wait(struct pipe_inode_info *pipe)
{ {
...@@ -47,12 +83,10 @@ void pipe_wait(struct pipe_inode_info *pipe) ...@@ -47,12 +83,10 @@ void pipe_wait(struct pipe_inode_info *pipe)
* is considered a noninteractive wait: * is considered a noninteractive wait:
*/ */
prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE); prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
if (pipe->inode) pipe_unlock(pipe);
mutex_unlock(&pipe->inode->i_mutex);
schedule(); schedule();
finish_wait(&pipe->wait, &wait); finish_wait(&pipe->wait, &wait);
if (pipe->inode) pipe_lock(pipe);
mutex_lock(&pipe->inode->i_mutex);
} }
static int static int
......
...@@ -182,8 +182,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe, ...@@ -182,8 +182,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
do_wakeup = 0; do_wakeup = 0;
page_nr = 0; page_nr = 0;
if (pipe->inode) pipe_lock(pipe);
mutex_lock(&pipe->inode->i_mutex);
for (;;) { for (;;) {
if (!pipe->readers) { if (!pipe->readers) {
...@@ -245,8 +244,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe, ...@@ -245,8 +244,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
pipe->waiting_writers--; pipe->waiting_writers--;
} }
if (pipe->inode) { pipe_unlock(pipe);
mutex_unlock(&pipe->inode->i_mutex);
if (do_wakeup) { if (do_wakeup) {
smp_mb(); smp_mb();
...@@ -254,7 +252,6 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe, ...@@ -254,7 +252,6 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
wake_up_interruptible(&pipe->wait); wake_up_interruptible(&pipe->wait);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
} }
}
while (page_nr < spd_pages) while (page_nr < spd_pages)
spd->spd_release(spd, page_nr++); spd->spd_release(spd, page_nr++);
...@@ -801,11 +798,9 @@ ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out, ...@@ -801,11 +798,9 @@ ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
.u.file = out, .u.file = out,
}; };
if (pipe->inode) pipe_lock(pipe);
mutex_lock(&pipe->inode->i_mutex);
ret = __splice_from_pipe(pipe, &sd, actor); ret = __splice_from_pipe(pipe, &sd, actor);
if (pipe->inode) pipe_unlock(pipe);
mutex_unlock(&pipe->inode->i_mutex);
return ret; return ret;
} }
...@@ -837,8 +832,7 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out, ...@@ -837,8 +832,7 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
}; };
ssize_t ret; ssize_t ret;
if (pipe->inode) pipe_lock(pipe);
mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_PARENT);
splice_from_pipe_begin(&sd); splice_from_pipe_begin(&sd);
do { do {
...@@ -854,8 +848,7 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out, ...@@ -854,8 +848,7 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
} while (ret > 0); } while (ret > 0);
splice_from_pipe_end(pipe, &sd); splice_from_pipe_end(pipe, &sd);
if (pipe->inode) pipe_unlock(pipe);
mutex_unlock(&pipe->inode->i_mutex);
if (sd.num_spliced) if (sd.num_spliced)
ret = sd.num_spliced; ret = sd.num_spliced;
...@@ -1348,8 +1341,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *iov, ...@@ -1348,8 +1341,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
if (!pipe) if (!pipe)
return -EBADF; return -EBADF;
if (pipe->inode) pipe_lock(pipe);
mutex_lock(&pipe->inode->i_mutex);
error = ret = 0; error = ret = 0;
while (nr_segs) { while (nr_segs) {
...@@ -1404,8 +1396,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *iov, ...@@ -1404,8 +1396,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
iov++; iov++;
} }
if (pipe->inode) pipe_unlock(pipe);
mutex_unlock(&pipe->inode->i_mutex);
if (!ret) if (!ret)
ret = error; ret = error;
...@@ -1533,7 +1524,7 @@ static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags) ...@@ -1533,7 +1524,7 @@ static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
return 0; return 0;
ret = 0; ret = 0;
mutex_lock(&pipe->inode->i_mutex); pipe_lock(pipe);
while (!pipe->nrbufs) { while (!pipe->nrbufs) {
if (signal_pending(current)) { if (signal_pending(current)) {
...@@ -1551,7 +1542,7 @@ static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags) ...@@ -1551,7 +1542,7 @@ static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
pipe_wait(pipe); pipe_wait(pipe);
} }
mutex_unlock(&pipe->inode->i_mutex); pipe_unlock(pipe);
return ret; return ret;
} }
...@@ -1571,7 +1562,7 @@ static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) ...@@ -1571,7 +1562,7 @@ static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
return 0; return 0;
ret = 0; ret = 0;
mutex_lock(&pipe->inode->i_mutex); pipe_lock(pipe);
while (pipe->nrbufs >= PIPE_BUFFERS) { while (pipe->nrbufs >= PIPE_BUFFERS) {
if (!pipe->readers) { if (!pipe->readers) {
...@@ -1592,7 +1583,7 @@ static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) ...@@ -1592,7 +1583,7 @@ static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
pipe->waiting_writers--; pipe->waiting_writers--;
} }
mutex_unlock(&pipe->inode->i_mutex); pipe_unlock(pipe);
return ret; return ret;
} }
...@@ -1608,10 +1599,10 @@ static int link_pipe(struct pipe_inode_info *ipipe, ...@@ -1608,10 +1599,10 @@ static int link_pipe(struct pipe_inode_info *ipipe,
/* /*
* Potential ABBA deadlock, work around it by ordering lock * Potential ABBA deadlock, work around it by ordering lock
* grabbing by inode address. Otherwise two different processes * grabbing by pipe info address. Otherwise two different processes
* could deadlock (one doing tee from A -> B, the other from B -> A). * could deadlock (one doing tee from A -> B, the other from B -> A).
*/ */
inode_double_lock(ipipe->inode, opipe->inode); pipe_double_lock(ipipe, opipe);
do { do {
if (!opipe->readers) { if (!opipe->readers) {
...@@ -1662,7 +1653,8 @@ static int link_pipe(struct pipe_inode_info *ipipe, ...@@ -1662,7 +1653,8 @@ static int link_pipe(struct pipe_inode_info *ipipe,
if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK)) if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
ret = -EAGAIN; ret = -EAGAIN;
inode_double_unlock(ipipe->inode, opipe->inode); pipe_unlock(ipipe);
pipe_unlock(opipe);
/* /*
* If we put data in the output pipe, wakeup any potential readers. * If we put data in the output pipe, wakeup any potential readers.
......
...@@ -797,9 +797,6 @@ enum inode_i_mutex_lock_class ...@@ -797,9 +797,6 @@ enum inode_i_mutex_lock_class
I_MUTEX_QUOTA I_MUTEX_QUOTA
}; };
extern void inode_double_lock(struct inode *inode1, struct inode *inode2);
extern void inode_double_unlock(struct inode *inode1, struct inode *inode2);
/* /*
* NOTE: in a 32bit arch with a preemptable kernel and * NOTE: in a 32bit arch with a preemptable kernel and
* an UP compile the i_size_read/write must be atomic * an UP compile the i_size_read/write must be atomic
......
...@@ -134,6 +134,11 @@ struct pipe_buf_operations { ...@@ -134,6 +134,11 @@ struct pipe_buf_operations {
memory allocation, whereas PIPE_BUF makes atomicity guarantees. */ memory allocation, whereas PIPE_BUF makes atomicity guarantees. */
#define PIPE_SIZE PAGE_SIZE #define PIPE_SIZE PAGE_SIZE
/* Pipe lock and unlock operations */
void pipe_lock(struct pipe_inode_info *);
void pipe_unlock(struct pipe_inode_info *);
void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
/* Drop the inode semaphore and wait for a pipe event, atomically */ /* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe); void pipe_wait(struct pipe_inode_info *pipe);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment