Commit eebead5b authored by Christoph Hellwig, committed by Paul Mackerras

[POWERPC] spufs: Fix state_mutex leaks

Fix various state_mutex leaks.  The worst one was introduced by the
interruptible state_mutex conversion, but there have been a few before
too.  Notably, spufs_wait now returns without the state_mutex held
when returning an error, which actually cleans up some code.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Luke Browning <lukebrowning@us.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 592a607b
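For context, the calling convention this patch establishes can be pictured outside the kernel. The following is a minimal user-space analogue using pthreads, not spufs code: ctx_wait_event(), read_one(), data_ready and interrupted are hypothetical names, and -EINTR stands in for -ERESTARTSYS. It illustrates the rule the callers below now follow: the wait helper returns holding the lock on success, but with the lock already dropped on error, so error paths must jump past the unlock.

/*
 * Minimal user-space sketch of the locking convention (illustrative only,
 * not spufs code).  On error the wait helper returns with the mutex
 * already dropped, so callers skip the unlock on that path.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  state_cond  = PTHREAD_COND_INITIALIZER;
static int data_ready;          /* stand-in for "mailbox has data" */
static int interrupted;         /* stand-in for signal_pending() */

/* Returns 0 with state_mutex held, or -EINTR with state_mutex dropped. */
static int ctx_wait_event(void)
{
        for (;;) {
                if (data_ready)
                        return 0;               /* mutex still held */
                if (interrupted) {
                        pthread_mutex_unlock(&state_mutex);
                        return -EINTR;          /* mutex dropped */
                }
                pthread_cond_wait(&state_cond, &state_mutex);
        }
}

static int read_one(unsigned int *val)
{
        int ret;

        pthread_mutex_lock(&state_mutex);
        ret = ctx_wait_event();
        if (ret)
                goto out;       /* do NOT unlock: the helper already dropped it */

        *val = 42;              /* hypothetical payload */
        pthread_mutex_unlock(&state_mutex);
out:
        return ret;
}

int main(void)
{
        unsigned int v = 0;

        data_ready = 1;
        printf("read_one() = %d, v = %u\n", read_one(&v), v);
        return 0;
}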
@@ -108,7 +108,7 @@ int spufs_handle_class1(struct spu_context *ctx)
 	u64 ea, dsisr, access;
 	unsigned long flags;
 	unsigned flt = 0;
-	int ret, ret2;
+	int ret;
 
 	/*
 	 * dar and dsisr get passed from the registers
@@ -148,13 +148,10 @@ int spufs_handle_class1(struct spu_context *ctx)
 		ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);
 
 	/*
-	 * If spu_acquire fails due to a pending signal we just want to return
-	 * EINTR to userspace even if that means missing the dma restart or
-	 * updating the page fault statistics.
+	 * This is nasty: we need the state_mutex for all the bookkeeping even
+	 * if the syscall was interrupted by a signal. ewww.
 	 */
-	ret2 = spu_acquire(ctx);
-	if (ret2)
-		goto out;
+	mutex_lock(&ctx->state_mutex);
 
 	/*
 	 * Clear dsisr under ctxt lock after handling the fault, so that
@@ -185,7 +182,6 @@ int spufs_handle_class1(struct spu_context *ctx)
 	} else
 		spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
 
-out:
 	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 	return ret;
 }
@@ -358,6 +358,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 {
 	struct spu_context *ctx = vma->vm_file->private_data;
 	unsigned long area, offset = address - vma->vm_start;
+	int ret = 0;
 
 	spu_context_nospu_trace(spufs_ps_nopfn__enter, ctx);
@@ -379,7 +380,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 	if (ctx->state == SPU_STATE_SAVED) {
 		up_read(&current->mm->mmap_sem);
 		spu_context_nospu_trace(spufs_ps_nopfn__sleep, ctx);
-		spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
+		ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
 		spu_context_trace(spufs_ps_nopfn__wake, ctx, ctx->spu);
 		down_read(&current->mm->mmap_sem);
 	} else {
@@ -388,7 +389,8 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 		spu_context_trace(spufs_ps_nopfn__insert, ctx, ctx->spu);
 	}
 
-	spu_release(ctx);
+	if (!ret)
+		spu_release(ctx);
 	return NOPFN_REFAULT;
 }
@@ -755,23 +757,25 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
 
 	count = spu_acquire(ctx);
 	if (count)
-		return count;
+		goto out;
 
 	/* wait only for the first element */
 	count = 0;
 	if (file->f_flags & O_NONBLOCK) {
-		if (!spu_ibox_read(ctx, &ibox_data))
+		if (!spu_ibox_read(ctx, &ibox_data)) {
 			count = -EAGAIN;
+			goto out_unlock;
+		}
 	} else {
 		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
+		if (count)
+			goto out;
 	}
-	if (count)
-		goto out;
 
 	/* if we can't write at all, return -EFAULT */
 	count = __put_user(ibox_data, udata);
 	if (count)
-		goto out;
+		goto out_unlock;
 
 	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
 		int ret;
@@ -788,9 +792,9 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
 		break;
 	}
 
-out:
+out_unlock:
 	spu_release(ctx);
+out:
 	return count;
 }
@@ -905,7 +909,7 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
 
 	count = spu_acquire(ctx);
 	if (count)
-		return count;
+		goto out;
 
 	/*
 	 * make sure we can at least write one element, by waiting
@@ -913,14 +917,16 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
 	 */
 	count = 0;
 	if (file->f_flags & O_NONBLOCK) {
-		if (!spu_wbox_write(ctx, wbox_data))
+		if (!spu_wbox_write(ctx, wbox_data)) {
 			count = -EAGAIN;
+			goto out_unlock;
+		}
 	} else {
 		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
+		if (count)
+			goto out;
 	}
-	if (count)
-		goto out;
 
 	/* write as much as possible */
 	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
@@ -934,8 +940,9 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
 		break;
 	}
 
-out:
+out_unlock:
 	spu_release(ctx);
+out:
 	return count;
 }
@@ -1598,12 +1605,11 @@ static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
 	} else {
 		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
+		if (ret)
+			goto out;
 	}
 	spu_release(ctx);
-	if (ret)
-		goto out;
 
 	ret = 4;
 	if (copy_to_user(buffer, &status, 4))
 		ret = -EFAULT;
@@ -1732,6 +1738,8 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
 		int status;
 		ret = spufs_wait(ctx->mfc_wq,
			   spu_send_mfc_command(ctx, cmd, &status));
+		if (ret)
+			goto out;
 		if (status)
 			ret = status;
 	}
@@ -1785,7 +1793,7 @@ static int spufs_mfc_flush(struct file *file, fl_owner_t id)
 
 	ret = spu_acquire(ctx);
 	if (ret)
-		return ret;
+		goto out;
 #if 0
 /* this currently hangs */
 	ret = spufs_wait(ctx->mfc_wq,
@@ -1794,12 +1802,13 @@ static int spufs_mfc_flush(struct file *file, fl_owner_t id)
 		goto out;
 	ret = spufs_wait(ctx->mfc_wq,
		   ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
-out:
+	if (ret)
+		goto out;
 #else
 	ret = 0;
 #endif
 	spu_release(ctx);
+out:
 	return ret;
 }
......
@@ -354,8 +354,15 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 	do {
 		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
-		if (unlikely(ret))
+		if (unlikely(ret)) {
+			/*
+			 * This is nasty: we need the state_mutex for all the
+			 * bookkeeping even if the syscall was interrupted by
+			 * a signal. ewww.
+			 */
+			mutex_lock(&ctx->state_mutex);
 			break;
+		}
 		spu = ctx->spu;
 		if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
 					&ctx->sched_flags))) {
......
@@ -268,6 +268,9 @@ extern char *isolated_loader;
  * Same as wait_event_interruptible(), except that here
  * we need to call spu_release(ctx) before sleeping, and
  * then spu_acquire(ctx) when awoken.
+ *
+ * Returns with state_mutex re-acquired when successfull or
+ * with -ERESTARTSYS and the state_mutex dropped when interrupted.
 */
 #define spufs_wait(wq, condition)					\
@@ -278,11 +281,11 @@ extern char *isolated_loader;
 		prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE);	\
 		if (condition)						\
 			break;						\
+		spu_release(ctx);					\
 		if (signal_pending(current)) {				\
 			__ret = -ERESTARTSYS;				\
 			break;						\
 		}							\
-		spu_release(ctx);					\
 		schedule();						\
 		__ret = spu_acquire(ctx);				\
 		if (__ret)						\
......