Commit d6ad39bc authored by Jeremy Kerr, committed by Paul Mackerras

[POWERPC] spufs: rework class 0 and 1 interrupt handling

Based on original patches from
 Arnd Bergmann <arnd.bergman@de.ibm.com>; and
 Luke Browning <lukebr@linux.vnet.ibm.com>

Currently, spu contexts need to be loaded to the SPU in order to take
class 0 and class 1 exceptions.

This change makes the actual interrupt handlers much simpler (i.e., they just
set the exception information in the context save area), and defers the
handling code to the spufs_handle_class[01] functions, called from
spufs_run_spu.
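
In rough outline, the resulting flow looks like this (a condensed sketch pieced
together from the hunks below, not a complete listing; locking, scheduler-state
checks and the callback/event plumbing are elided):

    /* top half: the interrupt handlers only record the exception state in the
     * context save area and wake the controlling thread */
    void spufs_stop_callback(struct spu *spu)
    {
            struct spu_context *ctx = spu->ctx;

            if (ctx) {
                    ctx->csa.class_0_pending = spu->class_0_pending;
                    ctx->csa.dsisr = spu->dsisr;
                    ctx->csa.dar = spu->dar;
                    smp_wmb();
                    wake_up_all(&ctx->stop_wq);
            }
    }

    /* bottom half: spufs_run_spu() handles the saved exception in process
     * context, whether or not the context is still loaded on an SPU */
    do {
            ret = spufs_handle_class1(ctx);         /* class 1: page faults */
            if (ret)
                    break;

            ret = spufs_handle_class0(ctx);         /* class 0: DMA/SPU errors */
            if (ret)
                    break;
            /* ... reacquire / signal checks ... */
    } while (!ret && !(status & SPU_STATUS_STOPPED_BY_STOP));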

This should improve the concurrency of SPU scheduling, leading to
greater SPU utilization when SPUs are overcommitted.
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 8af30675
@@ -132,27 +132,6 @@ int spu_64k_pages_available(void)
 }
 EXPORT_SYMBOL_GPL(spu_64k_pages_available);

-static int __spu_trap_invalid_dma(struct spu *spu)
-{
-        pr_debug("%s\n", __FUNCTION__);
-        spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
-        return 0;
-}
-
-static int __spu_trap_dma_align(struct spu *spu)
-{
-        pr_debug("%s\n", __FUNCTION__);
-        spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
-        return 0;
-}
-
-static int __spu_trap_error(struct spu *spu)
-{
-        pr_debug("%s\n", __FUNCTION__);
-        spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
-        return 0;
-}
-
 static void spu_restart_dma(struct spu *spu)
 {
         struct spu_priv2 __iomem *priv2 = spu->priv2;

@@ -252,10 +231,12 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
                 return 1;
         }

+        spu->class_0_pending = 0;
         spu->dar = ea;
         spu->dsisr = dsisr;
-        mb();
+
         spu->stop_callback(spu);
+
         return 0;
 }

@@ -335,12 +316,13 @@ spu_irq_class_0(int irq, void *data)
         spu = data;

+        spin_lock(&spu->register_lock);
         mask = spu_int_mask_get(spu, 0);
-        stat = spu_int_stat_get(spu, 0);
-        stat &= mask;
+        stat = spu_int_stat_get(spu, 0) & mask;

-        spin_lock(&spu->register_lock);
         spu->class_0_pending |= stat;
+        spu->dsisr = spu_mfc_dsisr_get(spu);
+        spu->dar = spu_mfc_dar_get(spu);
         spin_unlock(&spu->register_lock);

         spu->stop_callback(spu);

@@ -350,31 +332,6 @@ spu_irq_class_0(int irq, void *data)
         return IRQ_HANDLED;
 }

-int
-spu_irq_class_0_bottom(struct spu *spu)
-{
-        unsigned long flags;
-        unsigned long stat;
-
-        spin_lock_irqsave(&spu->register_lock, flags);
-        stat = spu->class_0_pending;
-        spu->class_0_pending = 0;
-
-        if (stat & CLASS0_DMA_ALIGNMENT_INTR)
-                __spu_trap_dma_align(spu);
-
-        if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
-                __spu_trap_invalid_dma(spu);
-
-        if (stat & CLASS0_SPU_ERROR_INTR)
-                __spu_trap_error(spu);
-
-        spin_unlock_irqrestore(&spu->register_lock, flags);
-
-        return (stat & CLASS0_INTR_MASK) ? -EIO : 0;
-}
-EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
-
 static irqreturn_t
 spu_irq_class_1(int irq, void *data)
 {

@@ -28,46 +28,69 @@
 #include "spufs.h"

-static void spufs_handle_dma_error(struct spu_context *ctx,
+/**
+ * Handle an SPE event, depending on context SPU_CREATE_EVENTS_ENABLED flag.
+ *
+ * If the context was created with events, we just set the return event.
+ * Otherwise, send an appropriate signal to the process.
+ */
+static void spufs_handle_event(struct spu_context *ctx,
                 unsigned long ea, int type)
 {
+        siginfo_t info;
+
         if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
                 ctx->event_return |= type;
                 wake_up_all(&ctx->stop_wq);
-        } else {
-                siginfo_t info;
-                memset(&info, 0, sizeof(info));
-
-                switch (type) {
-                case SPE_EVENT_INVALID_DMA:
-                        info.si_signo = SIGBUS;
-                        info.si_code = BUS_OBJERR;
-                        break;
-                case SPE_EVENT_SPE_DATA_STORAGE:
-                        info.si_signo = SIGBUS;
-                        info.si_addr = (void __user *)ea;
-                        info.si_code = BUS_ADRERR;
-                        break;
-                case SPE_EVENT_DMA_ALIGNMENT:
-                        info.si_signo = SIGBUS;
-                        /* DAR isn't set for an alignment fault :( */
-                        info.si_code = BUS_ADRALN;
-                        break;
-                case SPE_EVENT_SPE_ERROR:
-                        info.si_signo = SIGILL;
-                        info.si_addr = (void __user *)(unsigned long)
-                                ctx->ops->npc_read(ctx) - 4;
-                        info.si_code = ILL_ILLOPC;
-                        break;
-                }
-
-                if (info.si_signo)
-                        force_sig_info(info.si_signo, &info, current);
+                return;
         }
+
+        memset(&info, 0, sizeof(info));
+
+        switch (type) {
+        case SPE_EVENT_INVALID_DMA:
+                info.si_signo = SIGBUS;
+                info.si_code = BUS_OBJERR;
+                break;
+        case SPE_EVENT_SPE_DATA_STORAGE:
+                info.si_signo = SIGBUS;
+                info.si_addr = (void __user *)ea;
+                info.si_code = BUS_ADRERR;
+                break;
+        case SPE_EVENT_DMA_ALIGNMENT:
+                info.si_signo = SIGBUS;
+                /* DAR isn't set for an alignment fault :( */
+                info.si_code = BUS_ADRALN;
+                break;
+        case SPE_EVENT_SPE_ERROR:
+                info.si_signo = SIGILL;
+                info.si_addr = (void __user *)(unsigned long)
+                        ctx->ops->npc_read(ctx) - 4;
+                info.si_code = ILL_ILLOPC;
+                break;
+        }
+
+        if (info.si_signo)
+                force_sig_info(info.si_signo, &info, current);
 }

-void spufs_dma_callback(struct spu *spu, int type)
+int spufs_handle_class0(struct spu_context *ctx)
 {
-        spufs_handle_dma_error(spu->ctx, spu->dar, type);
+        unsigned long stat = ctx->csa.class_0_pending & CLASS0_INTR_MASK;
+
+        if (likely(!stat))
+                return 0;
+
+        if (stat & CLASS0_DMA_ALIGNMENT_INTR)
+                spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_DMA_ALIGNMENT);
+
+        if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
+                spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_INVALID_DMA);
+
+        if (stat & CLASS0_SPU_ERROR_INTR)
+                spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_SPE_ERROR);
+
+        return -EIO;
 }

 /*

@@ -95,16 +118,8 @@ int spufs_handle_class1(struct spu_context *ctx)
          * in time, we can still expect to get the same fault
          * the immediately after the context restore.
          */
-        if (ctx->state == SPU_STATE_RUNNABLE) {
-                ea = ctx->spu->dar;
-                dsisr = ctx->spu->dsisr;
-                ctx->spu->dar= ctx->spu->dsisr = 0;
-        } else {
-                ea = ctx->csa.priv1.mfc_dar_RW;
-                dsisr = ctx->csa.priv1.mfc_dsisr_RW;
-                ctx->csa.priv1.mfc_dar_RW = 0;
-                ctx->csa.priv1.mfc_dsisr_RW = 0;
-        }
+        ea = ctx->csa.dar;
+        dsisr = ctx->csa.dsisr;

         if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
                 return 0;

@@ -132,6 +147,14 @@ int spufs_handle_class1(struct spu_context *ctx)
         ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);

         spu_acquire(ctx);
+
+        /*
+         * Clear dsisr under ctxt lock after handling the fault, so that
+         * time slicing will not preempt the context while the page fault
+         * handler is running. Context switch code removes mappings.
+         */
+        ctx->csa.dar = ctx->csa.dsisr = 0;
+
         /*
          * If we handled the fault successfully and are in runnable
          * state, restart the DMA.

@@ -152,7 +175,7 @@ int spufs_handle_class1(struct spu_context *ctx)
                 if (ctx->spu)
                         ctx->ops->restart_dma(ctx);
         } else
-                spufs_handle_dma_error(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
+                spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);

         spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
         return ret;

@@ -15,7 +15,30 @@ void spufs_stop_callback(struct spu *spu)
 {
         struct spu_context *ctx = spu->ctx;

-        wake_up_all(&ctx->stop_wq);
+        /*
+         * It should be impossible to preempt a context while an exception
+         * is being processed, since the context switch code is specially
+         * coded to deal with interrupts ... But, just in case, sanity check
+         * the context pointer. It is OK to return doing nothing since
+         * the exception will be regenerated when the context is resumed.
+         */
+        if (ctx) {
+                /* Copy exception arguments into module specific structure */
+                ctx->csa.class_0_pending = spu->class_0_pending;
+                ctx->csa.dsisr = spu->dsisr;
+                ctx->csa.dar = spu->dar;
+
+                /* ensure that the exception status has hit memory before a
+                 * thread waiting on the context's stop queue is woken */
+                smp_wmb();
+
+                wake_up_all(&ctx->stop_wq);
+        }
+
+        /* Clear callback arguments from spu structure */
+        spu->class_0_pending = 0;
+        spu->dsisr = 0;
+        spu->dar = 0;
 }

 static inline int spu_stopped(struct spu_context *ctx, u32 *stat)

@@ -29,9 +52,9 @@ static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
         if (ctx->state != SPU_STATE_RUNNABLE ||
             test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
                 return 1;
-        pte_fault = spu->dsisr &
+        pte_fault = ctx->csa.dsisr &
             (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
-        return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || spu->class_0_pending) ?
+        return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || ctx->csa.class_0_pending) ?
                 1 : 0;
 }

@@ -287,18 +310,6 @@ static int spu_process_callback(struct spu_context *ctx)
         return ret;
 }

-static inline int spu_process_events(struct spu_context *ctx)
-{
-        struct spu *spu = ctx->spu;
-        int ret = 0;
-
-        if (spu->class_0_pending)
-                ret = spu_irq_class_0_bottom(spu);
-        if (!ret && signal_pending(current))
-                ret = -ERESTARTSYS;
-
-        return ret;
-}
-
 long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 {
         int ret;

@@ -364,13 +375,20 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
                 if (ret)
                         break;

+                ret = spufs_handle_class0(ctx);
+                if (ret)
+                        break;
+
                 if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
                         ret = spu_reacquire_runnable(ctx, npc, &status);
                         if (ret)
                                 goto out2;
                         continue;
                 }
-                ret = spu_process_events(ctx);
+
+                if (signal_pending(current))
+                        ret = -ERESTARTSYS;

         } while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
                                      SPU_STATUS_STOPPED_BY_HALT |

@@ -245,7 +245,6 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
         spu->wbox_callback = spufs_wbox_callback;
         spu->stop_callback = spufs_stop_callback;
         spu->mfc_callback = spufs_mfc_callback;
-        spu->dma_callback = spufs_dma_callback;
         mb();
         spu_unmap_mappings(ctx);
         spu_restore(&ctx->csa, spu);

@@ -433,7 +432,6 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
         spu->wbox_callback = NULL;
         spu->stop_callback = NULL;
         spu->mfc_callback = NULL;
-        spu->dma_callback = NULL;
         spu_associate_mm(spu, NULL);
         spu->pid = 0;
         spu->tgid = 0;

@@ -222,6 +222,7 @@ void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);
 /* fault handling */
 int spufs_handle_class1(struct spu_context *ctx);
+int spufs_handle_class0(struct spu_context *ctx);

 /* affinity */
 struct spu *affinity_check(struct spu_context *ctx);

@@ -2077,10 +2077,6 @@ int spu_save(struct spu_state *prev, struct spu *spu)
         int rc;

         acquire_spu_lock(spu);          /* Step 1.     */
-        prev->dar = spu->dar;
-        prev->dsisr = spu->dsisr;
-        spu->dar = 0;
-        spu->dsisr = 0;
         rc = __do_spu_save(prev, spu);  /* Steps 2-53. */
         release_spu_lock(spu);
         if (rc != 0 && rc != 2 && rc != 6) {

@@ -2107,9 +2103,6 @@ int spu_restore(struct spu_state *new, struct spu *spu)
         acquire_spu_lock(spu);
         harvest(NULL, spu);
         spu->slb_replace = 0;
-        new->dar = 0;
-        new->dsisr = 0;
-        spu->class_0_pending = 0;
         rc = __do_spu_restore(new, spu);
         release_spu_lock(spu);
         if (rc) {

@@ -146,7 +146,6 @@ struct spu {
         void (* ibox_callback)(struct spu *spu);
         void (* stop_callback)(struct spu *spu);
         void (* mfc_callback)(struct spu *spu);
-        void (* dma_callback)(struct spu *spu, int type);

         char irq_c0[8];
         char irq_c1[8];

@@ -197,8 +196,6 @@ struct cbe_spu_info {
 extern struct cbe_spu_info cbe_spu_info[];

 void spu_init_channels(struct spu *spu);
-int spu_irq_class_0_bottom(struct spu *spu);
-int spu_irq_class_1_bottom(struct spu *spu);
 void spu_irq_setaffinity(struct spu *spu, int cpu);

 void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,

@@ -254,7 +254,7 @@ struct spu_state {
         u64 spu_chnldata_RW[32];
         u32 spu_mailbox_data[4];
         u32 pu_mailbox_data[1];
-        u64 dar, dsisr;
+        u64 dar, dsisr, class_0_pending;
         unsigned long suspend_time;
         spinlock_t register_lock;
 };