Commit f3d69e05 authored by Luke Browning, committed by Jeremy Kerr

[POWERPC] spufs: fix concurrent delivery of class 0 & 1 exceptions

SPU class 0 & 1 exceptions may occur in parallel, so we may end up
overwriting csa.dsisr.

This change adds dedicated fields for each class to the spu and the spu
context so that fault data is not overwritten.
Signed-off-by: Luke Browning <lukebr@linux.vnet.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
parent 7a214200
@@ -226,11 +226,13 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 		return 0;
 	}
 
-	spu->class_0_pending = 0;
-	spu->dar = ea;
-	spu->dsisr = dsisr;
+	spu->class_1_dar = ea;
+	spu->class_1_dsisr = dsisr;
 
-	spu->stop_callback(spu);
+	spu->stop_callback(spu, 1);
+
+	spu->class_1_dar = 0;
+	spu->class_1_dsisr = 0;
 
 	return 0;
 }
@@ -318,11 +320,15 @@ spu_irq_class_0(int irq, void *data)
 	stat = spu_int_stat_get(spu, 0) & mask;
 	spu->class_0_pending |= stat;
-	spu->dsisr = spu_mfc_dsisr_get(spu);
-	spu->dar = spu_mfc_dar_get(spu);
+	spu->class_0_dsisr = spu_mfc_dsisr_get(spu);
+	spu->class_0_dar = spu_mfc_dar_get(spu);
 	spin_unlock(&spu->register_lock);
 
-	spu->stop_callback(spu);
+	spu->stop_callback(spu, 0);
+
+	spu->class_0_pending = 0;
+	spu->class_0_dsisr = 0;
+	spu->class_0_dar = 0;
 
 	spu_int_stat_clear(spu, 0, stat);
@@ -363,6 +369,9 @@ spu_irq_class_1(int irq, void *data)
 	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
 		;
 
+	spu->class_1_dsisr = 0;
+	spu->class_1_dar = 0;
+
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }
@@ -396,10 +405,10 @@ spu_irq_class_2(int irq, void *data)
 		spu->ibox_callback(spu);
 
 	if (stat & CLASS2_SPU_STOP_INTR)
-		spu->stop_callback(spu);
+		spu->stop_callback(spu, 2);
 
 	if (stat & CLASS2_SPU_HALT_INTR)
-		spu->stop_callback(spu);
+		spu->stop_callback(spu, 2);
 
 	if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
 		spu->mfc_callback(spu);
......
@@ -83,13 +83,18 @@ int spufs_handle_class0(struct spu_context *ctx)
 		return 0;
 
 	if (stat & CLASS0_DMA_ALIGNMENT_INTR)
-		spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_DMA_ALIGNMENT);
+		spufs_handle_event(ctx, ctx->csa.class_0_dar,
+			SPE_EVENT_DMA_ALIGNMENT);
 
 	if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
-		spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_INVALID_DMA);
+		spufs_handle_event(ctx, ctx->csa.class_0_dar,
+			SPE_EVENT_INVALID_DMA);
 
 	if (stat & CLASS0_SPU_ERROR_INTR)
-		spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_SPE_ERROR);
+		spufs_handle_event(ctx, ctx->csa.class_0_dar,
+			SPE_EVENT_SPE_ERROR);
+
+	ctx->csa.class_0_pending = 0;
 
 	return -EIO;
 }
@@ -119,8 +124,8 @@ int spufs_handle_class1(struct spu_context *ctx)
 	 * in time, we can still expect to get the same fault
 	 * the immediately after the context restore.
 	 */
-	ea = ctx->csa.dar;
-	dsisr = ctx->csa.dsisr;
+	ea = ctx->csa.class_1_dar;
+	dsisr = ctx->csa.class_1_dsisr;
 
 	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
 		return 0;
@@ -158,7 +163,7 @@ int spufs_handle_class1(struct spu_context *ctx)
 	 * time slicing will not preempt the context while the page fault
 	 * handler is running. Context switch code removes mappings.
 	 */
-	ctx->csa.dar = ctx->csa.dsisr = 0;
+	ctx->csa.class_1_dar = ctx->csa.class_1_dsisr = 0;
 
 	/*
 	 * If we handled the fault successfully and are in runnable
......
@@ -11,7 +11,7 @@
 #include "spufs.h"
 
 /* interrupt-level stop callback function. */
-void spufs_stop_callback(struct spu *spu)
+void spufs_stop_callback(struct spu *spu, int irq)
 {
 	struct spu_context *ctx = spu->ctx;
@@ -24,9 +24,19 @@ void spufs_stop_callback(struct spu *spu)
 	 */
 	if (ctx) {
 		/* Copy exception arguments into module specific structure */
-		ctx->csa.class_0_pending = spu->class_0_pending;
-		ctx->csa.dsisr = spu->dsisr;
-		ctx->csa.dar = spu->dar;
+		switch(irq) {
+		case 0 :
+			ctx->csa.class_0_pending = spu->class_0_pending;
+			ctx->csa.class_0_dsisr = spu->class_0_dsisr;
+			ctx->csa.class_0_dar = spu->class_0_dar;
+			break;
+		case 1 :
+			ctx->csa.class_1_dsisr = spu->class_1_dsisr;
+			ctx->csa.class_1_dar = spu->class_1_dar;
+			break;
+		case 2 :
+			break;
+		}
 
 		/* ensure that the exception status has hit memory before a
 		 * thread waiting on the context's stop queue is woken */
@@ -34,11 +44,6 @@ void spufs_stop_callback(struct spu *spu)
 		wake_up_all(&ctx->stop_wq);
 	}
-
-	/* Clear callback arguments from spu structure */
-	spu->class_0_pending = 0;
-	spu->dsisr = 0;
-	spu->dar = 0;
 }
@@ -56,7 +61,11 @@ int spu_stopped(struct spu_context *ctx, u32 *stat)
 	if (!(*stat & SPU_STATUS_RUNNING) && (*stat & stopped))
 		return 1;
 
-	dsisr = ctx->csa.dsisr;
+	dsisr = ctx->csa.class_0_dsisr;
+	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
+		return 1;
+
+	dsisr = ctx->csa.class_1_dsisr;
 	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
 		return 1;
......
@@ -332,7 +332,7 @@ size_t spu_ibox_read(struct spu_context *ctx, u32 *data);
 /* irq callback funcs. */
 void spufs_ibox_callback(struct spu *spu);
 void spufs_wbox_callback(struct spu *spu);
-void spufs_stop_callback(struct spu *spu);
+void spufs_stop_callback(struct spu *spu, int irq);
 void spufs_mfc_callback(struct spu *spu);
 void spufs_dma_callback(struct spu *spu, int type);
......
@@ -2842,9 +2842,11 @@ static void dump_spu_fields(struct spu *spu)
 	DUMP_FIELD(spu, "0x%lx", ls_size);
 	DUMP_FIELD(spu, "0x%x", node);
 	DUMP_FIELD(spu, "0x%lx", flags);
-	DUMP_FIELD(spu, "0x%lx", dar);
-	DUMP_FIELD(spu, "0x%lx", dsisr);
 	DUMP_FIELD(spu, "%d", class_0_pending);
+	DUMP_FIELD(spu, "0x%lx", class_0_dar);
+	DUMP_FIELD(spu, "0x%lx", class_0_dsisr);
+	DUMP_FIELD(spu, "0x%lx", class_1_dar);
+	DUMP_FIELD(spu, "0x%lx", class_1_dsisr);
 	DUMP_FIELD(spu, "0x%lx", irqs[0]);
 	DUMP_FIELD(spu, "0x%lx", irqs[1]);
 	DUMP_FIELD(spu, "0x%lx", irqs[2]);
......
@@ -128,9 +128,11 @@ struct spu {
 	unsigned int irqs[3];
 	u32 node;
 	u64 flags;
-	u64 dar;
-	u64 dsisr;
 	u64 class_0_pending;
+	u64 class_0_dar;
+	u64 class_0_dsisr;
+	u64 class_1_dar;
+	u64 class_1_dsisr;
 	size_t ls_size;
 	unsigned int slb_replace;
 	struct mm_struct *mm;
@@ -143,7 +145,7 @@ struct spu {
 	void (* wbox_callback)(struct spu *spu);
 	void (* ibox_callback)(struct spu *spu);
-	void (* stop_callback)(struct spu *spu);
+	void (* stop_callback)(struct spu *spu, int irq);
 	void (* mfc_callback)(struct spu *spu);
 
 	char irq_c0[8];
......
@@ -254,7 +254,8 @@ struct spu_state {
 	u64 spu_chnldata_RW[32];
 	u32 spu_mailbox_data[4];
 	u32 pu_mailbox_data[1];
-	u64 dar, dsisr, class_0_pending;
+	u64 class_0_dar, class_0_dsisr, class_0_pending;
+	u64 class_1_dar, class_1_dsisr;
 	unsigned long suspend_time;
 	spinlock_t register_lock;
 };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment