Commit 390c5343 authored by Arnd Bergmann's avatar Arnd Bergmann Committed by Arnd Bergmann

[POWERPC] spufs: add memory barriers after set_bit

set_bit does not guarantee ordering on powerpc, so using it
for communication between threads requires explicit
mb() calls.
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
parent e097b513
@@ -76,6 +76,7 @@ void spu_start_tick(struct spu_context *ctx)
		 * Make sure the exiting bit is cleared.
		 */
		clear_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
+		mb();
		queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
	}
}
@@ -88,6 +89,7 @@ void spu_stop_tick(struct spu_context *ctx)
		 * makes sure it does not rearm itself anymore.
		 */
		set_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
+		mb();
		cancel_delayed_work(&ctx->sched_work);
	}
}
@@ -239,6 +241,7 @@ static void spu_add_to_rq(struct spu_context *ctx)
	spin_lock(&spu_prio->runq_lock);
	list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
	set_bit(ctx->prio, spu_prio->bitmap);
+	mb();
	spin_unlock(&spu_prio->runq_lock);
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment