Commit 9b1d21f8 authored by Julio M. Merino Vidal, committed by Paul Mackerras

[POWERPC] spufs: fix typos in sched.c comments

Fix a few typos in the spufs scheduler comments
Signed-off-by: Julio M. Merino Vidal <jmerino@ac.upc.edu>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent c25620d7
@@ -105,15 +105,15 @@ void spu_set_timeslice(struct spu_context *ctx)
 void __spu_update_sched_info(struct spu_context *ctx)
 {
 	/*
-	 * 32-Bit assignment are atomic on powerpc, and we don't care about
-	 * memory ordering here because retriving the controlling thread is
-	 * per defintion racy.
+	 * 32-Bit assignments are atomic on powerpc, and we don't care about
+	 * memory ordering here because retrieving the controlling thread is
+	 * per definition racy.
	 */
 	ctx->tid = current->pid;
 
 	/*
 	 * We do our own priority calculations, so we normally want
-	 * ->static_prio to start with. Unfortunately thies field
+	 * ->static_prio to start with. Unfortunately this field
 	 * contains junk for threads with a realtime scheduling
 	 * policy so we have to look at ->prio in this case.
 	 */
@@ -127,7 +127,7 @@ void __spu_update_sched_info(struct spu_context *ctx)
 	 * A lot of places that don't hold list_mutex poke into
 	 * cpus_allowed, including grab_runnable_context which
 	 * already holds the runq_lock. So abuse runq_lock
-	 * to protect this field aswell.
+	 * to protect this field as well.
 	 */
 	spin_lock(&spu_prio->runq_lock);
 	ctx->cpus_allowed = current->cpus_allowed;
@@ -182,7 +182,7 @@ static void notify_spus_active(void)
 	 * Wake up the active spu_contexts.
 	 *
 	 * When the awakened processes see their "notify_active" flag is set,
-	 * they will call spu_switch_notify();
+	 * they will call spu_switch_notify().
 	 */
 	for_each_online_node(node) {
 		struct spu *spu;
@@ -579,7 +579,7 @@ static struct spu *find_victim(struct spu_context *ctx)
 	/*
 	 * Look for a possible preemption candidate on the local node first.
 	 * If there is no candidate look at the other nodes. This isn't
-	 * exactly fair, but so far the whole spu schedule tries to keep
+	 * exactly fair, but so far the whole spu scheduler tries to keep
 	 * a strong node affinity. We might want to fine-tune this in
 	 * the future.
 	 */
@@ -905,7 +905,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private)
 	/*
 	 * Note that last_pid doesn't really make much sense for the
-	 * SPU loadavg (it even seems very odd on the CPU side..),
+	 * SPU loadavg (it even seems very odd on the CPU side...),
 	 * but we include it here to have a 100% compatible interface.
 	 */
 	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",