Commit 05fa785c authored by Thomas Gleixner

sched: Convert rq->lock to raw_spinlock

Convert locks which cannot be sleeping locks in preempt-rt to
raw_spinlocks.
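
In preempt-rt a plain spinlock_t becomes a sleeping, rtmutex-based lock, but
rq->lock is taken from hard interrupt context and deep inside the scheduler
with interrupts disabled, so it must remain a true spinning lock. The
conversion is therefore mechanical: the field type becomes raw_spinlock_t and
every accessor moves to the raw_spin_* API. A minimal sketch of the pattern
(the struct layout and helper below are illustrative, not the actual
kernel/sched.c code):

        #include <linux/spinlock.h>

        struct rq {
                raw_spinlock_t lock;    /* was: spinlock_t lock; */
                /* ... */
        };

        /* Illustrative helper, not a function from this patch. */
        static void touch_rq(struct rq *rq)
        {
                unsigned long flags;

                /* was: spin_lock_irqsave(&rq->lock, flags); */
                raw_spin_lock_irqsave(&rq->lock, flags);
                /* ... critical section; must not sleep, even on -rt ... */
                raw_spin_unlock_irqrestore(&rq->lock, flags);
        }

On a !PREEMPT_RT kernel the raw_spin_* operations compile to the same code as
their spin_* counterparts, so this change is behaviorally a no-op there.
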
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
parent a2672459
[The large diff for kernel/sched.c is collapsed in this view and not shown.]
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -184,7 +184,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
 			SPLIT_NS(cfs_rq->exec_clock));
 
-	spin_lock_irqsave(&rq->lock, flags);
+	raw_spin_lock_irqsave(&rq->lock, flags);
 	if (cfs_rq->rb_leftmost)
 		MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
 	last = __pick_last_entity(cfs_rq);
@@ -192,7 +192,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 		max_vruntime = last->vruntime;
 	min_vruntime = cfs_rq->min_vruntime;
 	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
-	spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
 			SPLIT_NS(MIN_vruntime));
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1955,7 +1955,7 @@ static void task_fork_fair(struct task_struct *p)
 	struct rq *rq = this_rq();
 	unsigned long flags;
 
-	spin_lock_irqsave(&rq->lock, flags);
+	raw_spin_lock_irqsave(&rq->lock, flags);
 
 	if (unlikely(task_cpu(p) != this_cpu))
 		__set_task_cpu(p, this_cpu);
@@ -1975,7 +1975,7 @@ static void task_fork_fair(struct task_struct *p)
 		resched_task(rq->curr);
 	}
 
-	spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 /*
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -34,10 +34,10 @@ static struct task_struct *pick_next_task_idle(struct rq *rq)
 static void
 dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
 {
-	spin_unlock_irq(&rq->lock);
+	raw_spin_unlock_irq(&rq->lock);
 	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
 	dump_stack();
-	spin_lock_irq(&rq->lock);
+	raw_spin_lock_irq(&rq->lock);
 }
 
 static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -454,9 +454,9 @@ static void disable_runtime(struct rq *rq)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&rq->lock, flags);
+	raw_spin_lock_irqsave(&rq->lock, flags);
 	__disable_runtime(rq);
-	spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 static void __enable_runtime(struct rq *rq)
@@ -486,9 +486,9 @@ static void enable_runtime(struct rq *rq)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&rq->lock, flags);
+	raw_spin_lock_irqsave(&rq->lock, flags);
 	__enable_runtime(rq);
-	spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 static int balance_runtime(struct rt_rq *rt_rq)
@@ -524,7 +524,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
 		struct rq *rq = rq_of_rt_rq(rt_rq);
 
-		spin_lock(&rq->lock);
+		raw_spin_lock(&rq->lock);
 		if (rt_rq->rt_time) {
 			u64 runtime;
 
@@ -545,7 +545,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 		if (enqueue)
 			sched_rt_rq_enqueue(rt_rq);
-		spin_unlock(&rq->lock);
+		raw_spin_unlock(&rq->lock);
 	}
 
 	return idle;
 }
@@ -1246,7 +1246,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 				     task_running(rq, task) ||
 				     !task->se.on_rq)) {
 
-				spin_unlock(&lowest_rq->lock);
+				raw_spin_unlock(&lowest_rq->lock);
 				lowest_rq = NULL;
 				break;
 			}
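
A note on the find_lock_lowest_rq() hunk above: double_lock_balance() may have
to drop this rq's lock in order to acquire both runqueue locks in a safe
order, and by the time both are held the task may have migrated or started
running, hence the recheck and the raw_spin_unlock() of lowest_rq->lock on
failure. A sketch of the lock-ordering idiom involved, assuming the usual
fixed-order convention; the function name is illustrative:

        /*
         * Take two rq locks without ABBA deadlock: always lock in
         * address order, so two CPUs locking the same pair agree on
         * which lock comes first. Illustrative sketch only.
         */
        static void double_rq_lock_sketch(struct rq *rq1, struct rq *rq2)
        {
                if (rq1 == rq2) {
                        raw_spin_lock(&rq1->lock);
                } else if (rq1 < rq2) {
                        raw_spin_lock(&rq1->lock);
                        raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
                } else {
                        raw_spin_lock(&rq2->lock);
                        raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
                }
        }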