Commit 3e3e13f3 authored by Ingo Molnar

sched: remove PREEMPT_RESTRICT

Remove PREEMPT_RESTRICT. (This is a separate commit so that any
regression related to the removal itself is bisectable.)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 52d3da1a
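For context: as the hunks below show, PREEMPT_RESTRICT (default-off in the feature mask) limited wakeup preemption between peers. An equal-priority waker could preempt the running task only while curr's peer_preempt count was still zero, with the tick path picking up any deferred reschedule. The following is a minimal userspace model of the wakeup decision before and after this commit; the types, the preempt_restrict_enabled flag, and the numeric values are stand-ins, not the kernel's own.

#include <stdio.h>

/* Stand-in types; names mirror the diff, not real kernel structures. */
struct task { int prio; };              /* lower value = higher priority */
struct entity { int peer_preempt; };    /* the field this commit removes */

static int preempt_restrict_enabled = 1;  /* models sched_feat(PREEMPT_RESTRICT) */

/* Old wakeup policy (only reached once the vruntime delta exceeds the
 * granularity): preempt if the feature is off, the waker has higher
 * priority, or curr has not been peer-preempted yet. */
static int should_preempt_old(struct task *p, struct task *curr,
			      struct entity *curr_se)
{
	int now = !preempt_restrict_enabled;

	return now || p->prio < curr->prio || !curr_se->peer_preempt++;
}

/* New policy after this commit: preempt from this path only when the
 * waker has strictly higher priority. */
static int should_preempt_new(struct task *p, struct task *curr)
{
	return p->prio < curr->prio;
}

int main(void)
{
	struct task waker = { .prio = 120 }, running = { .prio = 120 };
	struct entity curr_se = { .peer_preempt = 0 };

	printf("old, 1st equal-prio wakeup: %d\n",
	       should_preempt_old(&waker, &running, &curr_se));  /* 1 */
	printf("old, 2nd equal-prio wakeup: %d\n",
	       should_preempt_old(&waker, &running, &curr_se));  /* 0 */
	printf("new, equal-prio wakeup:     %d\n",
	       should_preempt_new(&waker, &running));            /* 0 */
	return 0;
}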
include/linux/sched.h
@@ -863,7 +863,6 @@ struct sched_entity {
 	struct load_weight	load;		/* for load-balancing */
 	struct rb_node		run_node;
 	unsigned int		on_rq;
-	int			peer_preempt;
 
 	u64			exec_start;
 	u64			sum_exec_runtime;
kernel/sched.c
@@ -460,7 +460,6 @@ enum {
 	SCHED_FEAT_TREE_AVG		= 4,
 	SCHED_FEAT_APPROX_AVG		= 8,
 	SCHED_FEAT_WAKEUP_PREEMPT	= 16,
-	SCHED_FEAT_PREEMPT_RESTRICT	= 32,
 };
@@ -468,8 +467,7 @@ const_debug unsigned int sysctl_sched_features =
 		SCHED_FEAT_START_DEBIT		* 1 |
 		SCHED_FEAT_TREE_AVG		* 0 |
 		SCHED_FEAT_APPROX_AVG		* 0 |
-		SCHED_FEAT_WAKEUP_PREEMPT	* 1 |
-		SCHED_FEAT_PREEMPT_RESTRICT	* 0;
+		SCHED_FEAT_WAKEUP_PREEMPT	* 1;
 
 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
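The feature mask above follows a simple pattern: each feature is a power-of-two bit, the default set is built by multiplying each bit by 0 or 1 and OR-ing the results, and the token-pasting sched_feat() macro tests a bit by name. Removing a feature therefore only means deleting its enum entry and its line in the product, as this commit does. A small userspace sketch of the same idiom (abbreviated names, not the kernel's definitions):

#include <stdio.h>

/* Same idiom as the kernel's sched_feat flags, in miniature. */
enum {
	FEAT_TREE_AVG		= 4,
	FEAT_APPROX_AVG		= 8,
	FEAT_WAKEUP_PREEMPT	= 16,
};

/* Multiplying by 0 or 1 keeps every flag visible while picking defaults;
 * * binds tighter than |, so no extra parentheses are needed. */
static const unsigned int features =
		FEAT_TREE_AVG		* 0 |
		FEAT_APPROX_AVG		* 0 |
		FEAT_WAKEUP_PREEMPT	* 1;

#define feat(x) (features & FEAT_##x)

int main(void)
{
	printf("WAKEUP_PREEMPT: %s\n", feat(WAKEUP_PREEMPT) ? "on" : "off");
	printf("TREE_AVG:       %s\n", feat(TREE_AVG) ? "on" : "off");
	return 0;
}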
...@@ -546,7 +546,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep) ...@@ -546,7 +546,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
update_stats_dequeue(cfs_rq, se); update_stats_dequeue(cfs_rq, se);
if (sleep) { if (sleep) {
se->peer_preempt = 0;
#ifdef CONFIG_SCHEDSTATS #ifdef CONFIG_SCHEDSTATS
if (entity_is_task(se)) { if (entity_is_task(se)) {
struct task_struct *tsk = task_of(se); struct task_struct *tsk = task_of(se);
@@ -574,10 +573,8 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 
 	ideal_runtime = sched_slice(cfs_rq, curr);
 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-	if (delta_exec > ideal_runtime ||
-			(sched_feat(PREEMPT_RESTRICT) && curr->peer_preempt))
+	if (delta_exec > ideal_runtime)
 		resched_task(rq_of(cfs_rq)->curr);
-	curr->peer_preempt = 0;
 }
 
 static void
@@ -867,9 +864,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 		gran = calc_delta_fair(gran, &se->load);
 
 	if (delta > gran) {
-		int now = !sched_feat(PREEMPT_RESTRICT);
-
-		if (now || p->prio < curr->prio || !se->peer_preempt++)
+		if (p->prio < curr->prio)
 			resched_task(curr);
 	}
 }
@@ -1083,7 +1078,6 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 		swap(curr->vruntime, se->vruntime);
 	}
 
-	se->peer_preempt = 0;
 	enqueue_task_fair(rq, p, 0);
 	resched_task(rq->curr);
 }