Commit 97fb7a0a authored by Ingo Molnar

sched: Clean up and harmonize the coding style of the scheduler code base

A good number of small style inconsistencies have accumulated
in the scheduler core, so do a pass over them to harmonize
all these details:

 - fix spelling in comments,

 - use curly braces for multi-line statements,

 - remove unnecessary parentheses from integer literals,

 - capitalize consistently,

 - remove stray newlines,

 - add comments where necessary,

 - remove invalid/unnecessary comments,

 - align structure definitions and other data types vertically,

 - add missing newlines for increased readability,

 - fix vertical tabulation where it's misaligned,

 - harmonize preprocessor conditional block labeling
   and vertical alignment,

 - remove line-breaks where they uglify the code,

 - add newline after local variable definitions,

No change in functionality:

  md5:
     1191fa0a890cfa8132156d2959d7e9e2  built-in.o.before.asm
     1191fa0a890cfa8132156d2959d7e9e2  built-in.o.after.asm

Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c2e51382
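
To make the listed conventions concrete, here is a small, hypothetical before/after sketch. It is not taken from this patch -- the struct and function names are invented for illustration only -- and it shows three of the rules above: curly braces around multi-line statements, vertical alignment of structure members, and capitalized, labeled comments.

        /* Before: */
        struct example_stats {
                unsigned long load_avg;         /* invented fields, for illustration only */
                unsigned int nr_running;
        };

        static void example_update(struct example_stats *es, long delta)
        {
                if (delta)
                        /* fold the delta into the running average */
                        es->load_avg += delta;
        }

        /* After: */
        struct example_stats {
                unsigned long           load_avg;       /* invented fields, for illustration only */
                unsigned int            nr_running;
        };

        static void example_update(struct example_stats *es, long delta)
        {
                if (delta) {
                        /* Fold the delta into the running average: */
                        es->load_avg += delta;
                }
        }

The diff below is reconstructed from the original side-by-side view into unified form (context lines prefixed with a space, removed lines with '-', added lines with '+').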
@@ -168,18 +168,19 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 autogroup_kref_put(prev);
 }
-/* Allocates GFP_KERNEL, cannot be called under any spinlock */
+/* Allocates GFP_KERNEL, cannot be called under any spinlock: */
 void sched_autogroup_create_attach(struct task_struct *p)
 {
 struct autogroup *ag = autogroup_create();
 autogroup_move_group(p, ag);
-/* drop extra reference added by autogroup_create() */
+/* Drop extra reference added by autogroup_create(): */
 autogroup_kref_put(ag);
 }
 EXPORT_SYMBOL(sched_autogroup_create_attach);
-/* Cannot be called under siglock. Currently has no users */
+/* Cannot be called under siglock. Currently has no users: */
 void sched_autogroup_detach(struct task_struct *p)
 {
 autogroup_move_group(p, &autogroup_default);
@@ -202,7 +203,6 @@ static int __init setup_autogroup(char *str)
 return 1;
 }
 __setup("noautogroup", setup_autogroup);
 #ifdef CONFIG_PROC_FS
@@ -224,7 +224,7 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
 if (nice < 0 && !can_nice(current, nice))
 return -EPERM;
-/* this is a heavy operation taking global locks.. */
+/* This is a heavy operation, taking global locks.. */
 if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next))
 return -EAGAIN;
@@ -267,4 +267,4 @@ int autogroup_path(struct task_group *tg, char *buf, int buflen)
 return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
 }
-#endif /* CONFIG_SCHED_DEBUG */
+#endif

@@ -7,9 +7,9 @@
 struct autogroup {
 /*
-* reference doesn't mean how many thread attach to this
-* autogroup now. It just stands for the number of task
-* could use this autogroup.
+* Reference doesn't mean how many threads attach to this
+* autogroup now. It just stands for the number of tasks
+* which could use this autogroup.
 */
 struct kref kref;
 struct task_group *tg;
@@ -56,11 +56,9 @@ autogroup_task_group(struct task_struct *p, struct task_group *tg)
 return tg;
 }
-#ifdef CONFIG_SCHED_DEBUG
 static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
 {
 return 0;
 }
-#endif
 #endif /* CONFIG_SCHED_AUTOGROUP */

 /*
-* sched_clock for unstable cpu clocks
+* sched_clock() for unstable CPU clocks
 *
 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
 *
@@ -11,7 +11,7 @@
 * Guillaume Chazarain <guichaz@gmail.com>
 *
 *
-* What:
+* What this file implements:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
@@ -26,11 +26,11 @@
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i) -- can be used from any context, including NMI.
-* local_clock() -- is cpu_clock() on the current cpu.
+* local_clock() -- is cpu_clock() on the current CPU.
 *
 * sched_clock_cpu(i)
 *
-* How:
+* How it is implemented:
 *
 * The implementation either uses sched_clock() when
 * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, which means in that case the
@@ -302,21 +302,21 @@ static u64 sched_clock_remote(struct sched_clock_data *scd)
 * cmpxchg64 below only protects one readout.
 *
 * We must reread via sched_clock_local() in the retry case on
-* 32bit as an NMI could use sched_clock_local() via the
+* 32-bit kernels as an NMI could use sched_clock_local() via the
 * tracer and hit between the readout of
-* the low32bit and the high 32bit portion.
+* the low 32-bit and the high 32-bit portion.
 */
 this_clock = sched_clock_local(my_scd);
 /*
-* We must enforce atomic readout on 32bit, otherwise the
-* update on the remote cpu can hit inbetween the readout of
-* the low32bit and the high 32bit portion.
+* We must enforce atomic readout on 32-bit, otherwise the
+* update on the remote CPU can hit inbetween the readout of
+* the low 32-bit and the high 32-bit portion.
 */
 remote_clock = cmpxchg64(&scd->clock, 0, 0);
 #else
 /*
-* On 64bit the read of [my]scd->clock is atomic versus the
-* update, so we can avoid the above 32bit dance.
+* On 64-bit kernels the read of [my]scd->clock is atomic versus the
+* update, so we can avoid the above 32-bit dance.
 */
 sched_clock_local(my_scd);
 again:

@@ -135,7 +135,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 * [L] ->on_rq
 * RELEASE (rq->lock)
 *
-* If we observe the old cpu in task_rq_lock, the acquire of
+* If we observe the old CPU in task_rq_lock, the acquire of
 * the old rq->lock will fully serialize against the stores.
 *
 * If we observe the new CPU in task_rq_lock, the acquire will
@@ -1457,7 +1457,7 @@ EXPORT_SYMBOL_GPL(kick_process);
 *
 * - cpu_active must be a subset of cpu_online
 *
-* - on cpu-up we allow per-cpu kthreads on the online && !active cpu,
+* - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
 * see __set_cpus_allowed_ptr(). At this point the newly online
 * CPU isn't yet part of the sched domains, and balancing will not
 * see it.
@@ -3037,7 +3037,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
 /*
-* 64-bit doesn't need locks to atomically read a 64bit value.
+* 64-bit doesn't need locks to atomically read a 64-bit value.
 * So we have a optimization chance when the task's delta_exec is 0.
 * Reading ->on_cpu is racy, but this is ok.
 *

@@ -18,7 +18,7 @@
 * (balbir@in.ibm.com).
 */
-/* Time spent by the tasks of the cpu accounting group executing in ... */
+/* Time spent by the tasks of the CPU accounting group executing in ... */
 enum cpuacct_stat_index {
 CPUACCT_STAT_USER, /* ... user mode */
 CPUACCT_STAT_SYSTEM, /* ... kernel mode */
@@ -35,12 +35,12 @@ struct cpuacct_usage {
 u64 usages[CPUACCT_STAT_NSTATS];
 };
-/* track cpu usage of a group of tasks and its child groups */
+/* track CPU usage of a group of tasks and its child groups */
 struct cpuacct {
 struct cgroup_subsys_state css;
-/* cpuusage holds pointer to a u64-type object on every cpu */
+/* cpuusage holds pointer to a u64-type object on every CPU */
 struct cpuacct_usage __percpu *cpuusage;
 struct kernel_cpustat __percpu *cpustat;
 };
 static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)
@@ -48,7 +48,7 @@ static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)
 return css ? container_of(css, struct cpuacct, css) : NULL;
 }
-/* return cpu accounting group to which this task belongs */
+/* Return CPU accounting group to which this task belongs */
 static inline struct cpuacct *task_ca(struct task_struct *tsk)
 {
 return css_ca(task_css(tsk, cpuacct_cgrp_id));
@@ -65,7 +65,7 @@ static struct cpuacct root_cpuacct = {
 .cpuusage = &root_cpuacct_cpuusage,
 };
-/* create a new cpu accounting group */
+/* Create a new CPU accounting group */
 static struct cgroup_subsys_state *
 cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
 {
@@ -96,7 +96,7 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
 return ERR_PTR(-ENOMEM);
 }
-/* destroy an existing cpu accounting group */
+/* Destroy an existing CPU accounting group */
 static void cpuacct_css_free(struct cgroup_subsys_state *css)
 {
 struct cpuacct *ca = css_ca(css);
@@ -162,7 +162,7 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
 #endif
 }
-/* return total cpu usage (in nanoseconds) of a group */
+/* Return total CPU usage (in nanoseconds) of a group */
 static u64 __cpuusage_read(struct cgroup_subsys_state *css,
 enum cpuacct_stat_index index)
 {

@@ -10,7 +10,6 @@
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
 #include <linux/gfp.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -147,9 +146,9 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 }
 /*
-* cpudl_clear - remove a cpu from the cpudl max-heap
+* cpudl_clear - remove a CPU from the cpudl max-heap
 * @cp: the cpudl max-heap context
-* @cpu: the target cpu
+* @cpu: the target CPU
 *
 * Notes: assumes cpu_rq(cpu)->lock is locked
 *
@@ -188,8 +187,8 @@ void cpudl_clear(struct cpudl *cp, int cpu)
 /*
 * cpudl_set - update the cpudl max-heap
 * @cp: the cpudl max-heap context
-* @cpu: the target cpu
-* @dl: the new earliest deadline for this cpu
+* @cpu: the target CPU
+* @dl: the new earliest deadline for this CPU
 *
 * Notes: assumes cpu_rq(cpu)->lock is locked
 *
@@ -224,7 +223,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
 /*
 * cpudl_set_freecpu - Set the cpudl.free_cpus
 * @cp: the cpudl max-heap context
-* @cpu: rd attached cpu
+* @cpu: rd attached CPU
 */
 void cpudl_set_freecpu(struct cpudl *cp, int cpu)
 {
@@ -234,7 +233,7 @@ void cpudl_set_freecpu(struct cpudl *cp, int cpu)
 /*
 * cpudl_clear_freecpu - Clear the cpudl.free_cpus
 * @cp: the cpudl max-heap context
-* @cpu: rd attached cpu
+* @cpu: rd attached CPU
 */
 void cpudl_clear_freecpu(struct cpudl *cp, int cpu)
 {

 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_CPUDL_H
-#define _LINUX_CPUDL_H
 #include <linux/sched.h>
 #include <linux/sched/deadline.h>
 #define IDX_INVALID -1
 struct cpudl_item {
 u64 dl;
 int cpu;
 int idx;
 };
 struct cpudl {
 raw_spinlock_t lock;
 int size;
 cpumask_var_t free_cpus;
 struct cpudl_item *elements;
 };
 #ifdef CONFIG_SMP
-int cpudl_find(struct cpudl *cp, struct task_struct *p,
-struct cpumask *later_mask);
+int cpudl_find(struct cpudl *cp, struct task_struct *p, struct cpumask *later_mask);
 void cpudl_set(struct cpudl *cp, int cpu, u64 dl);
 void cpudl_clear(struct cpudl *cp, int cpu);
 int cpudl_init(struct cpudl *cp);
 void cpudl_set_freecpu(struct cpudl *cp, int cpu);
 void cpudl_clear_freecpu(struct cpudl *cp, int cpu);
 void cpudl_cleanup(struct cpudl *cp);
 #endif /* CONFIG_SMP */
-#endif /* _LINUX_CPUDL_H */

@@ -20,52 +20,52 @@
 #include "sched.h"
 struct sugov_tunables {
 struct gov_attr_set attr_set;
 unsigned int rate_limit_us;
 };
 struct sugov_policy {
 struct cpufreq_policy *policy;
 struct sugov_tunables *tunables;
 struct list_head tunables_hook;
 raw_spinlock_t update_lock; /* For shared policies */
 u64 last_freq_update_time;
 s64 freq_update_delay_ns;
 unsigned int next_freq;
 unsigned int cached_raw_freq;
-/* The next fields are only needed if fast switch cannot be used. */
+/* The next fields are only needed if fast switch cannot be used: */
 struct irq_work irq_work;
 struct kthread_work work;
 struct mutex work_lock;
 struct kthread_worker worker;
 struct task_struct *thread;
 bool work_in_progress;
 bool need_freq_update;
 };
 struct sugov_cpu {
 struct update_util_data update_util;
 struct sugov_policy *sg_policy;
 unsigned int cpu;
 bool iowait_boost_pending;
 unsigned int iowait_boost;
 unsigned int iowait_boost_max;
 u64 last_update;
-/* The fields below are only needed when sharing a policy. */
+/* The fields below are only needed when sharing a policy: */
 unsigned long util_cfs;
 unsigned long util_dl;
 unsigned long max;
 unsigned int flags;
-/* The field below is for single-CPU policies only. */
+/* The field below is for single-CPU policies only: */
 #ifdef CONFIG_NO_HZ_COMMON
 unsigned long saved_idle_calls;
 #endif
 };
@@ -79,9 +79,9 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 /*
 * Since cpufreq_update_util() is called with rq->lock held for
-* the @target_cpu, our per-cpu data is fully serialized.
+* the @target_cpu, our per-CPU data is fully serialized.
 *
-* However, drivers cannot in general deal with cross-cpu
+* However, drivers cannot in general deal with cross-CPU
 * requests, so while get_next_freq() will work, our
 * sugov_update_commit() call may not for the fast switching platforms.
 *
@@ -111,6 +111,7 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 }
 delta_ns = time - sg_policy->last_freq_update_time;
 return delta_ns >= sg_policy->freq_update_delay_ns;
 }
@@ -345,8 +346,8 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 return get_next_freq(sg_policy, util, max);
 }
-static void sugov_update_shared(struct update_util_data *hook, u64 time,
-unsigned int flags)
+static void
+sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
 {
 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
 struct sugov_policy *sg_policy = sg_cpu->sg_policy;
@@ -423,8 +424,8 @@ static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
 return sprintf(buf, "%u\n", tunables->rate_limit_us);
 }
-static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf,
-size_t count)
+static ssize_t
+rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
 {
 struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
 struct sugov_policy *sg_policy;
@@ -479,11 +480,11 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
 {
 struct task_struct *thread;
 struct sched_attr attr = {
 .size = sizeof(struct sched_attr),
 .sched_policy = SCHED_DEADLINE,
 .sched_flags = SCHED_FLAG_SUGOV,
 .sched_nice = 0,
 .sched_priority = 0,
 /*
 * Fake (unused) bandwidth; workaround to "fix"
 * priority inheritance.
@@ -663,21 +664,21 @@ static int sugov_start(struct cpufreq_policy *policy)
 struct sugov_policy *sg_policy = policy->governor_data;
 unsigned int cpu;
 sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
 sg_policy->last_freq_update_time = 0;
 sg_policy->next_freq = UINT_MAX;
 sg_policy->work_in_progress = false;
 sg_policy->need_freq_update = false;
 sg_policy->cached_raw_freq = 0;
 for_each_cpu(cpu, policy->cpus) {
 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
 memset(sg_cpu, 0, sizeof(*sg_cpu));
 sg_cpu->cpu = cpu;
 sg_cpu->sg_policy = sg_policy;
 sg_cpu->flags = 0;
 sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
 }
 for_each_cpu(cpu, policy->cpus) {
@@ -721,14 +722,14 @@ static void sugov_limits(struct cpufreq_policy *policy)
 }
 static struct cpufreq_governor schedutil_gov = {
 .name = "schedutil",
 .owner = THIS_MODULE,
 .dynamic_switching = true,
 .init = sugov_init,
 .exit = sugov_exit,
 .start = sugov_start,
 .stop = sugov_stop,
 .limits = sugov_limits,
 };
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL

@@ -14,7 +14,7 @@
 *
 * going from the lowest priority to the highest. CPUs in the INVALID state
 * are not eligible for routing. The system maintains this state with
-* a 2 dimensional bitmap (the first for priority class, the second for cpus
+* a 2 dimensional bitmap (the first for priority class, the second for CPUs
 * in that class). Therefore a typical application without affinity
 * restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
 * searches). For tasks with affinity restrictions, the algorithm has a
@@ -26,7 +26,6 @@
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
 #include <linux/gfp.h>
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
@@ -128,9 +127,9 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 }
 /**
-* cpupri_set - update the cpu priority setting
+* cpupri_set - update the CPU priority setting
 * @cp: The cpupri context
-* @cpu: The target cpu
+* @cpu: The target CPU
 * @newpri: The priority (INVALID-RT99) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
@@ -151,7 +150,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 return;
 /*
-* If the cpu was currently mapped to a different value, we
+* If the CPU was currently mapped to a different value, we
 * need to map it to the new value then remove the old value.
 * Note, we must add the new value first, otherwise we risk the
 * cpu being missed by the priority loop in cpupri_find.

 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_CPUPRI_H
-#define _LINUX_CPUPRI_H
 #include <linux/sched.h>
 #define CPUPRI_NR_PRIORITIES (MAX_RT_PRIO + 2)
 #define CPUPRI_INVALID -1
 #define CPUPRI_IDLE 0
 #define CPUPRI_NORMAL 1
 /* values 2-101 are RT priorities 0-99 */
 struct cpupri_vec {
 atomic_t count;
 cpumask_var_t mask;
 };
 struct cpupri {
 struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES];
 int *cpu_to_pri;
 };
 #ifdef CONFIG_SMP
-int cpupri_find(struct cpupri *cp,
-struct task_struct *p, struct cpumask *lowest_mask);
+int cpupri_find(struct cpupri *cp, struct task_struct *p, struct cpumask *lowest_mask);
 void cpupri_set(struct cpupri *cp, int cpu, int pri);
 int cpupri_init(struct cpupri *cp);
 void cpupri_cleanup(struct cpupri *cp);
 #endif
-#endif /* _LINUX_CPUPRI_H */

@@ -113,9 +113,9 @@ static inline void task_group_account_field(struct task_struct *p, int index,
 }
 /*
-* Account user cpu time to a process.
-* @p: the process that the cpu time gets accounted to
-* @cputime: the cpu time spent in user space since the last update
+* Account user CPU time to a process.
+* @p: the process that the CPU time gets accounted to
+* @cputime: the CPU time spent in user space since the last update
 */
 void account_user_time(struct task_struct *p, u64 cputime)
 {
@@ -135,9 +135,9 @@ void account_user_time(struct task_struct *p, u64 cputime)
 }
 /*
-* Account guest cpu time to a process.
-* @p: the process that the cpu time gets accounted to
-* @cputime: the cpu time spent in virtual machine since the last update
+* Account guest CPU time to a process.
+* @p: the process that the CPU time gets accounted to
+* @cputime: the CPU time spent in virtual machine since the last update
 */
 void account_guest_time(struct task_struct *p, u64 cputime)
 {
@@ -159,9 +159,9 @@ void account_guest_time(struct task_struct *p, u64 cputime)
 }
 /*
-* Account system cpu time to a process and desired cpustat field
-* @p: the process that the cpu time gets accounted to
-* @cputime: the cpu time spent in kernel space since the last update
+* Account system CPU time to a process and desired cpustat field
+* @p: the process that the CPU time gets accounted to
+* @cputime: the CPU time spent in kernel space since the last update
 * @index: pointer to cpustat field that has to be updated
 */
 void account_system_index_time(struct task_struct *p,
@@ -179,10 +179,10 @@ void account_system_index_time(struct task_struct *p,
 }
 /*
-* Account system cpu time to a process.
-* @p: the process that the cpu time gets accounted to
+* Account system CPU time to a process.
+* @p: the process that the CPU time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
-* @cputime: the cpu time spent in kernel space since the last update
+* @cputime: the CPU time spent in kernel space since the last update
 */
 void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
 {
@@ -205,7 +205,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
 /*
 * Account for involuntary wait time.
-* @cputime: the cpu time spent in involuntary wait
+* @cputime: the CPU time spent in involuntary wait
 */
 void account_steal_time(u64 cputime)
 {
@@ -216,7 +216,7 @@ void account_steal_time(u64 cputime)
 /*
 * Account for idle time.
-* @cputime: the cpu time spent in idle wait
+* @cputime: the CPU time spent in idle wait
 */
 void account_idle_time(u64 cputime)
 {
@@ -338,7 +338,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 /*
 * Account a tick to a process and cpustat
-* @p: the process that the cpu time gets accounted to
+* @p: the process that the CPU time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
@@ -400,17 +400,16 @@ static void irqtime_account_idle_ticks(int ticks)
 irqtime_account_process_tick(current, 0, rq, ticks);
 }
 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
-static inline void irqtime_account_idle_ticks(int ticks) {}
-static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-struct rq *rq, int nr_ticks) {}
+static inline void irqtime_account_idle_ticks(int ticks) { }
+static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
+struct rq *rq, int nr_ticks) { }
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 /*
 * Use precise platform statistics if available:
 */
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
+# ifndef __ARCH_HAS_VTIME_TASK_SWITCH
 void vtime_common_task_switch(struct task_struct *prev)
 {
 if (is_idle_task(prev))
@@ -421,8 +420,7 @@ void vtime_common_task_switch(struct task_struct *prev)
 vtime_flush(prev);
 arch_vtime_task_switch(prev);
 }
-#endif
+# endif
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING */
@@ -469,10 +467,12 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
 *ut = cputime.utime;
 *st = cputime.stime;
 }
-#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: */
 /*
-* Account a single tick of cpu time.
-* @p: the process that the cpu time gets accounted to
+* Account a single tick of CPU time.
+* @p: the process that the CPU time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
 void account_process_tick(struct task_struct *p, int user_tick)

@@ -539,12 +539,12 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
 /*
 * If we cannot preempt any rq, fall back to pick any
-* online cpu.
+* online CPU:
 */
 cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
 if (cpu >= nr_cpu_ids) {
 /*
-* Fail to find any suitable cpu.
+* Failed to find any suitable CPU.
 * The task will never come back!
 */
 BUG_ON(dl_bandwidth_enabled());
@@ -608,8 +608,7 @@ static inline void queue_pull_task(struct rq *rq)
 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
-static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
-int flags);
+static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
 /*
 * We are being explicitly informed that a new instance is starting,
@@ -1873,7 +1872,7 @@ static int find_later_rq(struct task_struct *task)
 /*
 * We have to consider system topology and task affinity
-* first, then we can look for a suitable cpu.
+* first, then we can look for a suitable CPU.
 */
 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
 return -1;
@@ -1887,7 +1886,7 @@ static int find_later_rq(struct task_struct *task)
 * Now we check how well this matches with task's
 * affinity and system topology.
 *
-* The last cpu where the task run is our first
+* The last CPU where the task run is our first
 * guess, since it is most likely cache-hot there.
 */
 if (cpumask_test_cpu(cpu, later_mask))
@@ -1917,9 +1916,9 @@ static int find_later_rq(struct task_struct *task)
 best_cpu = cpumask_first_and(later_mask,
 sched_domain_span(sd));
 /*
-* Last chance: if a cpu being in both later_mask
+* Last chance: if a CPU being in both later_mask
 * and current sd span is valid, that becomes our
-* choice. Of course, the latest possible cpu is
+* choice. Of course, the latest possible CPU is
 * already under consideration through later_mask.
 */
 if (best_cpu < nr_cpu_ids) {
@@ -2075,7 +2074,7 @@ static int push_dl_task(struct rq *rq)
 if (task == next_task) {
 /*
 * The task is still there. We don't try
-* again, some other cpu will pull it when ready.
+* again, some other CPU will pull it when ready.
 */
 goto out;
 }
@@ -2308,7 +2307,7 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
 /*
 * Since this might be the only -deadline task on the rq,
 * this is the right place to try to pull some other one
-* from an overloaded cpu, if any.
+* from an overloaded CPU, if any.
 */
 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
 return;
@@ -2634,17 +2633,17 @@ void __dl_clear_params(struct task_struct *p)
 {
 struct sched_dl_entity *dl_se = &p->dl;
 dl_se->dl_runtime = 0;
 dl_se->dl_deadline = 0;
 dl_se->dl_period = 0;
 dl_se->flags = 0;
 dl_se->dl_bw = 0;
 dl_se->dl_density = 0;
 dl_se->dl_throttled = 0;
 dl_se->dl_yielded = 0;
 dl_se->dl_non_contending = 0;
 dl_se->dl_overrun = 0;
 }
 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
@@ -2663,21 +2662,22 @@ bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
 #ifdef CONFIG_SMP
 int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
 {
-unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
-cs_cpus_allowed);
+unsigned int dest_cpu;
 struct dl_bw *dl_b;
 bool overflow;
 int cpus, ret;
 unsigned long flags;
+dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
 rcu_read_lock_sched();
 dl_b = dl_bw_of(dest_cpu);
 raw_spin_lock_irqsave(&dl_b->lock, flags);
 cpus = dl_bw_cpus(dest_cpu);
 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
-if (overflow)
+if (overflow) {
 ret = -EBUSY;
-else {
+} else {
 /*
 * We reserve space for this task in the destination
 * root_domain, as we can't fail after this point.
@@ -2689,6 +2689,7 @@ int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
 }
 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 rcu_read_unlock_sched();
 return ret;
 }
@@ -2709,6 +2710,7 @@ int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
 ret = 0;
 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
 rcu_read_unlock_sched();
 return ret;
 }
@@ -2726,6 +2728,7 @@ bool dl_cpu_busy(unsigned int cpu)
 overflow = __dl_overflow(dl_b, cpus, 0, 0);
 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 rcu_read_unlock_sched();
 return overflow;
 }
 #endif

@@ -9,7 +9,6 @@
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
 #include <linux/proc_fs.h>
 #include <linux/sched/mm.h>
 #include <linux/sched/task.h>
@@ -274,34 +273,19 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
 if (table == NULL)
 return NULL;
-set_table_entry(&table[0], "min_interval", &sd->min_interval,
-sizeof(long), 0644, proc_doulongvec_minmax, false);
-set_table_entry(&table[1], "max_interval", &sd->max_interval,
-sizeof(long), 0644, proc_doulongvec_minmax, false);
-set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
-sizeof(int), 0644, proc_dointvec_minmax, true);
-set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
-sizeof(int), 0644, proc_dointvec_minmax, true);
-set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
-sizeof(int), 0644, proc_dointvec_minmax, true);
-set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
-sizeof(int), 0644, proc_dointvec_minmax, true);
-set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
-sizeof(int), 0644, proc_dointvec_minmax, true);
-set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
-sizeof(int), 0644, proc_dointvec_minmax, false);
-set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
-sizeof(int), 0644, proc_dointvec_minmax, false);
-set_table_entry(&table[9], "cache_nice_tries",
-&sd->cache_nice_tries,
-sizeof(int), 0644, proc_dointvec_minmax, false);
-set_table_entry(&table[10], "flags", &sd->flags,
-sizeof(int), 0644, proc_dointvec_minmax, false);
-set_table_entry(&table[11], "max_newidle_lb_cost",
-&sd->max_newidle_lb_cost,
-sizeof(long), 0644, proc_doulongvec_minmax, false);
-set_table_entry(&table[12], "name", sd->name,
-CORENAME_MAX_SIZE, 0444, proc_dostring, false);
+set_table_entry(&table[0] , "min_interval",        &sd->min_interval,        sizeof(long), 0644, proc_doulongvec_minmax, false);
+set_table_entry(&table[1] , "max_interval",        &sd->max_interval,        sizeof(long), 0644, proc_doulongvec_minmax, false);
+set_table_entry(&table[2] , "busy_idx",            &sd->busy_idx,            sizeof(int) , 0644, proc_dointvec_minmax,   true );
+set_table_entry(&table[3] , "idle_idx",            &sd->idle_idx,            sizeof(int) , 0644, proc_dointvec_minmax,   true );
+set_table_entry(&table[4] , "newidle_idx",         &sd->newidle_idx,         sizeof(int) , 0644, proc_dointvec_minmax,   true );
+set_table_entry(&table[5] , "wake_idx",            &sd->wake_idx,            sizeof(int) , 0644, proc_dointvec_minmax,   true );
+set_table_entry(&table[6] , "forkexec_idx",        &sd->forkexec_idx,        sizeof(int) , 0644, proc_dointvec_minmax,   true );
+set_table_entry(&table[7] , "busy_factor",         &sd->busy_factor,         sizeof(int) , 0644, proc_dointvec_minmax,   false);
+set_table_entry(&table[8] , "imbalance_pct",       &sd->imbalance_pct,       sizeof(int) , 0644, proc_dointvec_minmax,   false);
+set_table_entry(&table[9] , "cache_nice_tries",    &sd->cache_nice_tries,    sizeof(int) , 0644, proc_dointvec_minmax,   false);
+set_table_entry(&table[10], "flags",               &sd->flags,               sizeof(int) , 0644, proc_dointvec_minmax,   false);
+set_table_entry(&table[11], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax, false);
+set_table_entry(&table[12], "name",                sd->name,            CORENAME_MAX_SIZE, 0444, proc_dostring,          false);
 /* &table[13] is terminator */
 return table;
@@ -332,8 +316,8 @@ static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
 return table;
 }
 static cpumask_var_t sd_sysctl_cpus;
 static struct ctl_table_header *sd_sysctl_header;
 void register_sched_domain_sysctl(void)
 {
@@ -413,14 +397,10 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 {
 struct sched_entity *se = tg->se[cpu];
-#define P(F) \
-SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F)
-#define P_SCHEDSTAT(F) \
-SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)schedstat_val(F))
-#define PN(F) \
-SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
-#define PN_SCHEDSTAT(F) \
-SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))
+#define P(F)            SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F)
+#define P_SCHEDSTAT(F)  SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)schedstat_val(F))
+#define PN(F)           SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
+#define PN_SCHEDSTAT(F) SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))
 if (!se)
 return;
@@ -428,6 +408,7 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 PN(se->exec_start);
 PN(se->vruntime);
 PN(se->sum_exec_runtime);
 if (schedstat_enabled()) {
 PN_SCHEDSTAT(se->statistics.wait_start);
 PN_SCHEDSTAT(se->statistics.sleep_start);
@@ -440,6 +421,7 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 PN_SCHEDSTAT(se->statistics.wait_sum);
 P_SCHEDSTAT(se->statistics.wait_count);
 }
 P(se->load.weight);
 P(se->runnable_weight);
 #ifdef CONFIG_SMP
@@ -464,6 +446,7 @@ static char *task_group_path(struct task_group *tg)
 return group_path;
 cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
 return group_path;
 }
 #endif
@@ -799,9 +782,9 @@ void sysrq_sched_debug_show(void)
 /*
 * This itererator needs some explanation.
 * It returns 1 for the header position.
-* This means 2 is cpu 0.
-* In a hotplugged system some cpus, including cpu 0, may be missing so we have
-* to use cpumask_* to iterate over the cpus.
+* This means 2 is CPU 0.
+* In a hotplugged system some CPUs, including CPU 0, may be missing so we have
+* to use cpumask_* to iterate over the CPUs.
 */
 static void *sched_debug_start(struct seq_file *file, loff_t *offset)
 {
@@ -821,6 +804,7 @@ static void *sched_debug_start(struct seq_file *file, loff_t *offset)
 if (n < nr_cpu_ids)
 return (void *)(unsigned long)(n + 2);
 return NULL;
 }
@@ -835,10 +819,10 @@ static void sched_debug_stop(struct seq_file *file, void *data)
 }
 static const struct seq_operations sched_debug_sops = {
 .start = sched_debug_start,
 .next = sched_debug_next,
 .stop = sched_debug_stop,
 .show = sched_debug_show,
 };
 static int sched_debug_release(struct inode *inode, struct file *file)
@@ -876,14 +860,10 @@ static int __init init_sched_debug_procfs(void)
 __initcall(init_sched_debug_procfs);
-#define __P(F) \
-SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
-#define P(F) \
-SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
-#define __PN(F) \
-SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
-#define PN(F) \
-SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
+#define __P(F)  SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
+#define P(F)    SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
+#define __PN(F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
+#define PN(F)   SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
 #ifdef CONFIG_NUMA_BALANCING

[The diff for one file is collapsed in the original view and not shown here.]
 /*
-* Generic entry point for the idle threads
+* Generic entry points for the idle threads
 */
 #include <linux/sched.h>
 #include <linux/sched/idle.h>
@@ -332,8 +332,8 @@ void cpu_startup_entry(enum cpuhp_state state)
 {
 /*
 * This #ifdef needs to die, but it's too late in the cycle to
-* make this generic (arm and sh have never invoked the canary
-* init for the non boot cpus!). Will be fixed in 3.11
+* make this generic (ARM and SH have never invoked the canary
+* init for the non boot CPUs!). Will be fixed in 3.11
 */
 #ifdef CONFIG_X86
 /*

@@ -14,7 +14,7 @@ select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
 return task_cpu(p); /* IDLE tasks as never migrated */
 }
-#endif /* CONFIG_SMP */
+#endif
 /*
 * Idle tasks are unconditionally rescheduled:
@@ -30,6 +30,7 @@ pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 put_prev_task(rq, prev);
 update_idle_core(rq);
 schedstat_inc(rq->sched_goidle);
 return rq->idle;
 }

@@ -6,13 +6,13 @@
 * Copyright (C) 2017-2018 SUSE, Frederic Weisbecker
 *
 */
 #include <linux/sched/isolation.h>
 #include <linux/tick.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/static_key.h>
 #include <linux/ctype.h>
 #include "sched.h"
 DEFINE_STATIC_KEY_FALSE(housekeeping_overriden);

...@@ -32,29 +32,29 @@ ...@@ -32,29 +32,29 @@
* Due to a number of reasons the above turns in the mess below: * Due to a number of reasons the above turns in the mess below:
* *
* - for_each_possible_cpu() is prohibitively expensive on machines with * - for_each_possible_cpu() is prohibitively expensive on machines with
* serious number of cpus, therefore we need to take a distributed approach * serious number of CPUs, therefore we need to take a distributed approach
* to calculating nr_active. * to calculating nr_active.
* *
* \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0 * \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
* = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) } * = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
* *
* So assuming nr_active := 0 when we start out -- true per definition, we * So assuming nr_active := 0 when we start out -- true per definition, we
* can simply take per-cpu deltas and fold those into a global accumulate * can simply take per-CPU deltas and fold those into a global accumulate
* to obtain the same result. See calc_load_fold_active(). * to obtain the same result. See calc_load_fold_active().
* *
* Furthermore, in order to avoid synchronizing all per-cpu delta folding * Furthermore, in order to avoid synchronizing all per-CPU delta folding
* across the machine, we assume 10 ticks is sufficient time for every * across the machine, we assume 10 ticks is sufficient time for every
* cpu to have completed this task. * CPU to have completed this task.
* *
* This places an upper-bound on the IRQ-off latency of the machine. Then * This places an upper-bound on the IRQ-off latency of the machine. Then
* again, being late doesn't loose the delta, just wrecks the sample. * again, being late doesn't loose the delta, just wrecks the sample.
* *
* - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because * - cpu_rq()->nr_uninterruptible isn't accurately tracked per-CPU because
* this would add another cross-cpu cacheline miss and atomic operation * this would add another cross-CPU cacheline miss and atomic operation
* to the wakeup path. Instead we increment on whatever cpu the task ran * to the wakeup path. Instead we increment on whatever CPU the task ran
* when it went into uninterruptible state and decrement on whatever cpu * when it went into uninterruptible state and decrement on whatever CPU
* did the wakeup. This means that only the sum of nr_uninterruptible over * did the wakeup. This means that only the sum of nr_uninterruptible over
* all cpus yields the correct result. * all CPUs yields the correct result.
* *
* This covers the NO_HZ=n code, for extra head-aches, see the comment below. * This covers the NO_HZ=n code, for extra head-aches, see the comment below.
*/ */
...@@ -115,11 +115,11 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active) ...@@ -115,11 +115,11 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
* Handle NO_HZ for the global load-average. * Handle NO_HZ for the global load-average.
* *
* Since the above described distributed algorithm to compute the global * Since the above described distributed algorithm to compute the global
* load-average relies on per-cpu sampling from the tick, it is affected by * load-average relies on per-CPU sampling from the tick, it is affected by
* NO_HZ. * NO_HZ.
* *
* The basic idea is to fold the nr_active delta into a global NO_HZ-delta upon * The basic idea is to fold the nr_active delta into a global NO_HZ-delta upon
* entering NO_HZ state such that we can include this as an 'extra' cpu delta * entering NO_HZ state such that we can include this as an 'extra' CPU delta
* when we read the global state. * when we read the global state.
* *
* Obviously reality has to ruin such a delightfully simple scheme: * Obviously reality has to ruin such a delightfully simple scheme:
...@@ -146,9 +146,9 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active) ...@@ -146,9 +146,9 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
* busy state. * busy state.
* *
* This is solved by pushing the window forward, and thus skipping the * This is solved by pushing the window forward, and thus skipping the
* sample, for this cpu (effectively using the NO_HZ-delta for this cpu which * sample, for this CPU (effectively using the NO_HZ-delta for this CPU which
* was in effect at the time the window opened). This also solves the issue * was in effect at the time the window opened). This also solves the issue
* of having to deal with a cpu having been in NO_HZ for multiple LOAD_FREQ * of having to deal with a CPU having been in NO_HZ for multiple LOAD_FREQ
* intervals. * intervals.
* *
* When making the ILB scale, we should try to pull this in as well. * When making the ILB scale, we should try to pull this in as well.
...@@ -299,7 +299,7 @@ calc_load_n(unsigned long load, unsigned long exp, ...@@ -299,7 +299,7 @@ calc_load_n(unsigned long load, unsigned long exp,
} }
/* /*
* NO_HZ can leave us missing all per-cpu ticks calling * NO_HZ can leave us missing all per-CPU ticks calling
* calc_load_fold_active(), but since a NO_HZ CPU folds its delta into * calc_load_fold_active(), but since a NO_HZ CPU folds its delta into
* calc_load_nohz per calc_load_nohz_start(), all we need to do is fold * calc_load_nohz per calc_load_nohz_start(), all we need to do is fold
* in the pending NO_HZ delta if our NO_HZ period crossed a load cycle boundary. * in the pending NO_HZ delta if our NO_HZ period crossed a load cycle boundary.
...@@ -363,7 +363,7 @@ void calc_global_load(unsigned long ticks) ...@@ -363,7 +363,7 @@ void calc_global_load(unsigned long ticks)
return; return;
/* /*
* Fold the 'old' NO_HZ-delta to include all NO_HZ cpus. * Fold the 'old' NO_HZ-delta to include all NO_HZ CPUs.
*/ */
delta = calc_load_nohz_fold(); delta = calc_load_nohz_fold();
if (delta) if (delta)
......
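For reference, the calc_load() helper named in the hunk headers above maintains avenrun[] as a fixed-point exponential moving average of the folded task count. A standalone userspace sketch of that arithmetic (constants assumed to match include/linux/sched/loadavg.h, FSHIFT = 11):

	#include <stdio.h>

	#define FSHIFT	11			/* bits of fractional precision */
	#define FIXED_1	(1 << FSHIFT)		/* 1.0 in fixed point */
	#define EXP_1	1884			/* 1/exp(5sec/1min) in fixed point */

	static unsigned long calc_load(unsigned long load, unsigned long exp,
				       unsigned long active)
	{
		unsigned long newload = load * exp + active * (FIXED_1 - exp);

		if (active >= load)
			newload += FIXED_1 - 1;	/* round up while the load is rising */

		return newload / FIXED_1;
	}

	int main(void)
	{
		unsigned long load = 0, active = 3 << FSHIFT;	/* 3 runnable tasks */
		int i;

		for (i = 0; i < 12; i++)	/* one minute of 5-second samples */
			load = calc_load(load, EXP_1, active);

		printf("1-min load: %lu.%02lu\n", load >> FSHIFT,
		       ((load & (FIXED_1 - 1)) * 100) >> FSHIFT);
		return 0;
	}

After one minute at a steady 3 runnable tasks this prints roughly 1.89, i.e. about 63% of the way to 3, as expected for one time constant of the 1-minute average.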
...@@ -27,18 +27,18 @@ ...@@ -27,18 +27,18 @@
* except MEMBARRIER_CMD_QUERY. * except MEMBARRIER_CMD_QUERY.
*/ */
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE #ifdef CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \ #define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \
(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE \ (MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE \
| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE) | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE)
#else #else
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK 0 #define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK 0
#endif #endif
#define MEMBARRIER_CMD_BITMASK \ #define MEMBARRIER_CMD_BITMASK \
(MEMBARRIER_CMD_GLOBAL | MEMBARRIER_CMD_GLOBAL_EXPEDITED \ (MEMBARRIER_CMD_GLOBAL | MEMBARRIER_CMD_GLOBAL_EXPEDITED \
| MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED \ | MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED \
| MEMBARRIER_CMD_PRIVATE_EXPEDITED \ | MEMBARRIER_CMD_PRIVATE_EXPEDITED \
| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED \ | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED \
| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK) | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK)
static void ipi_mb(void *info) static void ipi_mb(void *info)
...@@ -85,6 +85,7 @@ static int membarrier_global_expedited(void) ...@@ -85,6 +85,7 @@ static int membarrier_global_expedited(void)
*/ */
if (cpu == raw_smp_processor_id()) if (cpu == raw_smp_processor_id())
continue; continue;
rcu_read_lock(); rcu_read_lock();
p = task_rcu_dereference(&cpu_rq(cpu)->curr); p = task_rcu_dereference(&cpu_rq(cpu)->curr);
if (p && p->mm && (atomic_read(&p->mm->membarrier_state) & if (p && p->mm && (atomic_read(&p->mm->membarrier_state) &
...@@ -188,6 +189,7 @@ static int membarrier_private_expedited(int flags) ...@@ -188,6 +189,7 @@ static int membarrier_private_expedited(int flags)
* rq->curr modification in scheduler. * rq->curr modification in scheduler.
*/ */
smp_mb(); /* exit from system call is not a mb */ smp_mb(); /* exit from system call is not a mb */
return 0; return 0;
} }
...@@ -219,6 +221,7 @@ static int membarrier_register_global_expedited(void) ...@@ -219,6 +221,7 @@ static int membarrier_register_global_expedited(void)
} }
atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY, atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
&mm->membarrier_state); &mm->membarrier_state);
return 0; return 0;
} }
...@@ -253,6 +256,7 @@ static int membarrier_register_private_expedited(int flags) ...@@ -253,6 +256,7 @@ static int membarrier_register_private_expedited(int flags)
synchronize_sched(); synchronize_sched();
} }
atomic_or(state, &mm->membarrier_state); atomic_or(state, &mm->membarrier_state);
return 0; return 0;
} }
......
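The command bitmask above is what MEMBARRIER_CMD_QUERY reports back to userspace. A hedged usage sketch (assumes a kernel and libc exposing __NR_membarrier and <linux/membarrier.h>; error handling trimmed):

	#include <linux/membarrier.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <stdio.h>

	static int membarrier(int cmd, int flags)
	{
		return syscall(__NR_membarrier, cmd, flags);
	}

	int main(void)
	{
		int supported = membarrier(MEMBARRIER_CMD_QUERY, 0);

		if (supported < 0)
			return 1;				/* syscall not available */

		if (supported & MEMBARRIER_CMD_GLOBAL)
			membarrier(MEMBARRIER_CMD_GLOBAL, 0);	/* barrier on all running threads */

		printf("supported commands bitmask: %#x\n", supported);
		return 0;
	}

The expedited variants work the same way, except that the corresponding REGISTER command must succeed before the expedited command may be issued.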
...@@ -1453,9 +1453,9 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) ...@@ -1453,9 +1453,9 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
return; return;
/* /*
* There appears to be other cpus that can accept * There appear to be other CPUs that can accept
* current and none to run 'p', so lets reschedule * the current task but none can run 'p', so lets reschedule
* to try and push current away: * to try and push the current task away:
*/ */
requeue_task_rt(rq, p, 1); requeue_task_rt(rq, p, 1);
resched_curr(rq); resched_curr(rq);
...@@ -1596,12 +1596,13 @@ static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) ...@@ -1596,12 +1596,13 @@ static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
if (!task_running(rq, p) && if (!task_running(rq, p) &&
cpumask_test_cpu(cpu, &p->cpus_allowed)) cpumask_test_cpu(cpu, &p->cpus_allowed))
return 1; return 1;
return 0; return 0;
} }
/* /*
* Return the highest pushable rq's task, which is suitable to be executed * Return the highest pushable rq's task, which is suitable to be executed
* on the cpu, NULL otherwise * on the CPU, NULL otherwise
*/ */
static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu) static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
{ {
...@@ -1639,11 +1640,11 @@ static int find_lowest_rq(struct task_struct *task) ...@@ -1639,11 +1640,11 @@ static int find_lowest_rq(struct task_struct *task)
return -1; /* No targets found */ return -1; /* No targets found */
/* /*
* At this point we have built a mask of cpus representing the * At this point we have built a mask of CPUs representing the
* lowest priority tasks in the system. Now we want to elect * lowest priority tasks in the system. Now we want to elect
* the best one based on our affinity and topology. * the best one based on our affinity and topology.
* *
* We prioritize the last cpu that the task executed on since * We prioritize the last CPU that the task executed on since
* it is most likely cache-hot in that location. * it is most likely cache-hot in that location.
*/ */
if (cpumask_test_cpu(cpu, lowest_mask)) if (cpumask_test_cpu(cpu, lowest_mask))
...@@ -1651,7 +1652,7 @@ static int find_lowest_rq(struct task_struct *task) ...@@ -1651,7 +1652,7 @@ static int find_lowest_rq(struct task_struct *task)
/* /*
* Otherwise, we consult the sched_domains span maps to figure * Otherwise, we consult the sched_domains span maps to figure
* out which cpu is logically closest to our hot cache data. * out which CPU is logically closest to our hot cache data.
*/ */
if (!cpumask_test_cpu(this_cpu, lowest_mask)) if (!cpumask_test_cpu(this_cpu, lowest_mask))
this_cpu = -1; /* Skip this_cpu opt if not among lowest */ this_cpu = -1; /* Skip this_cpu opt if not among lowest */
...@@ -1692,6 +1693,7 @@ static int find_lowest_rq(struct task_struct *task) ...@@ -1692,6 +1693,7 @@ static int find_lowest_rq(struct task_struct *task)
cpu = cpumask_any(lowest_mask); cpu = cpumask_any(lowest_mask);
if (cpu < nr_cpu_ids) if (cpu < nr_cpu_ids)
return cpu; return cpu;
return -1; return -1;
} }
...@@ -1827,7 +1829,7 @@ static int push_rt_task(struct rq *rq) ...@@ -1827,7 +1829,7 @@ static int push_rt_task(struct rq *rq)
* The task hasn't migrated, and is still the next * The task hasn't migrated, and is still the next
* eligible task, but we failed to find a run-queue * eligible task, but we failed to find a run-queue
* to push it to. Do not retry in this case, since * to push it to. Do not retry in this case, since
* other cpus will pull from us when ready. * other CPUs will pull from us when ready.
*/ */
goto out; goto out;
} }
...@@ -1919,7 +1921,7 @@ static int rto_next_cpu(struct root_domain *rd) ...@@ -1919,7 +1921,7 @@ static int rto_next_cpu(struct root_domain *rd)
* rt_next_cpu() will simply return the first CPU found in * rt_next_cpu() will simply return the first CPU found in
* the rto_mask. * the rto_mask.
* *
* If rto_next_cpu() is called with rto_cpu is a valid cpu, it * If rto_next_cpu() is called with rto_cpu is a valid CPU, it
* will return the next CPU found in the rto_mask. * will return the next CPU found in the rto_mask.
* *
* If there are no more CPUs left in the rto_mask, then a check is made * If there are no more CPUs left in the rto_mask, then a check is made
...@@ -1980,7 +1982,7 @@ static void tell_cpu_to_push(struct rq *rq) ...@@ -1980,7 +1982,7 @@ static void tell_cpu_to_push(struct rq *rq)
raw_spin_lock(&rq->rd->rto_lock); raw_spin_lock(&rq->rd->rto_lock);
/* /*
* The rto_cpu is updated under the lock, if it has a valid cpu * The rto_cpu is updated under the lock, if it has a valid CPU
* then the IPI is still running and will continue due to the * then the IPI is still running and will continue due to the
* update to loop_next, and nothing needs to be done here. * update to loop_next, and nothing needs to be done here.
* Otherwise it is finishing up and an ipi needs to be sent. * Otherwise it is finishing up and an ipi needs to be sent.
...@@ -2105,7 +2107,7 @@ static void pull_rt_task(struct rq *this_rq) ...@@ -2105,7 +2107,7 @@ static void pull_rt_task(struct rq *this_rq)
/* /*
* There's a chance that p is higher in priority * There's a chance that p is higher in priority
* than what's currently running on its cpu. * than what's currently running on its CPU.
* This is just that p is waking up and hasn't * This is just that p is waking up and hasn't
* had a chance to schedule. We only pull * had a chance to schedule. We only pull
* p if it is lower in priority than the * p if it is lower in priority than the
...@@ -2693,6 +2695,7 @@ int sched_rr_handler(struct ctl_table *table, int write, ...@@ -2693,6 +2695,7 @@ int sched_rr_handler(struct ctl_table *table, int write,
msecs_to_jiffies(sysctl_sched_rr_timeslice); msecs_to_jiffies(sysctl_sched_rr_timeslice);
} }
mutex_unlock(&mutex); mutex_unlock(&mutex);
return ret; return ret;
} }
......
...@@ -78,8 +78,8 @@ static int show_schedstat(struct seq_file *seq, void *v) ...@@ -78,8 +78,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
* This iterator needs some explanation. * This iterator needs some explanation.
* It returns 1 for the header position. * It returns 1 for the header position.
* This means 2 is cpu 0. * This means 2 is cpu 0.
* In a hotplugged system some cpus, including cpu 0, may be missing so we have * In a hotplugged system some CPUs, including cpu 0, may be missing so we have
* to use cpumask_* to iterate over the cpus. * to use cpumask_* to iterate over the CPUs.
*/ */
static void *schedstat_start(struct seq_file *file, loff_t *offset) static void *schedstat_start(struct seq_file *file, loff_t *offset)
{ {
...@@ -99,12 +99,14 @@ static void *schedstat_start(struct seq_file *file, loff_t *offset) ...@@ -99,12 +99,14 @@ static void *schedstat_start(struct seq_file *file, loff_t *offset)
if (n < nr_cpu_ids) if (n < nr_cpu_ids)
return (void *)(unsigned long)(n + 2); return (void *)(unsigned long)(n + 2);
return NULL; return NULL;
} }
static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset) static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
{ {
(*offset)++; (*offset)++;
return schedstat_start(file, offset); return schedstat_start(file, offset);
} }
...@@ -134,6 +136,7 @@ static const struct file_operations proc_schedstat_operations = { ...@@ -134,6 +136,7 @@ static const struct file_operations proc_schedstat_operations = {
static int __init proc_schedstat_init(void) static int __init proc_schedstat_init(void)
{ {
proc_create("schedstat", 0, NULL, &proc_schedstat_operations); proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
return 0; return 0;
} }
subsys_initcall(proc_schedstat_init); subsys_initcall(proc_schedstat_init);
...@@ -30,35 +30,29 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) ...@@ -30,35 +30,29 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
if (rq) if (rq)
rq->rq_sched_info.run_delay += delta; rq->rq_sched_info.run_delay += delta;
} }
#define schedstat_enabled() static_branch_unlikely(&sched_schedstats) #define schedstat_enabled() static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var) do { var++; } while (0) #define __schedstat_inc(var) do { var++; } while (0)
#define schedstat_inc(var) do { if (schedstat_enabled()) { var++; } } while (0) #define schedstat_inc(var) do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt) do { var += (amt); } while (0) #define __schedstat_add(var, amt) do { var += (amt); } while (0)
#define schedstat_add(var, amt) do { if (schedstat_enabled()) { var += (amt); } } while (0) #define schedstat_add(var, amt) do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val) do { var = (val); } while (0) #define __schedstat_set(var, val) do { var = (val); } while (0)
#define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0) #define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var) (var) #define schedstat_val(var) (var)
#define schedstat_val_or_zero(var) ((schedstat_enabled()) ? (var) : 0) #define schedstat_val_or_zero(var) ((schedstat_enabled()) ? (var) : 0)
#else /* !CONFIG_SCHEDSTATS */ #else /* !CONFIG_SCHEDSTATS: */
static inline void static inline void rq_sched_info_arrive (struct rq *rq, unsigned long long delta) { }
rq_sched_info_arrive(struct rq *rq, unsigned long long delta) static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) { }
{} static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delta) { }
static inline void # define schedstat_enabled() 0
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) # define __schedstat_inc(var) do { } while (0)
{} # define schedstat_inc(var) do { } while (0)
static inline void # define __schedstat_add(var, amt) do { } while (0)
rq_sched_info_depart(struct rq *rq, unsigned long long delta) # define schedstat_add(var, amt) do { } while (0)
{} # define __schedstat_set(var, val) do { } while (0)
#define schedstat_enabled() 0 # define schedstat_set(var, val) do { } while (0)
#define __schedstat_inc(var) do { } while (0) # define schedstat_val(var) 0
#define schedstat_inc(var) do { } while (0) # define schedstat_val_or_zero(var) 0
#define __schedstat_add(var, amt) do { } while (0)
#define schedstat_add(var, amt) do { } while (0)
#define __schedstat_set(var, val) do { } while (0)
#define schedstat_set(var, val) do { } while (0)
#define schedstat_val(var) 0
#define schedstat_val_or_zero(var) 0
#endif /* CONFIG_SCHEDSTATS */ #endif /* CONFIG_SCHEDSTATS */
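The CONFIG_SCHEDSTATS block above follows a common pattern: the plain schedstat_*() forms are guarded by a cheap enablement check (a static branch in the kernel), the __schedstat_*() forms update unconditionally, and the !CONFIG_SCHEDSTATS variants compile away entirely. A small userspace analog of that pattern (a plain bool standing in for the static key):

	#include <stdbool.h>
	#include <stdio.h>

	static bool schedstats_enabled;			/* stand-in for the static branch */

	#define schedstat_enabled()	(schedstats_enabled)
	#define __schedstat_inc(var)	do { (var)++; } while (0)
	#define schedstat_inc(var)	do { if (schedstat_enabled()) { (var)++; } } while (0)

	int main(void)
	{
		unsigned long wait_count = 0;

		schedstat_inc(wait_count);		/* ignored: stats are disabled */
		schedstats_enabled = true;
		schedstat_inc(wait_count);		/* counted */

		printf("wait_count = %lu\n", wait_count);	/* prints 1 */
		return 0;
	}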
#ifdef CONFIG_SCHED_INFO #ifdef CONFIG_SCHED_INFO
...@@ -69,9 +63,9 @@ static inline void sched_info_reset_dequeued(struct task_struct *t) ...@@ -69,9 +63,9 @@ static inline void sched_info_reset_dequeued(struct task_struct *t)
/* /*
* We are interested in knowing how long it was from the *first* time a * We are interested in knowing how long it was from the *first* time a
* task was queued to the time that it finally hit a cpu, we call this routine * task was queued to the time that it finally hit a CPU, we call this routine
* from dequeue_task() to account for possible rq->clock skew across cpus. The * from dequeue_task() to account for possible rq->clock skew across CPUs. The
* delta taken on each cpu would annul the skew. * delta taken on each CPU would annul the skew.
*/ */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t) static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{ {
...@@ -87,7 +81,7 @@ static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t) ...@@ -87,7 +81,7 @@ static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
} }
/* /*
* Called when a task finally hits the cpu. We can now calculate how * Called when a task finally hits the CPU. We can now calculate how
* long it was waiting to run. We also note when it began so that we * long it was waiting to run. We also note when it began so that we
* can keep stats on how long its timeslice is. * can keep stats on how long its timeslice is.
*/ */
...@@ -112,9 +106,10 @@ static void sched_info_arrive(struct rq *rq, struct task_struct *t) ...@@ -112,9 +106,10 @@ static void sched_info_arrive(struct rq *rq, struct task_struct *t)
*/ */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t) static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{ {
if (unlikely(sched_info_on())) if (unlikely(sched_info_on())) {
if (!t->sched_info.last_queued) if (!t->sched_info.last_queued)
t->sched_info.last_queued = rq_clock(rq); t->sched_info.last_queued = rq_clock(rq);
}
} }
/* /*
...@@ -127,8 +122,7 @@ static inline void sched_info_queued(struct rq *rq, struct task_struct *t) ...@@ -127,8 +122,7 @@ static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
*/ */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t) static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{ {
unsigned long long delta = rq_clock(rq) - unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;
t->sched_info.last_arrival;
rq_sched_info_depart(rq, delta); rq_sched_info_depart(rq, delta);
...@@ -142,11 +136,10 @@ static inline void sched_info_depart(struct rq *rq, struct task_struct *t) ...@@ -142,11 +136,10 @@ static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
* the idle task.) We are only called when prev != next. * the idle task.) We are only called when prev != next.
*/ */
static inline void static inline void
__sched_info_switch(struct rq *rq, __sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
struct task_struct *prev, struct task_struct *next)
{ {
/* /*
* prev now departs the cpu. It's not interesting to record * prev now departs the CPU. It's not interesting to record
* stats about how efficient we were at scheduling the idle * stats about how efficient we were at scheduling the idle
* process, however. * process, however.
*/ */
...@@ -156,18 +149,19 @@ __sched_info_switch(struct rq *rq, ...@@ -156,18 +149,19 @@ __sched_info_switch(struct rq *rq,
if (next != rq->idle) if (next != rq->idle)
sched_info_arrive(rq, next); sched_info_arrive(rq, next);
} }
static inline void static inline void
sched_info_switch(struct rq *rq, sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
struct task_struct *prev, struct task_struct *next)
{ {
if (unlikely(sched_info_on())) if (unlikely(sched_info_on()))
__sched_info_switch(rq, prev, next); __sched_info_switch(rq, prev, next);
} }
#else
#define sched_info_queued(rq, t) do { } while (0) #else /* !CONFIG_SCHED_INFO: */
#define sched_info_reset_dequeued(t) do { } while (0) # define sched_info_queued(rq, t) do { } while (0)
#define sched_info_dequeued(rq, t) do { } while (0) # define sched_info_reset_dequeued(t) do { } while (0)
#define sched_info_depart(rq, t) do { } while (0) # define sched_info_dequeued(rq, t) do { } while (0)
#define sched_info_arrive(rq, next) do { } while (0) # define sched_info_depart(rq, t) do { } while (0)
#define sched_info_switch(rq, t, next) do { } while (0) # define sched_info_arrive(rq, next) do { } while (0)
# define sched_info_switch(rq, t, next) do { } while (0)
#endif /* CONFIG_SCHED_INFO */ #endif /* CONFIG_SCHED_INFO */
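The queued/arrive bookkeeping described in the comments above can be modelled in a few lines of userspace C (hypothetical names, simplified clock handling; only meant to show how run_delay is charged from the first queueing to the moment the task gets a CPU):

	#include <stdio.h>

	struct sched_info_model {
		unsigned long long last_queued, last_arrival, run_delay, pcount;
	};

	static void info_queued(struct sched_info_model *si, unsigned long long now)
	{
		if (!si->last_queued)		/* only the *first* queueing is recorded */
			si->last_queued = now;
	}

	static void info_arrive(struct sched_info_model *si, unsigned long long now)
	{
		if (si->last_queued)
			si->run_delay += now - si->last_queued;
		si->last_queued  = 0;
		si->last_arrival = now;
		si->pcount++;
	}

	int main(void)
	{
		struct sched_info_model si = { 0 };

		info_queued(&si, 100);		/* woken at t=100 */
		info_queued(&si, 150);		/* requeued: the first timestamp is kept */
		info_arrive(&si, 240);		/* finally gets a CPU at t=240 */

		printf("run_delay=%llu pcount=%llu\n", si.run_delay, si.pcount); /* 140 1 */
		return 0;
	}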
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
#include "sched.h"
/* /*
* stop-task scheduling class. * stop-task scheduling class.
* *
...@@ -9,6 +7,7 @@ ...@@ -9,6 +7,7 @@
* *
* See kernel/stop_machine.c * See kernel/stop_machine.c
*/ */
#include "sched.h"
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
static int static int
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
/*
* <linux/swait.h> (simple wait queues ) implementation:
*/
#include <linux/sched/signal.h> #include <linux/sched/signal.h>
#include <linux/swait.h> #include <linux/swait.h>
......
...@@ -41,8 +41,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, ...@@ -41,8 +41,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
if (!(sd->flags & SD_LOAD_BALANCE)) { if (!(sd->flags & SD_LOAD_BALANCE)) {
printk("does not load-balance\n"); printk("does not load-balance\n");
if (sd->parent) if (sd->parent)
printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent");
" has parent");
return -1; return -1;
} }
...@@ -50,12 +49,10 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, ...@@ -50,12 +49,10 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
cpumask_pr_args(sched_domain_span(sd)), sd->name); cpumask_pr_args(sched_domain_span(sd)), sd->name);
if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
printk(KERN_ERR "ERROR: domain->span does not contain " printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
"CPU%d\n", cpu);
} }
if (!cpumask_test_cpu(cpu, sched_group_span(group))) { if (!cpumask_test_cpu(cpu, sched_group_span(group))) {
printk(KERN_ERR "ERROR: domain->groups does not contain" printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
" CPU%d\n", cpu);
} }
printk(KERN_DEBUG "%*s groups:", level + 1, ""); printk(KERN_DEBUG "%*s groups:", level + 1, "");
...@@ -115,8 +112,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, ...@@ -115,8 +112,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
if (sd->parent && if (sd->parent &&
!cpumask_subset(groupmask, sched_domain_span(sd->parent))) !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
printk(KERN_ERR "ERROR: parent span is not a superset " printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
"of domain->span\n");
return 0; return 0;
} }
...@@ -595,7 +591,7 @@ int group_balance_cpu(struct sched_group *sg) ...@@ -595,7 +591,7 @@ int group_balance_cpu(struct sched_group *sg)
* are not. * are not.
* *
* This leads to a few particularly weird cases where the sched_domain's are * This leads to a few particularly weird cases where the sched_domain's are
* not of the same number for each cpu. Consider: * not of the same number for each CPU. Consider:
* *
* NUMA-2 0-3 0-3 * NUMA-2 0-3 0-3
* groups: {0-2},{1-3} {1-3},{0-2} * groups: {0-2},{1-3} {1-3},{0-2}
...@@ -780,7 +776,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu) ...@@ -780,7 +776,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
* ^ ^ ^ ^ * ^ ^ ^ ^
* `-' `-' * `-' `-'
* *
* The sched_domains are per-cpu and have a two way link (parent & child) and * The sched_domains are per-CPU and have a two way link (parent & child) and
* denote the ever growing mask of CPUs belonging to that level of topology. * denote the ever growing mask of CPUs belonging to that level of topology.
* *
* Each sched_domain has a circular (double) linked list of sched_group's, each * Each sched_domain has a circular (double) linked list of sched_group's, each
...@@ -1021,6 +1017,7 @@ __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) ...@@ -1021,6 +1017,7 @@ __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
d->rd = alloc_rootdomain(); d->rd = alloc_rootdomain();
if (!d->rd) if (!d->rd)
return sa_sd; return sa_sd;
return sa_rootdomain; return sa_rootdomain;
} }
...@@ -1047,12 +1044,14 @@ static void claim_allocations(int cpu, struct sched_domain *sd) ...@@ -1047,12 +1044,14 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
} }
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
static int sched_domains_numa_levels;
enum numa_topology_type sched_numa_topology_type; enum numa_topology_type sched_numa_topology_type;
static int *sched_domains_numa_distance;
int sched_max_numa_distance; static int sched_domains_numa_levels;
static struct cpumask ***sched_domains_numa_masks; static int sched_domains_curr_level;
static int sched_domains_curr_level;
int sched_max_numa_distance;
static int *sched_domains_numa_distance;
static struct cpumask ***sched_domains_numa_masks;
#endif #endif
/* /*
...@@ -1074,11 +1073,11 @@ static int sched_domains_curr_level; ...@@ -1074,11 +1073,11 @@ static int sched_domains_curr_level;
* SD_ASYM_PACKING - describes SMT quirks * SD_ASYM_PACKING - describes SMT quirks
*/ */
#define TOPOLOGY_SD_FLAGS \ #define TOPOLOGY_SD_FLAGS \
(SD_SHARE_CPUCAPACITY | \ (SD_SHARE_CPUCAPACITY | \
SD_SHARE_PKG_RESOURCES | \ SD_SHARE_PKG_RESOURCES | \
SD_NUMA | \ SD_NUMA | \
SD_ASYM_PACKING | \ SD_ASYM_PACKING | \
SD_ASYM_CPUCAPACITY | \ SD_ASYM_CPUCAPACITY | \
SD_SHARE_POWERDOMAIN) SD_SHARE_POWERDOMAIN)
static struct sched_domain * static struct sched_domain *
...@@ -1628,7 +1627,7 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve ...@@ -1628,7 +1627,7 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve
pr_err(" the %s domain not a subset of the %s domain\n", pr_err(" the %s domain not a subset of the %s domain\n",
child->name, sd->name); child->name, sd->name);
#endif #endif
/* Fixup, ensure @sd has at least @child cpus. */ /* Fixup, ensure @sd has at least @child CPUs. */
cpumask_or(sched_domain_span(sd), cpumask_or(sched_domain_span(sd),
sched_domain_span(sd), sched_domain_span(sd),
sched_domain_span(child)); sched_domain_span(child));
...@@ -1720,6 +1719,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att ...@@ -1720,6 +1719,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
ret = 0; ret = 0;
error: error:
__free_domain_allocs(&d, alloc_state, cpu_map); __free_domain_allocs(&d, alloc_state, cpu_map);
return ret; return ret;
} }
...@@ -1824,6 +1824,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, ...@@ -1824,6 +1824,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
return 1; return 1;
tmp = SD_ATTR_INIT; tmp = SD_ATTR_INIT;
return !memcmp(cur ? (cur + idx_cur) : &tmp, return !memcmp(cur ? (cur + idx_cur) : &tmp,
new ? (new + idx_new) : &tmp, new ? (new + idx_new) : &tmp,
sizeof(struct sched_domain_attr)); sizeof(struct sched_domain_attr));
...@@ -1929,4 +1930,3 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], ...@@ -1929,4 +1930,3 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
mutex_unlock(&sched_domains_mutex); mutex_unlock(&sched_domains_mutex);
} }
...@@ -107,6 +107,7 @@ static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode, ...@@ -107,6 +107,7 @@ static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
break; break;
} }
} }
return nr_exclusive; return nr_exclusive;
} }
...@@ -317,6 +318,7 @@ int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait) ...@@ -317,6 +318,7 @@ int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
spin_unlock(&wq->lock); spin_unlock(&wq->lock);
schedule(); schedule();
spin_lock(&wq->lock); spin_lock(&wq->lock);
return 0; return 0;
} }
EXPORT_SYMBOL(do_wait_intr); EXPORT_SYMBOL(do_wait_intr);
...@@ -333,6 +335,7 @@ int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait) ...@@ -333,6 +335,7 @@ int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
spin_unlock_irq(&wq->lock); spin_unlock_irq(&wq->lock);
schedule(); schedule();
spin_lock_irq(&wq->lock); spin_lock_irq(&wq->lock);
return 0; return 0;
} }
EXPORT_SYMBOL(do_wait_intr_irq); EXPORT_SYMBOL(do_wait_intr_irq);
...@@ -378,6 +381,7 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i ...@@ -378,6 +381,7 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i
if (ret) if (ret)
list_del_init(&wq_entry->entry); list_del_init(&wq_entry->entry);
return ret; return ret;
} }
EXPORT_SYMBOL(autoremove_wake_function); EXPORT_SYMBOL(autoremove_wake_function);
......
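For context, the classic sleeper/waker pairing that autoremove_wake_function() above serves looks roughly like this (kernel context assumed, condition handling kept minimal; a sketch, not a drop-in implementation):

	#include <linux/wait.h>
	#include <linux/sched.h>

	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
	static int condition;

	static void sleeper(void)
	{
		DEFINE_WAIT(wait);			/* uses autoremove_wake_function() */

		for (;;) {
			prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
			if (condition)
				break;
			schedule();			/* sleep until woken (or signalled) */
		}
		finish_wait(&my_wq, &wait);
	}

	static void waker(void)
	{
		condition = 1;
		wake_up(&my_wq);			/* one exclusive + all non-exclusive waiters */
	}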
...@@ -29,8 +29,8 @@ int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync ...@@ -29,8 +29,8 @@ int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync
wait_bit->key.bit_nr != key->bit_nr || wait_bit->key.bit_nr != key->bit_nr ||
test_bit(key->bit_nr, key->flags)) test_bit(key->bit_nr, key->flags))
return 0; return 0;
else
return autoremove_wake_function(wq_entry, mode, sync, key); return autoremove_wake_function(wq_entry, mode, sync, key);
} }
EXPORT_SYMBOL(wake_bit_function); EXPORT_SYMBOL(wake_bit_function);
...@@ -50,7 +50,9 @@ __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_ ...@@ -50,7 +50,9 @@ __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_
if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags))
ret = (*action)(&wbq_entry->key, mode); ret = (*action)(&wbq_entry->key, mode);
} while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret); } while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
finish_wait(wq_head, &wbq_entry->wq_entry); finish_wait(wq_head, &wbq_entry->wq_entry);
return ret; return ret;
} }
EXPORT_SYMBOL(__wait_on_bit); EXPORT_SYMBOL(__wait_on_bit);
...@@ -73,6 +75,7 @@ int __sched out_of_line_wait_on_bit_timeout( ...@@ -73,6 +75,7 @@ int __sched out_of_line_wait_on_bit_timeout(
DEFINE_WAIT_BIT(wq_entry, word, bit); DEFINE_WAIT_BIT(wq_entry, word, bit);
wq_entry.key.timeout = jiffies + timeout; wq_entry.key.timeout = jiffies + timeout;
return __wait_on_bit(wq_head, &wq_entry, action, mode); return __wait_on_bit(wq_head, &wq_entry, action, mode);
} }
EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout); EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);
...@@ -120,6 +123,7 @@ EXPORT_SYMBOL(out_of_line_wait_on_bit_lock); ...@@ -120,6 +123,7 @@ EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit) void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit)
{ {
struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit); struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
if (waitqueue_active(wq_head)) if (waitqueue_active(wq_head))
__wake_up(wq_head, TASK_NORMAL, 1, &key); __wake_up(wq_head, TASK_NORMAL, 1, &key);
} }
...@@ -157,6 +161,7 @@ static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p) ...@@ -157,6 +161,7 @@ static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
{ {
if (BITS_PER_LONG == 64) { if (BITS_PER_LONG == 64) {
unsigned long q = (unsigned long)p; unsigned long q = (unsigned long)p;
return bit_waitqueue((void *)(q & ~1), q & 1); return bit_waitqueue((void *)(q & ~1), q & 1);
} }
return bit_waitqueue(p, 0); return bit_waitqueue(p, 0);
...@@ -173,6 +178,7 @@ static int wake_atomic_t_function(struct wait_queue_entry *wq_entry, unsigned mo ...@@ -173,6 +178,7 @@ static int wake_atomic_t_function(struct wait_queue_entry *wq_entry, unsigned mo
wait_bit->key.bit_nr != key->bit_nr || wait_bit->key.bit_nr != key->bit_nr ||
atomic_read(val) != 0) atomic_read(val) != 0)
return 0; return 0;
return autoremove_wake_function(wq_entry, mode, sync, key); return autoremove_wake_function(wq_entry, mode, sync, key);
} }
...@@ -196,6 +202,7 @@ int __wait_on_atomic_t(struct wait_queue_head *wq_head, struct wait_bit_queue_en ...@@ -196,6 +202,7 @@ int __wait_on_atomic_t(struct wait_queue_head *wq_head, struct wait_bit_queue_en
ret = (*action)(val, mode); ret = (*action)(val, mode);
} while (!ret && atomic_read(val) != 0); } while (!ret && atomic_read(val) != 0);
finish_wait(wq_head, &wbq_entry->wq_entry); finish_wait(wq_head, &wbq_entry->wq_entry);
return ret; return ret;
} }
...@@ -226,6 +233,7 @@ __sched int atomic_t_wait(atomic_t *counter, unsigned int mode) ...@@ -226,6 +233,7 @@ __sched int atomic_t_wait(atomic_t *counter, unsigned int mode)
schedule(); schedule();
if (signal_pending_state(mode, current)) if (signal_pending_state(mode, current))
return -EINTR; return -EINTR;
return 0; return 0;
} }
EXPORT_SYMBOL(atomic_t_wait); EXPORT_SYMBOL(atomic_t_wait);
...@@ -250,6 +258,7 @@ __sched int bit_wait(struct wait_bit_key *word, int mode) ...@@ -250,6 +258,7 @@ __sched int bit_wait(struct wait_bit_key *word, int mode)
schedule(); schedule();
if (signal_pending_state(mode, current)) if (signal_pending_state(mode, current))
return -EINTR; return -EINTR;
return 0; return 0;
} }
EXPORT_SYMBOL(bit_wait); EXPORT_SYMBOL(bit_wait);
...@@ -259,6 +268,7 @@ __sched int bit_wait_io(struct wait_bit_key *word, int mode) ...@@ -259,6 +268,7 @@ __sched int bit_wait_io(struct wait_bit_key *word, int mode)
io_schedule(); io_schedule();
if (signal_pending_state(mode, current)) if (signal_pending_state(mode, current))
return -EINTR; return -EINTR;
return 0; return 0;
} }
EXPORT_SYMBOL(bit_wait_io); EXPORT_SYMBOL(bit_wait_io);
...@@ -266,11 +276,13 @@ EXPORT_SYMBOL(bit_wait_io); ...@@ -266,11 +276,13 @@ EXPORT_SYMBOL(bit_wait_io);
__sched int bit_wait_timeout(struct wait_bit_key *word, int mode) __sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
{ {
unsigned long now = READ_ONCE(jiffies); unsigned long now = READ_ONCE(jiffies);
if (time_after_eq(now, word->timeout)) if (time_after_eq(now, word->timeout))
return -EAGAIN; return -EAGAIN;
schedule_timeout(word->timeout - now); schedule_timeout(word->timeout - now);
if (signal_pending_state(mode, current)) if (signal_pending_state(mode, current))
return -EINTR; return -EINTR;
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(bit_wait_timeout); EXPORT_SYMBOL_GPL(bit_wait_timeout);
...@@ -278,11 +290,13 @@ EXPORT_SYMBOL_GPL(bit_wait_timeout); ...@@ -278,11 +290,13 @@ EXPORT_SYMBOL_GPL(bit_wait_timeout);
__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode) __sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
{ {
unsigned long now = READ_ONCE(jiffies); unsigned long now = READ_ONCE(jiffies);
if (time_after_eq(now, word->timeout)) if (time_after_eq(now, word->timeout))
return -EAGAIN; return -EAGAIN;
io_schedule_timeout(word->timeout - now); io_schedule_timeout(word->timeout - now);
if (signal_pending_state(mode, current)) if (signal_pending_state(mode, current))
return -EINTR; return -EINTR;
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(bit_wait_io_timeout); EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
......
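The bit_wait*() helpers above back the wait_on_bit() family; the usual pairing on the waker side is clear-then-wake, roughly as follows (kernel context assumed, sketch only):

	#include <linux/wait_bit.h>
	#include <linux/bitops.h>

	static unsigned long my_flags;
	#define MY_BIT_BUSY	0

	static int waiter(void)
	{
		/* Sleeps until MY_BIT_BUSY is clear; non-zero is returned only
		 * for interruptible sleep modes. */
		return wait_on_bit(&my_flags, MY_BIT_BUSY, TASK_UNINTERRUPTIBLE);
	}

	static void owner_done(void)
	{
		clear_bit_unlock(MY_BIT_BUSY, &my_flags);
		smp_mb__after_atomic();			/* order the clear before the waiter check */
		wake_up_bit(&my_flags, MY_BIT_BUSY);
	}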