Commit 8a99b683 authored by Peter Zijlstra

sched: Move SCHED_DEBUG sysctl to debugfs

Stop polluting sysctl with undocumented knobs that really are debug
only; move them all to /debug/sched/ along with the /debug/sched_*
files that already exist.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tested-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/20210412102001.287610138@infradead.org
parent d86ba831
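
As a quick illustration of the user-visible effect, here is a minimal userspace
sketch (not part of the commit) that reads one of the relocated knobs. It assumes
debugfs is mounted at /sys/kernel/debug; the /debug/sched/ path in the message
above simply refers to the debugfs mount point, and latency_ns is the file created
by the debugfs_create_u32() call in the diff below.

/*
 * Minimal sketch, not from the commit: read the relocated latency knob.
 * Assumes debugfs is mounted at /sys/kernel/debug (adjust the path if it
 * is mounted at /debug, as in the commit message).
 */
#include <stdio.h>

int main(void)
{
	unsigned int latency_ns;
	FILE *f = fopen("/sys/kernel/debug/sched/latency_ns", "r");

	if (!f) {
		perror("sched/latency_ns");
		return 1;
	}
	if (fscanf(f, "%u", &latency_ns) == 1)
		printf("sched latency: %u ns\n", latency_ns);
	fclose(f);
	return 0;
}

Writes work the same way; note that apart from tunable_scaling, which goes through
sched_scaling_fops, the moved knobs become plain debugfs u32 files, so the
extra1/extra2 bounds checking of the removed sysctl entries no longer applies.
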
@@ -26,10 +26,11 @@ int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
 enum { sysctl_hung_task_timeout_secs = 0 };
 #endif

+extern unsigned int sysctl_sched_child_runs_first;
+
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_child_runs_first;

 enum sched_tunable_scaling {
 	SCHED_TUNABLESCALING_NONE,
@@ -37,7 +38,7 @@ enum sched_tunable_scaling {
 	SCHED_TUNABLESCALING_LINEAR,
 	SCHED_TUNABLESCALING_END,
 };
-extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
+extern unsigned int sysctl_sched_tunable_scaling;

 extern unsigned int sysctl_numa_balancing_scan_delay;
 extern unsigned int sysctl_numa_balancing_scan_period_min;
@@ -47,9 +48,6 @@ extern unsigned int sysctl_numa_balancing_scan_size;
 #ifdef CONFIG_SCHED_DEBUG
 extern __read_mostly unsigned int sysctl_sched_migration_cost;
 extern __read_mostly unsigned int sysctl_sched_nr_migrate;
-
-int sched_proc_update_handler(struct ctl_table *table, int write,
-		void *buffer, size_t *length, loff_t *ppos);
 #endif

 /*
......
@@ -5504,9 +5504,11 @@ static const struct file_operations sched_dynamic_fops = {
 	.release	= single_release,
 };

+extern struct dentry *debugfs_sched;
+
 static __init int sched_init_debug_dynamic(void)
 {
-	debugfs_create_file("sched_preempt", 0644, NULL, NULL, &sched_dynamic_fops);
+	debugfs_create_file("sched_preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
 	return 0;
 }
 late_initcall(sched_init_debug_dynamic);
......
@@ -169,15 +169,81 @@ static const struct file_operations sched_feat_fops = {
 	.release	= single_release,
 };

+#ifdef CONFIG_SMP
+
+static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
+				   size_t cnt, loff_t *ppos)
+{
+	char buf[16];
+
+	if (cnt > 15)
+		cnt = 15;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	if (kstrtouint(buf, 10, &sysctl_sched_tunable_scaling))
+		return -EINVAL;
+
+	if (sched_update_scaling())
+		return -EINVAL;
+
+	*ppos += cnt;
+	return cnt;
+}
+
+static int sched_scaling_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
+	return 0;
+}
+
+static int sched_scaling_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_scaling_show, NULL);
+}
+
+static const struct file_operations sched_scaling_fops = {
+	.open		= sched_scaling_open,
+	.write		= sched_scaling_write,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#endif /* SMP */
+
 __read_mostly bool sched_debug_enabled;

+struct dentry *debugfs_sched;
+
 static __init int sched_init_debug(void)
 {
-	debugfs_create_file("sched_features", 0644, NULL, NULL,
-			&sched_feat_fops);
-	debugfs_create_bool("sched_debug", 0644, NULL,
-			&sched_debug_enabled);
+	struct dentry __maybe_unused *numa;
+
+	debugfs_sched = debugfs_create_dir("sched", NULL);
+
+	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
+	debugfs_create_bool("debug_enabled", 0644, debugfs_sched, &sched_debug_enabled);
+
+	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
+	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
+	debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);
+
+#ifdef CONFIG_SMP
+	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
+	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
+	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);
+#endif
+
+#ifdef CONFIG_NUMA_BALANCING
+	numa = debugfs_create_dir("numa_balancing", debugfs_sched);
+	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
+	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
+	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
+	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
+#endif
+
 	return 0;
 }
......
@@ -49,7 +49,7 @@ static unsigned int normalized_sysctl_sched_latency	= 6000000ULL;
  *
  * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
  */
-enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
+unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;

 /*
  * Minimal preemption granularity for CPU-bound tasks:
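
A worked example of the scaling comment just above, assuming only the formula it
states: with the default SCHED_TUNABLESCALING_LOG policy the base tunables are
scaled by 1 + ilog(ncpus), so on an 8-CPU system the factor is 1 + 3 = 4 and the
6000000ULL normalized latency becomes roughly 24 ms; with SCHED_TUNABLESCALING_NONE
the factor stays 1. This is the get_update_sysctl_factor() value used by
sched_update_scaling() in the next hunk.
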
@@ -634,15 +634,10 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
  * Scheduling class statistics methods:
  */
-int sched_proc_update_handler(struct ctl_table *table, int write,
-		void *buffer, size_t *lenp, loff_t *ppos)
+int sched_update_scaling(void)
 {
-	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 	unsigned int factor = get_update_sysctl_factor();

-	if (ret || !write)
-		return ret;
-
 	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
 					sysctl_sched_min_granularity);
......
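
For reference, a worked example of what sched_update_scaling() computes above:
sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency, sysctl_sched_min_granularity).
Taking the 6000000 ns latency visible in the earlier fair.c hunk and an assumed
default minimum granularity of 750000 ns, that is DIV_ROUND_UP(6000000, 750000) = 8,
i.e. eight minimum-granularity slices per latency period. The proc_dointvec_minmax()
bookkeeping disappears because the debugfs write handler sched_scaling_write() above
invokes this helper directly after parsing the new value.
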
@@ -1568,6 +1568,8 @@ static inline void unregister_sched_domain_sysctl(void)
 }
 #endif

+extern int sched_update_scaling(void);
+
 extern void flush_smp_call_function_from_idle(void);

 #else /* !CONFIG_SMP: */
......
@@ -184,17 +184,6 @@ static enum sysctl_writes_mode sysctl_writes_strict = SYSCTL_WRITES_STRICT;
 int sysctl_legacy_va_layout;
 #endif

-#ifdef CONFIG_SCHED_DEBUG
-static int min_sched_granularity_ns = 100000;		/* 100 usecs */
-static int max_sched_granularity_ns = NSEC_PER_SEC;	/* 1 second */
-static int min_wakeup_granularity_ns;			/* 0 usecs */
-static int max_wakeup_granularity_ns = NSEC_PER_SEC;	/* 1 second */
-#ifdef CONFIG_SMP
-static int min_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
-static int max_sched_tunable_scaling = SCHED_TUNABLESCALING_END-1;
-#endif /* CONFIG_SMP */
-#endif /* CONFIG_SCHED_DEBUG */
-
 #ifdef CONFIG_COMPACTION
 static int min_extfrag_threshold;
 static int max_extfrag_threshold = 1000;
@@ -1659,91 +1648,6 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
-#ifdef CONFIG_SCHED_DEBUG
-	{
-		.procname	= "sched_min_granularity_ns",
-		.data		= &sysctl_sched_min_granularity,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sched_proc_update_handler,
-		.extra1		= &min_sched_granularity_ns,
-		.extra2		= &max_sched_granularity_ns,
-	},
-	{
-		.procname	= "sched_latency_ns",
-		.data		= &sysctl_sched_latency,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sched_proc_update_handler,
-		.extra1		= &min_sched_granularity_ns,
-		.extra2		= &max_sched_granularity_ns,
-	},
-	{
-		.procname	= "sched_wakeup_granularity_ns",
-		.data		= &sysctl_sched_wakeup_granularity,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= sched_proc_update_handler,
-		.extra1		= &min_wakeup_granularity_ns,
-		.extra2		= &max_wakeup_granularity_ns,
-	},
-#ifdef CONFIG_SMP
-	{
-		.procname	= "sched_tunable_scaling",
-		.data		= &sysctl_sched_tunable_scaling,
-		.maxlen		= sizeof(enum sched_tunable_scaling),
-		.mode		= 0644,
-		.proc_handler	= sched_proc_update_handler,
-		.extra1		= &min_sched_tunable_scaling,
-		.extra2		= &max_sched_tunable_scaling,
-	},
-	{
-		.procname	= "sched_migration_cost_ns",
-		.data		= &sysctl_sched_migration_cost,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
-		.procname	= "sched_nr_migrate",
-		.data		= &sysctl_sched_nr_migrate,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-#endif /* CONFIG_SMP */
-#ifdef CONFIG_NUMA_BALANCING
-	{
-		.procname	= "numa_balancing_scan_delay_ms",
-		.data		= &sysctl_numa_balancing_scan_delay,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
-		.procname	= "numa_balancing_scan_period_min_ms",
-		.data		= &sysctl_numa_balancing_scan_period_min,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
-		.procname	= "numa_balancing_scan_period_max_ms",
-		.data		= &sysctl_numa_balancing_scan_period_max,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
-		.procname	= "numa_balancing_scan_size_mb",
-		.data		= &sysctl_numa_balancing_scan_size,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= SYSCTL_ONE,
-	},
-#endif /* CONFIG_NUMA_BALANCING */
-#endif /* CONFIG_SCHED_DEBUG */
 #ifdef CONFIG_SCHEDSTATS
 	{
 		.procname	= "sched_schedstats",
......