Commit 5580723f authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Misc fixlets: a fair number of them resulting from the new
  SCHED_DEADLINE code"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/deadline: Remove useless dl_nr_total
  sched/deadline: Test for CPU's presence explicitly
  sched: Add 'flags' argument to sched_{set,get}attr() syscalls
  sched: Fix information leak in sys_sched_getattr()
  sched,numa: add cond_resched to task_numa_work
  sched/core: Make dl_b->lock IRQ safe
  sched/core: Fix sched_rt_global_validate
  sched/deadline: Fix overflow to handle period==0 and deadline!=0
  sched/deadline: Fix bad accounting of nr_running
parents 9b3e7c9b 995b9ea4
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -281,13 +281,15 @@ asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
 asmlinkage long sys_sched_setparam(pid_t pid,
 					struct sched_param __user *param);
 asmlinkage long sys_sched_setattr(pid_t pid,
-					struct sched_attr __user *attr);
+					struct sched_attr __user *attr,
+					unsigned int flags);
 asmlinkage long sys_sched_getscheduler(pid_t pid);
 asmlinkage long sys_sched_getparam(pid_t pid,
 					struct sched_param __user *param);
 asmlinkage long sys_sched_getattr(pid_t pid,
 					struct sched_attr __user *attr,
-					unsigned int size);
+					unsigned int size,
+					unsigned int flags);
 asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
 					unsigned long __user *user_mask_ptr);
 asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
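Both syscalls gain a trailing flags argument, reserved for future extensions; as the hunks below show, any nonzero value is rejected with -EINVAL. Since glibc ships no wrappers for these calls, userspace goes through syscall(2). A minimal sketch, assuming the x86_64 syscall numbers (314 for sched_setattr, 315 for sched_getattr) and the initial sched_attr layout:

```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* sched_attr as it was introduced (SCHED_ATTR_SIZE_VER0 == 48 bytes);
 * there is no glibc wrapper, so userspace defines it itself. */
struct sched_attr {
        uint32_t size;
        uint32_t sched_policy;
        uint64_t sched_flags;
        int32_t  sched_nice;
        uint32_t sched_priority;
        /* SCHED_DEADLINE parameters, in nanoseconds */
        uint64_t sched_runtime;
        uint64_t sched_deadline;
        uint64_t sched_period;
};

int main(void)
{
        struct sched_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);       /* policy 0 == SCHED_NORMAL */

        /* Assumed x86_64 numbers: 314 = sched_setattr, 315 = sched_getattr.
         * pid 0 means the calling task; the trailing 0 is the new 'flags'
         * argument, which must be zero or the kernel returns -EINVAL. */
        if (syscall(314, 0, &attr, 0) == -1)
                perror("sched_setattr");

        if (syscall(315, 0, &attr, sizeof(attr), 0) == -1)
                perror("sched_getattr");
        else
                printf("policy=%u\n", attr.sched_policy);

        return 0;
}
```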
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1952,7 +1952,7 @@ static int dl_overflow(struct task_struct *p, int policy,
 		       const struct sched_attr *attr)
 {
 	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
-	u64 period = attr->sched_period;
+	u64 period = attr->sched_period ?: attr->sched_deadline;
 	u64 runtime = attr->sched_runtime;
 	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
 	int cpus, err = -1;
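The fix uses GCC's conditional-with-omitted-middle-operand extension: `a ?: b` yields `a` if `a` is nonzero, else `b`, evaluating `a` only once. A task with sched_period == 0 and sched_deadline != 0 therefore has its bandwidth checked against the deadline instead of a zero period. A two-line demonstration of the operator, compilable with GCC or Clang:

```c
#include <stdio.h>

int main(void)
{
        unsigned long long period = 0, deadline = 500000;

        /* GNU C extension: "a ?: b" is "a ? a : b" with "a" evaluated once. */
        unsigned long long effective = period ?: deadline;

        printf("%llu\n", effective);    /* prints 500000 */
        return 0;
}
```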
@@ -3661,13 +3661,14 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
  */
-SYSCALL_DEFINE2(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr)
+SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
+			       unsigned int, flags)
 {
 	struct sched_attr attr;
 	struct task_struct *p;
 	int retval;
 
-	if (!uattr || pid < 0)
+	if (!uattr || pid < 0 || flags)
 		return -EINVAL;
 
 	if (sched_copy_attr(uattr, &attr))
@@ -3786,7 +3787,7 @@ static int sched_read_attr(struct sched_attr __user *uattr,
 		attr->size = usize;
 	}
 
-	ret = copy_to_user(uattr, attr, usize);
+	ret = copy_to_user(uattr, attr, attr->size);
 	if (ret)
 		return -EFAULT;
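This is the information-leak fix: when userspace asks for more bytes than the kernel's struct sched_attr contains, the old code still copied the caller-supplied usize bytes, so everything past the structure came from uninitialized kernel stack. Copying attr->size bytes bounds the copy to what was actually filled in. A userspace sketch of the same bug pattern, with hypothetical names:

```c
#include <string.h>

struct reply { int a, b; };     /* the "kernel" structure: 8 bytes */

/* Hypothetical helper mirroring the buggy pattern: the caller asks for
 * req_size bytes, but only sizeof(struct reply) of them are initialized. */
static void fill_reply_buggy(char *dst, size_t req_size)
{
        char stack_buf[64];     /* uninitialized beyond the reply */
        struct reply r = { 1, 2 };

        memcpy(stack_buf, &r, sizeof(r));
        memcpy(dst, stack_buf, req_size);   /* BUG: leaks stack_buf[8..] */
}

static void fill_reply_fixed(char *dst, size_t req_size)
{
        struct reply r = { 1, 2 };
        size_t n = req_size < sizeof(r) ? req_size : sizeof(r);

        memcpy(dst, &r, n);     /* copy only the initialized bytes */
}

int main(void)
{
        char out[64];

        fill_reply_buggy(out, sizeof(out));  /* out[8..63] = stack garbage */
        fill_reply_fixed(out, sizeof(out));  /* out[8..63] left untouched  */
        return 0;
}
```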
@@ -3804,8 +3805,8 @@ static int sched_read_attr(struct sched_attr __user *uattr,
  * @uattr: structure containing the extended parameters.
  * @size: sizeof(attr) for fwd/bwd comp.
  */
-SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
-		unsigned int, size)
+SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
+		unsigned int, size, unsigned int, flags)
 {
 	struct sched_attr attr = {
 		.size = sizeof(struct sched_attr),
@@ -3814,7 +3815,7 @@ SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
 	int retval;
 
 	if (!uattr || pid < 0 || size > PAGE_SIZE ||
-	    size < SCHED_ATTR_SIZE_VER0)
+	    size < SCHED_ATTR_SIZE_VER0 || flags)
 		return -EINVAL;
 
 	rcu_read_lock();
@@ -7422,6 +7423,7 @@ static int sched_dl_global_constraints(void)
 	u64 period = global_rt_period();
 	u64 new_bw = to_ratio(period, runtime);
 	int cpu, ret = 0;
+	unsigned long flags;
 
 	/*
 	 * Here we want to check the bandwidth not being set to some
@@ -7435,10 +7437,10 @@ static int sched_dl_global_constraints(void)
 	for_each_possible_cpu(cpu) {
 		struct dl_bw *dl_b = dl_bw_of(cpu);
 
-		raw_spin_lock(&dl_b->lock);
+		raw_spin_lock_irqsave(&dl_b->lock, flags);
 		if (new_bw < dl_b->total_bw)
 			ret = -EBUSY;
-		raw_spin_unlock(&dl_b->lock);
+		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 
 		if (ret)
 			break;
@@ -7451,6 +7453,7 @@ static void sched_dl_do_global(void)
 {
 	u64 new_bw = -1;
 	int cpu;
+	unsigned long flags;
 
 	def_dl_bandwidth.dl_period = global_rt_period();
 	def_dl_bandwidth.dl_runtime = global_rt_runtime();
@@ -7464,9 +7467,9 @@ static void sched_dl_do_global(void)
 	for_each_possible_cpu(cpu) {
 		struct dl_bw *dl_b = dl_bw_of(cpu);
 
-		raw_spin_lock(&dl_b->lock);
+		raw_spin_lock_irqsave(&dl_b->lock, flags);
 		dl_b->bw = new_bw;
-		raw_spin_unlock(&dl_b->lock);
+		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 	}
 }
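These two hunks are the "Make dl_b->lock IRQ safe" fix. The general rule: a spinlock that can be taken in a context running with interrupts disabled must be acquired with interrupts disabled everywhere, otherwise an interrupt arriving on a CPU that already holds the lock with IRQs on can spin on it forever. The _irqsave/_irqrestore pair handles nesting by saving and restoring the caller's previous IRQ state. A kernel-style sketch of the pattern, using a hypothetical lock (a fragment as it would appear in kernel code, not a standalone program):

```c
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);   /* hypothetical lock */

static void touch_shared_state(void)
{
        unsigned long flags;

        /* IRQs off on this CPU; whether they were on is saved in flags. */
        raw_spin_lock_irqsave(&example_lock, flags);
        /* ... critical section, safe against local interrupts ... */
        raw_spin_unlock_irqrestore(&example_lock, flags);
        /* The caller's previous IRQ state is restored here. */
}
```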
@@ -7475,7 +7478,8 @@ static int sched_rt_global_validate(void)
 	if (sysctl_sched_rt_period <= 0)
 		return -EINVAL;
 
-	if (sysctl_sched_rt_runtime > sysctl_sched_rt_period)
+	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
+		(sysctl_sched_rt_runtime > sysctl_sched_rt_period))
 		return -EINVAL;
 
 	return 0;
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -70,7 +70,7 @@ static void cpudl_heapify(struct cpudl *cp, int idx)
 static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl)
 {
-	WARN_ON(idx > num_present_cpus() || idx == IDX_INVALID);
+	WARN_ON(!cpu_present(idx) || idx == IDX_INVALID);
 
 	if (dl_time_before(new_dl, cp->elements[idx].dl)) {
 		cp->elements[idx].dl = new_dl;
@@ -117,7 +117,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 	}
 
 out:
-	WARN_ON(best_cpu > num_present_cpus() && best_cpu != -1);
+	WARN_ON(!cpu_present(best_cpu) && best_cpu != -1);
 
 	return best_cpu;
 }
@@ -137,7 +137,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 	int old_idx, new_cpu;
 	unsigned long flags;
 
-	WARN_ON(cpu > num_present_cpus());
+	WARN_ON(!cpu_present(cpu));
 
 	raw_spin_lock_irqsave(&cp->lock, flags);
 	old_idx = cp->cpu_to_idx[cpu];
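All three warnings changed for the same reason: num_present_cpus() is a count, and present CPU IDs need not form the contiguous range 0..count-1 (a CPU in the middle of the map can be absent), so comparing an index against the count both misses holes and is off by one. cpu_present() tests membership in the present mask directly. A userspace simulation of the failure, with stand-in helpers for the two kernel macros:

```c
#include <stdbool.h>
#include <stdio.h>

/* Sketch of a sparse "present" mask: CPUs 0 and 2 booted, CPU 1 did not. */
static const unsigned long present_mask = (1UL << 0) | (1UL << 2);

static bool my_cpu_present(int cpu)     /* stand-in for cpu_present() */
{
        return present_mask & (1UL << cpu);
}

static int my_num_present_cpus(void)    /* stand-in for num_present_cpus() */
{
        return __builtin_popcountl(present_mask);
}

int main(void)
{
        int idx = 1;                    /* a hole in the sparse map */

        /* Old check: 1 > 2 is false, so the bogus index is NOT flagged. */
        printf("old check fires: %d\n", idx > my_num_present_cpus());
        /* New check: the membership test catches it. */
        printf("new check fires: %d\n", !my_cpu_present(idx));
        return 0;
}
```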
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -121,7 +121,7 @@ static inline void dl_clear_overload(struct rq *rq)
 static void update_dl_migration(struct dl_rq *dl_rq)
 {
-	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_total > 1) {
+	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
 		if (!dl_rq->overloaded) {
 			dl_set_overload(rq_of_dl_rq(dl_rq));
 			dl_rq->overloaded = 1;
@@ -137,7 +137,6 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	struct task_struct *p = dl_task_of(dl_se);
 
 	dl_rq = &rq_of_dl_rq(dl_rq)->dl;
-	dl_rq->dl_nr_total++;
 	if (p->nr_cpus_allowed > 1)
 		dl_rq->dl_nr_migratory++;
@@ -149,7 +148,6 @@ static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	struct task_struct *p = dl_task_of(dl_se);
 
 	dl_rq = &rq_of_dl_rq(dl_rq)->dl;
-	dl_rq->dl_nr_total--;
 	if (p->nr_cpus_allowed > 1)
 		dl_rq->dl_nr_migratory--;
@@ -717,6 +715,7 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	WARN_ON(!dl_prio(prio));
 	dl_rq->dl_nr_running++;
+	inc_nr_running(rq_of_dl_rq(dl_rq));
 
 	inc_dl_deadline(dl_rq, deadline);
 	inc_dl_migration(dl_se, dl_rq);
@@ -730,6 +729,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	WARN_ON(!dl_prio(prio));
 	WARN_ON(!dl_rq->dl_nr_running);
 	dl_rq->dl_nr_running--;
+	dec_nr_running(rq_of_dl_rq(dl_rq));
 
 	dec_dl_deadline(dl_rq, dl_se->deadline);
 	dec_dl_migration(dl_se, dl_rq);
@@ -836,8 +836,6 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_dl_task(rq, p);
-
-	inc_nr_running(rq);
 }
 
 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
@@ -850,8 +848,6 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
 	update_curr_dl(rq);
 	__dequeue_task_dl(rq, p, flags);
-
-	dec_nr_running(rq);
 }
 
 /*
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1757,6 +1757,8 @@ void task_numa_work(struct callback_head *work)
 		start = end;
 		if (pages <= 0)
 			goto out;
+
+		cond_resched();
 	} while (end != vma->vm_end);
 }
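task_numa_work() can walk a large stretch of a process's address space in one invocation; on kernels without forced preemption, a loop like this would otherwise run to completion without yielding the CPU. cond_resched() adds a voluntary preemption point on each iteration. A kernel-style sketch of the pattern, with hypothetical names (SCAN_CHUNK, process_chunk):

```c
/* Hypothetical long-running scan in process context. */
static void scan_range(unsigned long start, unsigned long end)
{
        unsigned long addr;

        for (addr = start; addr < end; addr += SCAN_CHUNK) {
                process_chunk(addr);    /* hypothetical per-chunk work */

                /*
                 * Voluntary preemption point: if a reschedule is pending
                 * (and no locks are held), yield the CPU here instead of
                 * monopolizing it for the whole scan.
                 */
                cond_resched();
        }
}
```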
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -462,7 +462,6 @@ struct dl_rq {
 	} earliest_dl;
 
 	unsigned long dl_nr_migratory;
-	unsigned long dl_nr_total;
 	int overloaded;
 
 	/*