Commit febe162d authored by Peter Zijlstra, committed by Ingo Molnar

sched: Simplify syscalls

Use guards to reduce gotos and simplify control flow.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 94b548a1
...@@ -7506,6 +7506,21 @@ static struct task_struct *find_process_by_pid(pid_t pid) ...@@ -7506,6 +7506,21 @@ static struct task_struct *find_process_by_pid(pid_t pid)
return pid ? find_task_by_vpid(pid) : current; return pid ? find_task_by_vpid(pid) : current;
} }
/*
 * find_get_task - look up a task by PID and take a reference on it.
 * @pid: the PID to look up; 0 means the calling task (per
 *       find_process_by_pid()).
 *
 * The RCU read-side critical section (via guard(rcu)) keeps the task
 * from being freed between the lookup and get_task_struct().
 *
 * Returns the referenced task, or NULL if no such process exists.
 * The caller must drop the reference with put_task_struct().
 */
static struct task_struct *find_get_task(pid_t pid)
{
	struct task_struct *tsk;

	guard(rcu)();
	tsk = find_process_by_pid(pid);
	if (likely(tsk))
		get_task_struct(tsk);

	return tsk;
}
/*
 * Scope-based cleanup class for find_get_task(): instances declared with
 * CLASS(find_get_task, p)(pid) hold a task reference that is automatically
 * dropped via put_task_struct() when the variable goes out of scope.
 * The NULL check in the destructor handles the lookup-failed case.
 */
DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T),
find_get_task(pid), pid_t pid)
/* /*
* sched_setparam() passes in -1 for its policy, to let the functions * sched_setparam() passes in -1 for its policy, to let the functions
* it calls know not to change it. * it calls know not to change it.
...@@ -7543,14 +7558,11 @@ static void __setscheduler_params(struct task_struct *p, ...@@ -7543,14 +7558,11 @@ static void __setscheduler_params(struct task_struct *p,
static bool check_same_owner(struct task_struct *p) static bool check_same_owner(struct task_struct *p)
{ {
const struct cred *cred = current_cred(), *pcred; const struct cred *cred = current_cred(), *pcred;
bool match; guard(rcu)();
rcu_read_lock();
pcred = __task_cred(p); pcred = __task_cred(p);
match = (uid_eq(cred->euid, pcred->euid) || return (uid_eq(cred->euid, pcred->euid) ||
uid_eq(cred->euid, pcred->uid)); uid_eq(cred->euid, pcred->uid));
rcu_read_unlock();
return match;
} }
/* /*
...@@ -7962,27 +7974,17 @@ static int ...@@ -7962,27 +7974,17 @@ static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{ {
struct sched_param lparam; struct sched_param lparam;
struct task_struct *p;
int retval;
if (!param || pid < 0) if (!param || pid < 0)
return -EINVAL; return -EINVAL;
if (copy_from_user(&lparam, param, sizeof(struct sched_param))) if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
return -EFAULT; return -EFAULT;
rcu_read_lock(); CLASS(find_get_task, p)(pid);
retval = -ESRCH; if (!p)
p = find_process_by_pid(pid); return -ESRCH;
if (likely(p))
get_task_struct(p);
rcu_read_unlock();
if (likely(p)) {
retval = sched_setscheduler(p, policy, &lparam);
put_task_struct(p);
}
return retval; return sched_setscheduler(p, policy, &lparam);
} }
/* /*
...@@ -8078,7 +8080,6 @@ SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, ...@@ -8078,7 +8080,6 @@ SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
unsigned int, flags) unsigned int, flags)
{ {
struct sched_attr attr; struct sched_attr attr;
struct task_struct *p;
int retval; int retval;
if (!uattr || pid < 0 || flags) if (!uattr || pid < 0 || flags)
...@@ -8093,21 +8094,14 @@ SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, ...@@ -8093,21 +8094,14 @@ SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY) if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
attr.sched_policy = SETPARAM_POLICY; attr.sched_policy = SETPARAM_POLICY;
rcu_read_lock(); CLASS(find_get_task, p)(pid);
retval = -ESRCH; if (!p)
p = find_process_by_pid(pid); return -ESRCH;
if (likely(p))
get_task_struct(p);
rcu_read_unlock();
if (likely(p)) {
if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS) if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
get_params(p, &attr); get_params(p, &attr);
retval = sched_setattr(p, &attr);
put_task_struct(p);
}
return retval; return sched_setattr(p, &attr);
} }
/** /**
...@@ -8125,16 +8119,17 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) ...@@ -8125,16 +8119,17 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
if (pid < 0) if (pid < 0)
return -EINVAL; return -EINVAL;
retval = -ESRCH; guard(rcu)();
rcu_read_lock();
p = find_process_by_pid(pid); p = find_process_by_pid(pid);
if (p) { if (!p)
return -ESRCH;
retval = security_task_getscheduler(p); retval = security_task_getscheduler(p);
if (!retval) if (!retval) {
retval = p->policy retval = p->policy;
| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); if (p->sched_reset_on_fork)
retval |= SCHED_RESET_ON_FORK;
} }
rcu_read_unlock();
return retval; return retval;
} }
...@@ -8155,30 +8150,23 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) ...@@ -8155,30 +8150,23 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
if (!param || pid < 0) if (!param || pid < 0)
return -EINVAL; return -EINVAL;
rcu_read_lock(); scoped_guard (rcu) {
p = find_process_by_pid(pid); p = find_process_by_pid(pid);
retval = -ESRCH;
if (!p) if (!p)
goto out_unlock; return -ESRCH;
retval = security_task_getscheduler(p); retval = security_task_getscheduler(p);
if (retval) if (retval)
goto out_unlock; return retval;
if (task_has_rt_policy(p)) if (task_has_rt_policy(p))
lp.sched_priority = p->rt_priority; lp.sched_priority = p->rt_priority;
rcu_read_unlock(); }
/* /*
* This one might sleep, we cannot do it with a spinlock held ... * This one might sleep, we cannot do it with a spinlock held ...
*/ */
retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
return retval;
out_unlock:
rcu_read_unlock();
return retval;
} }
/* /*
...@@ -8238,15 +8226,14 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, ...@@ -8238,15 +8226,14 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
usize < SCHED_ATTR_SIZE_VER0 || flags) usize < SCHED_ATTR_SIZE_VER0 || flags)
return -EINVAL; return -EINVAL;
rcu_read_lock(); scoped_guard (rcu) {
p = find_process_by_pid(pid); p = find_process_by_pid(pid);
retval = -ESRCH;
if (!p) if (!p)
goto out_unlock; return -ESRCH;
retval = security_task_getscheduler(p); retval = security_task_getscheduler(p);
if (retval) if (retval)
goto out_unlock; return retval;
kattr.sched_policy = p->policy; kattr.sched_policy = p->policy;
if (p->sched_reset_on_fork) if (p->sched_reset_on_fork)
...@@ -8263,14 +8250,9 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, ...@@ -8263,14 +8250,9 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
#endif #endif
}
rcu_read_unlock();
return sched_attr_copy_to_user(uattr, &kattr, usize); return sched_attr_copy_to_user(uattr, &kattr, usize);
out_unlock:
rcu_read_unlock();
return retval;
} }
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment