Commit 78f2c7db authored by Peter Zijlstra, committed by Ingo Molnar

sched: SCHED_FIFO/SCHED_RR watchdog timer

Introduce a new rlimit that allows the user to set a runtime timeout on a
real-time task's slice. Once this limit is exceeded, the task receives
SIGXCPU.

The limit measures runtime accumulated since the task last slept.
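
For context, here is a minimal userspace sketch (not part of this commit) of
how the new limit is intended to be used. The SCHED_FIFO priority and the
500 ms soft / 2 s hard budgets are arbitrary example values; RLIMIT_RTTIME is
defined locally (as 15, per the hunk below) in case the libc headers predate
this commit.

/*
 * Illustrative only: run a SCHED_FIFO busy loop under an RLIMIT_RTTIME
 * budget (in microseconds) and catch the resulting SIGXCPU.
 */
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/resource.h>

#ifndef RLIMIT_RTTIME
#define RLIMIT_RTTIME 15		/* value introduced by this commit */
#endif

static void on_sigxcpu(int sig)
{
	(void)sig;
	/* write() is async-signal-safe; printf() is not */
	write(STDERR_FILENO, "SIGXCPU: RT runtime budget exceeded\n", 36);
}

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };
	struct rlimit rl = { .rlim_cur = 500000, .rlim_max = 2000000 };

	signal(SIGXCPU, on_sigxcpu);

	if (sched_setscheduler(0, SCHED_FIFO, &sp))
		perror("sched_setscheduler");	/* needs RT privileges */
	if (setrlimit(RLIMIT_RTTIME, &rl))
		perror("setrlimit");

	for (;;)
		;	/* never sleeps, so the runtime counter only grows */
}

Because the loop never blocks, the task is signalled with SIGXCPU once it runs
past the soft limit and killed with SIGKILL once it runs past the hard limit.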

Input and ideas by Thomas Gleixner and Lennart Poettering.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
CC: Lennart Poettering <mzxreary@0pointer.de>
CC: Michael Kerrisk <mtk.manpages@googlemail.com>
CC: Ulrich Drepper <drepper@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent fa717060
@@ -44,8 +44,8 @@
 #define RLIMIT_NICE	13	/* max nice prio allowed to raise to
				   0-39 for nice level 19 .. -20 */
 #define RLIMIT_RTPRIO	14	/* maximum realtime priority */
+#define RLIMIT_RTTIME	15	/* timeout for RT tasks in us */
-#define RLIM_NLIMITS	15
+#define RLIM_NLIMITS	16

 /*
  * SuS says limits have to be unsigned.
@@ -86,6 +86,7 @@
 	[RLIMIT_MSGQUEUE]	= { MQ_BYTES_MAX, MQ_BYTES_MAX },	\
 	[RLIMIT_NICE]		= { 0, 0 },				\
 	[RLIMIT_RTPRIO]		= { 0, 0 },				\
+	[RLIMIT_RTTIME]		= { RLIM_INFINITY, RLIM_INFINITY },	\
 }

 #endif /* __KERNEL__ */
...
@@ -932,6 +932,7 @@ struct sched_entity {
 struct sched_rt_entity {
 	struct list_head run_list;
 	unsigned int time_slice;
+	unsigned long timeout;
 };

 struct task_struct {
...
@@ -967,6 +967,7 @@ static void check_thread_timers(struct task_struct *tsk,
 {
 	int maxfire;
 	struct list_head *timers = tsk->cpu_timers;
+	struct signal_struct *const sig = tsk->signal;

 	maxfire = 20;
 	tsk->it_prof_expires = cputime_zero;
@@ -1011,6 +1012,34 @@ static void check_thread_timers(struct task_struct *tsk,
 		t->firing = 1;
 		list_move_tail(&t->entry, firing);
 	}
+
+	/*
+	 * Check for the special case thread timers.
+	 */
+	if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
+		unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
+		unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;
+
+		if (tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
+			/*
+			 * At the hard limit, we just die.
+			 * No need to calculate anything else now.
+			 */
+			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
+			return;
+		}
+		if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
+			/*
+			 * At the soft limit, send a SIGXCPU every second.
+			 */
+			if (sig->rlim[RLIMIT_RTTIME].rlim_cur <
+			    sig->rlim[RLIMIT_RTTIME].rlim_max) {
+				sig->rlim[RLIMIT_RTTIME].rlim_cur +=
+							USEC_PER_SEC;
+			}
+			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
+		}
+	}
 }

 /*
...
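
Note that the comparisons above are in scheduler ticks: tsk->rt.timeout counts
ticks of RT runtime (it is incremented by the watchdog in the scheduler hunk
below), while the rlimit values are in microseconds, so each limit is
converted with DIV_ROUND_UP(limit, USEC_PER_SEC/HZ). A standalone sketch of
that arithmetic, assuming an illustrative HZ of 1000:

#include <stdio.h>

#define HZ		1000		/* illustrative tick rate, not universal */
#define USEC_PER_SEC	1000000UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long soft_us = 950500;	/* hypothetical 950.5 ms soft limit */

	/* one tick is USEC_PER_SEC/HZ = 1000 us; rounding up means a
	 * partial tick still counts against the task: 950500 us -> 951 */
	unsigned long soft_ticks = DIV_ROUND_UP(soft_us, USEC_PER_SEC / HZ);

	printf("%lu us = %lu ticks\n", soft_us, soft_ticks);
	return 0;
}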
@@ -116,6 +116,9 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 	inc_cpu_load(rq, p->se.load.weight);

 	inc_rt_tasks(p, rq);
+
+	if (wakeup)
+		p->rt.timeout = 0;
 }

 /*
@@ -834,11 +837,38 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p,
 	}
 }

+static void watchdog(struct rq *rq, struct task_struct *p)
+{
+	unsigned long soft, hard;
+
+	if (!p->signal)
+		return;
+
+	soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
+	hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
+
+	if (soft != RLIM_INFINITY) {
+		unsigned long next;
+
+		p->rt.timeout++;
+		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
+		if (next > p->rt.timeout) {
+			u64 next_time = p->se.sum_exec_runtime;
+
+			next_time += next * (NSEC_PER_SEC/HZ);
+			if (p->it_sched_expires > next_time)
+				p->it_sched_expires = next_time;
+		} else
+			p->it_sched_expires = p->se.sum_exec_runtime;
+	}
+}
+
 static void task_tick_rt(struct rq *rq, struct task_struct *p)
 {
 	update_curr_rt(rq);

+	watchdog(rq, p);
+
 	/*
 	 * RR tasks need a special form of timeslice management.
 	 * FIFO tasks have no timeslices.
...
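
Taken together: watchdog() advances p->rt.timeout once per tick of RT runtime
and pulls it_sched_expires forward so that check_thread_timers() above runs
when a limit can trip. There, crossing the soft limit raises SIGXCPU and
advances rlim_cur by one second, so SIGXCPU repeats roughly once per second
of CPU time until the hard limit delivers SIGKILL. A userspace simulation of
that escalation (an illustration, not kernel code; the 0.5 s soft / 3 s hard
limits are arbitrary):

#include <stdio.h>

#define USEC_PER_SEC 1000000UL

int main(void)
{
	unsigned long rlim_cur = 500000;	/* 0.5 s soft limit (example) */
	unsigned long rlim_max = 3000000;	/* 3 s hard limit (example) */
	unsigned long runtime_us;

	/* pretend the task accumulates runtime in 100 ms steps */
	for (runtime_us = 0; ; runtime_us += 100000) {
		if (runtime_us > rlim_max) {
			printf("%.1fs: SIGKILL (hard limit)\n", runtime_us / 1e6);
			break;
		}
		if (runtime_us > rlim_cur) {
			printf("%.1fs: SIGXCPU (soft limit was %.1fs)\n",
			       runtime_us / 1e6, rlim_cur / 1e6);
			if (rlim_cur < rlim_max)
				rlim_cur += USEC_PER_SEC;	/* as in check_thread_timers() */
		}
	}
	return 0;
}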