Commit ee30a7b2 authored by Thomas Gleixner, committed by Ingo Molnar

locking, sched: Annotate thread_group_cputimer as raw

The thread_group_cputimer lock can be taken in atomic context and therefore
cannot become a preemptible (sleeping) lock on -rt; annotate it as raw.

In mainline this change documents the low-level nature of the lock;
otherwise there is no functional difference. Lockdep and Sparse
checking will work as usual.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 07354eb1
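
As background for the diff below: on PREEMPT_RT a spinlock_t is substituted by a
sleeping lock, while a raw_spinlock_t keeps the classic non-preemptible spinning
behaviour, so a lock taken from atomic (e.g. timer/IRQ) paths has to be the raw
variant. A minimal sketch of that pattern follows; the example_* identifiers are
hypothetical and not taken from this commit.

/* Illustrative sketch only; the example_* names are hypothetical. */
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_cputimer {
	u64 sum_exec_runtime;
	int running;
	raw_spinlock_t lock;		/* stays a spinning lock on -rt */
};

static inline void example_cputimer_init(struct example_cputimer *ct)
{
	ct->running = 0;
	ct->sum_exec_runtime = 0;
	raw_spin_lock_init(&ct->lock);
}

/* Callable from atomic context: the critical section must not sleep,
 * which is why the lock may not be converted to a sleeping lock on -rt. */
static inline void example_account_runtime(struct example_cputimer *ct, u64 ns)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ct->lock, flags);
	ct->sum_exec_runtime += ns;
	raw_spin_unlock_irqrestore(&ct->lock, flags);
}

The hunks below apply exactly this substitution (spin_lock_* to raw_spin_lock_*)
to the existing thread_group_cputimer lock.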
@@ -42,7 +42,7 @@ extern struct fs_struct init_fs;
 	.cputimer	= {						\
 		.cputime = INIT_CPUTIME,				\
 		.running = 0,						\
-		.lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock),	\
+		.lock = __RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock),	\
 	},								\
 	.cred_guard_mutex =						\
 		 __MUTEX_INITIALIZER(sig.cred_guard_mutex),		\
@@ -510,7 +510,7 @@ struct task_cputime {
 struct thread_group_cputimer {
 	struct task_cputime cputime;
 	int running;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 };
 
 #include <linux/rwsem.h>
@@ -2566,7 +2566,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
 
 static inline void thread_group_cputime_init(struct signal_struct *sig)
 {
-	spin_lock_init(&sig->cputimer.lock);
+	raw_spin_lock_init(&sig->cputimer.lock);
 }
 
 /*
@@ -274,7 +274,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 	struct task_cputime sum;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cputimer->lock, flags);
+	raw_spin_lock_irqsave(&cputimer->lock, flags);
 	if (!cputimer->running) {
 		cputimer->running = 1;
 		/*
@@ -287,7 +287,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 		update_gt_cputime(&cputimer->cputime, &sum);
 	}
 	*times = cputimer->cputime;
-	spin_unlock_irqrestore(&cputimer->lock, flags);
+	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
 }
 
 /*
@@ -997,9 +997,9 @@ static void stop_process_timers(struct signal_struct *sig)
 	struct thread_group_cputimer *cputimer = &sig->cputimer;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cputimer->lock, flags);
+	raw_spin_lock_irqsave(&cputimer->lock, flags);
 	cputimer->running = 0;
-	spin_unlock_irqrestore(&cputimer->lock, flags);
+	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
 }
 
 static u32 onecputick;
@@ -1289,9 +1289,9 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 	if (sig->cputimer.running) {
 		struct task_cputime group_sample;
 
-		spin_lock(&sig->cputimer.lock);
+		raw_spin_lock(&sig->cputimer.lock);
 		group_sample = sig->cputimer.cputime;
-		spin_unlock(&sig->cputimer.lock);
+		raw_spin_unlock(&sig->cputimer.lock);
 
 		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
 			return 1;
@@ -282,10 +282,10 @@ static inline void account_group_user_time(struct task_struct *tsk,
 	if (!cputimer->running)
 		return;
 
-	spin_lock(&cputimer->lock);
+	raw_spin_lock(&cputimer->lock);
 	cputimer->cputime.utime =
 		cputime_add(cputimer->cputime.utime, cputime);
-	spin_unlock(&cputimer->lock);
+	raw_spin_unlock(&cputimer->lock);
 }
 
 /**
@@ -306,10 +306,10 @@ static inline void account_group_system_time(struct task_struct *tsk,
 	if (!cputimer->running)
 		return;
 
-	spin_lock(&cputimer->lock);
+	raw_spin_lock(&cputimer->lock);
 	cputimer->cputime.stime =
 		cputime_add(cputimer->cputime.stime, cputime);
-	spin_unlock(&cputimer->lock);
+	raw_spin_unlock(&cputimer->lock);
 }
 
 /**
@@ -330,7 +330,7 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
 	if (!cputimer->running)
 		return;
 
-	spin_lock(&cputimer->lock);
+	raw_spin_lock(&cputimer->lock);
 	cputimer->cputime.sum_exec_runtime += ns;
-	spin_unlock(&cputimer->lock);
+	raw_spin_unlock(&cputimer->lock);
 }