Commit bf9fae9f authored by Frederic Weisbecker's avatar Frederic Weisbecker

cputime: Use a proper subsystem naming for vtime related APIs

Use a naming based on vtime as a prefix for virtual based
cputime accounting APIs:

- account_system_vtime() -> vtime_account()
- account_switch_vtime() -> vtime_task_switch()

It makes it easier to allow for further variants such
as vtime_account_system(), vtime_account_idle(), ... if we
want to find out the context we account to from generic code.

This also makes it clearer which subsystem these APIs
belong to.
Signed-off-by: default avatarFrederic Weisbecker <fweisbec@gmail.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
parent bc2a27cd
...@@ -88,7 +88,7 @@ extern cputime_t cycle_to_cputime(u64 cyc); ...@@ -88,7 +88,7 @@ extern cputime_t cycle_to_cputime(u64 cyc);
* accumulated times to the current process, and to prepare accounting on * accumulated times to the current process, and to prepare accounting on
* the next process. * the next process.
*/ */
void account_switch_vtime(struct task_struct *prev) void vtime_task_switch(struct task_struct *prev)
{ {
struct thread_info *pi = task_thread_info(prev); struct thread_info *pi = task_thread_info(prev);
struct thread_info *ni = task_thread_info(current); struct thread_info *ni = task_thread_info(current);
...@@ -116,7 +116,7 @@ void account_switch_vtime(struct task_struct *prev) ...@@ -116,7 +116,7 @@ void account_switch_vtime(struct task_struct *prev)
* Account time for a transition between system, hard irq or soft irq state. * Account time for a transition between system, hard irq or soft irq state.
* Note that this function is called with interrupts enabled. * Note that this function is called with interrupts enabled.
*/ */
void account_system_vtime(struct task_struct *tsk) void vtime_account(struct task_struct *tsk)
{ {
struct thread_info *ti = task_thread_info(tsk); struct thread_info *ti = task_thread_info(tsk);
unsigned long flags; unsigned long flags;
...@@ -138,7 +138,7 @@ void account_system_vtime(struct task_struct *tsk) ...@@ -138,7 +138,7 @@ void account_system_vtime(struct task_struct *tsk)
local_irq_restore(flags); local_irq_restore(flags);
} }
EXPORT_SYMBOL_GPL(account_system_vtime); EXPORT_SYMBOL_GPL(vtime_account);
/* /*
* Called from the timer interrupt handler to charge accumulated user time * Called from the timer interrupt handler to charge accumulated user time
......
...@@ -291,7 +291,7 @@ static inline u64 calculate_stolen_time(u64 stop_tb) ...@@ -291,7 +291,7 @@ static inline u64 calculate_stolen_time(u64 stop_tb)
* Account time for a transition between system, hard irq * Account time for a transition between system, hard irq
* or soft irq state. * or soft irq state.
*/ */
void account_system_vtime(struct task_struct *tsk) void vtime_account(struct task_struct *tsk)
{ {
u64 now, nowscaled, delta, deltascaled; u64 now, nowscaled, delta, deltascaled;
unsigned long flags; unsigned long flags;
...@@ -343,14 +343,14 @@ void account_system_vtime(struct task_struct *tsk) ...@@ -343,14 +343,14 @@ void account_system_vtime(struct task_struct *tsk)
} }
local_irq_restore(flags); local_irq_restore(flags);
} }
EXPORT_SYMBOL_GPL(account_system_vtime); EXPORT_SYMBOL_GPL(vtime_account);
/* /*
* Transfer the user and system times accumulated in the paca * Transfer the user and system times accumulated in the paca
* by the exception entry and exit code to the generic process * by the exception entry and exit code to the generic process
* user and system time records. * user and system time records.
* Must be called with interrupts disabled. * Must be called with interrupts disabled.
* Assumes that account_system_vtime() has been called recently * Assumes that vtime_account() has been called recently
* (i.e. since the last entry from usermode) so that * (i.e. since the last entry from usermode) so that
* get_paca()->user_time_scaled is up to date. * get_paca()->user_time_scaled is up to date.
*/ */
...@@ -366,9 +366,9 @@ void account_process_tick(struct task_struct *tsk, int user_tick) ...@@ -366,9 +366,9 @@ void account_process_tick(struct task_struct *tsk, int user_tick)
account_user_time(tsk, utime, utimescaled); account_user_time(tsk, utime, utimescaled);
} }
void account_switch_vtime(struct task_struct *prev) void vtime_task_switch(struct task_struct *prev)
{ {
account_system_vtime(prev); vtime_account(prev);
account_process_tick(prev, 0); account_process_tick(prev, 0);
} }
......
...@@ -99,7 +99,7 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) ...@@ -99,7 +99,7 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
return virt_timer_forward(user + system); return virt_timer_forward(user + system);
} }
void account_switch_vtime(struct task_struct *prev) void vtime_task_switch(struct task_struct *prev)
{ {
struct thread_info *ti; struct thread_info *ti;
...@@ -122,7 +122,7 @@ void account_process_tick(struct task_struct *tsk, int user_tick) ...@@ -122,7 +122,7 @@ void account_process_tick(struct task_struct *tsk, int user_tick)
* Update process times based on virtual cpu times stored by entry.S * Update process times based on virtual cpu times stored by entry.S
* to the lowcore fields user_timer, system_timer & steal_clock. * to the lowcore fields user_timer, system_timer & steal_clock.
*/ */
void account_system_vtime(struct task_struct *tsk) void vtime_account(struct task_struct *tsk)
{ {
struct thread_info *ti = task_thread_info(tsk); struct thread_info *ti = task_thread_info(tsk);
u64 timer, system; u64 timer, system;
...@@ -138,7 +138,7 @@ void account_system_vtime(struct task_struct *tsk) ...@@ -138,7 +138,7 @@ void account_system_vtime(struct task_struct *tsk)
virt_timer_forward(system); virt_timer_forward(system);
} }
EXPORT_SYMBOL_GPL(account_system_vtime); EXPORT_SYMBOL_GPL(vtime_account);
void __kprobes vtime_stop_cpu(void) void __kprobes vtime_stop_cpu(void)
{ {
......
...@@ -132,11 +132,11 @@ extern void synchronize_irq(unsigned int irq); ...@@ -132,11 +132,11 @@ extern void synchronize_irq(unsigned int irq);
struct task_struct; struct task_struct;
#if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING) #if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING)
static inline void account_system_vtime(struct task_struct *tsk) static inline void vtime_account(struct task_struct *tsk)
{ {
} }
#else #else
extern void account_system_vtime(struct task_struct *tsk); extern void vtime_account(struct task_struct *tsk);
#endif #endif
#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) #if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
...@@ -162,7 +162,7 @@ extern void rcu_nmi_exit(void); ...@@ -162,7 +162,7 @@ extern void rcu_nmi_exit(void);
*/ */
#define __irq_enter() \ #define __irq_enter() \
do { \ do { \
account_system_vtime(current); \ vtime_account(current); \
add_preempt_count(HARDIRQ_OFFSET); \ add_preempt_count(HARDIRQ_OFFSET); \
trace_hardirq_enter(); \ trace_hardirq_enter(); \
} while (0) } while (0)
...@@ -178,7 +178,7 @@ extern void irq_enter(void); ...@@ -178,7 +178,7 @@ extern void irq_enter(void);
#define __irq_exit() \ #define __irq_exit() \
do { \ do { \
trace_hardirq_exit(); \ trace_hardirq_exit(); \
account_system_vtime(current); \ vtime_account(current); \
sub_preempt_count(HARDIRQ_OFFSET); \ sub_preempt_count(HARDIRQ_OFFSET); \
} while (0) } while (0)
......
...@@ -131,9 +131,9 @@ extern void account_steal_ticks(unsigned long ticks); ...@@ -131,9 +131,9 @@ extern void account_steal_ticks(unsigned long ticks);
extern void account_idle_ticks(unsigned long ticks); extern void account_idle_ticks(unsigned long ticks);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING #ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_switch_vtime(struct task_struct *prev); extern void vtime_task_switch(struct task_struct *prev);
#else #else
static inline void account_switch_vtime(struct task_struct *prev) { } static inline void vtime_task_switch(struct task_struct *prev) { }
#endif #endif
#endif /* _LINUX_KERNEL_STAT_H */ #endif /* _LINUX_KERNEL_STAT_H */
...@@ -685,7 +685,7 @@ static inline int kvm_deassign_device(struct kvm *kvm, ...@@ -685,7 +685,7 @@ static inline int kvm_deassign_device(struct kvm *kvm,
static inline void kvm_guest_enter(void) static inline void kvm_guest_enter(void)
{ {
BUG_ON(preemptible()); BUG_ON(preemptible());
account_system_vtime(current); vtime_account(current);
current->flags |= PF_VCPU; current->flags |= PF_VCPU;
/* KVM does not hold any references to rcu protected data when it /* KVM does not hold any references to rcu protected data when it
* switches CPU into a guest mode. In fact switching to a guest mode * switches CPU into a guest mode. In fact switching to a guest mode
...@@ -699,7 +699,7 @@ static inline void kvm_guest_enter(void) ...@@ -699,7 +699,7 @@ static inline void kvm_guest_enter(void)
static inline void kvm_guest_exit(void) static inline void kvm_guest_exit(void)
{ {
account_system_vtime(current); vtime_account(current);
current->flags &= ~PF_VCPU; current->flags &= ~PF_VCPU;
} }
......
...@@ -1764,7 +1764,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) ...@@ -1764,7 +1764,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
* Manfred Spraul <manfred@colorfullife.com> * Manfred Spraul <manfred@colorfullife.com>
*/ */
prev_state = prev->state; prev_state = prev->state;
account_switch_vtime(prev); vtime_task_switch(prev);
finish_arch_switch(prev); finish_arch_switch(prev);
perf_event_task_sched_in(prev, current); perf_event_task_sched_in(prev, current);
finish_lock_switch(rq, prev); finish_lock_switch(rq, prev);
......
...@@ -10,11 +10,11 @@ ...@@ -10,11 +10,11 @@
/* /*
* There are no locks covering percpu hardirq/softirq time. * There are no locks covering percpu hardirq/softirq time.
* They are only modified in account_system_vtime, on corresponding CPU * They are only modified in vtime_account, on corresponding CPU
* with interrupts disabled. So, writes are safe. * with interrupts disabled. So, writes are safe.
* They are read and saved off onto struct rq in update_rq_clock(). * They are read and saved off onto struct rq in update_rq_clock().
* This may result in other CPU reading this CPU's irq time and can * This may result in other CPU reading this CPU's irq time and can
* race with irq/account_system_vtime on this CPU. We would either get old * race with irq/vtime_account on this CPU. We would either get old
* or new value with a side effect of accounting a slice of irq time to wrong * or new value with a side effect of accounting a slice of irq time to wrong
* task when irq is in progress while we read rq->clock. That is a worthy * task when irq is in progress while we read rq->clock. That is a worthy
* compromise in place of having locks on each irq in account_system_time. * compromise in place of having locks on each irq in account_system_time.
...@@ -43,7 +43,7 @@ DEFINE_PER_CPU(seqcount_t, irq_time_seq); ...@@ -43,7 +43,7 @@ DEFINE_PER_CPU(seqcount_t, irq_time_seq);
* Called before incrementing preempt_count on {soft,}irq_enter * Called before incrementing preempt_count on {soft,}irq_enter
* and before decrementing preempt_count on {soft,}irq_exit. * and before decrementing preempt_count on {soft,}irq_exit.
*/ */
void account_system_vtime(struct task_struct *curr) void vtime_account(struct task_struct *curr)
{ {
unsigned long flags; unsigned long flags;
s64 delta; s64 delta;
...@@ -73,7 +73,7 @@ void account_system_vtime(struct task_struct *curr) ...@@ -73,7 +73,7 @@ void account_system_vtime(struct task_struct *curr)
irq_time_write_end(); irq_time_write_end();
local_irq_restore(flags); local_irq_restore(flags);
} }
EXPORT_SYMBOL_GPL(account_system_vtime); EXPORT_SYMBOL_GPL(vtime_account);
static int irqtime_account_hi_update(void) static int irqtime_account_hi_update(void)
{ {
......
...@@ -220,7 +220,7 @@ asmlinkage void __do_softirq(void) ...@@ -220,7 +220,7 @@ asmlinkage void __do_softirq(void)
current->flags &= ~PF_MEMALLOC; current->flags &= ~PF_MEMALLOC;
pending = local_softirq_pending(); pending = local_softirq_pending();
account_system_vtime(current); vtime_account(current);
__local_bh_disable((unsigned long)__builtin_return_address(0), __local_bh_disable((unsigned long)__builtin_return_address(0),
SOFTIRQ_OFFSET); SOFTIRQ_OFFSET);
...@@ -271,7 +271,7 @@ asmlinkage void __do_softirq(void) ...@@ -271,7 +271,7 @@ asmlinkage void __do_softirq(void)
lockdep_softirq_exit(); lockdep_softirq_exit();
account_system_vtime(current); vtime_account(current);
__local_bh_enable(SOFTIRQ_OFFSET); __local_bh_enable(SOFTIRQ_OFFSET);
tsk_restore_flags(current, old_flags, PF_MEMALLOC); tsk_restore_flags(current, old_flags, PF_MEMALLOC);
} }
...@@ -340,7 +340,7 @@ static inline void invoke_softirq(void) ...@@ -340,7 +340,7 @@ static inline void invoke_softirq(void)
*/ */
void irq_exit(void) void irq_exit(void)
{ {
account_system_vtime(current); vtime_account(current);
trace_hardirq_exit(); trace_hardirq_exit();
sub_preempt_count(IRQ_EXIT_OFFSET); sub_preempt_count(IRQ_EXIT_OFFSET);
if (!in_interrupt() && local_softirq_pending()) if (!in_interrupt() && local_softirq_pending())
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment