Commit 9cfb9b3c authored by Martin Schwidefsky

[PATCH] improve idle cputime accounting

Distinguish the cputime of the idle process where idle is actually using
cpu cycles from the cputime where idle is sleeping on an enabled wait psw.
The former is accounted as system time, the latter as idle time.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 6f430924
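The split works by storing the clock immediately before the wait PSW is loaded (idle_enter) and, on the next interrupt, charging only the span from that timestamp to the interrupt clock as idle time; the cycles spent getting into and out of the wait state stay accounted as system time. Below is a minimal, self-contained sketch of that bookkeeping. It reuses the field names of struct s390_idle_data that appear in the diff (idle_enter, idle_time, idle_count) and mirrors the arithmetic of the removed s390_idle_leave(); the helper name account_idle_exit and the clock values are made up for illustration.

```c
#include <stdio.h>

/* Simplified stand-in for the per-cpu struct s390_idle_data used in the diff. */
struct idle_data {
	unsigned long long idle_enter;	/* clock taken right before the wait PSW, 0 = not idle */
	unsigned long long idle_time;	/* accumulated idle (enabled wait) time */
	unsigned long long idle_count;	/* completed idle periods */
};

/* What the idle exit path (s390_idle_check() -> vtime_start_cpu()) has to
 * compute: only the interval between the wait-PSW timestamp and the interrupt
 * clock (stored by the entry code with stck into __LC_INT_CLOCK) counts as
 * idle; everything before and after remains system time. */
static void account_idle_exit(struct idle_data *idle, unsigned long long int_clock)
{
	if (idle->idle_enter == 0ULL)
		return;			/* the CPU was interrupted while busy, nothing to do */
	idle->idle_time += int_clock - idle->idle_enter;
	idle->idle_count++;
	idle->idle_enter = 0ULL;
}

int main(void)
{
	struct idle_data idle = { 0, 0, 0 };

	idle.idle_enter = 1000;		/* made-up clock: wait PSW loaded at 1000 */
	account_idle_exit(&idle, 1850);	/* made-up clock: interrupt arrives at 1850 */

	printf("idle periods: %llu, idle time: %llu\n", idle.idle_count, idle.idle_time);
	return 0;
}
```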
@@ -21,12 +21,12 @@ struct s390_idle_data {

 DECLARE_PER_CPU(struct s390_idle_data, s390_idle);

-void s390_idle_leave(void);
+void vtime_start_cpu(void);

 static inline void s390_idle_check(void)
 {
 	if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL)
-		s390_idle_leave();
+		vtime_start_cpu();
 }

 #endif /* _ASM_S390_CPU_H_ */
@@ -23,20 +23,18 @@ struct vtimer_list {
 	__u64 expires;
 	__u64 interval;

-	spinlock_t lock;
-	unsigned long magic;
-
 	void (*function)(unsigned long);
 	unsigned long data;
 };

-/* the offset value will wrap after ca. 71 years */
+/* the vtimer value will wrap after ca. 71 years */
 struct vtimer_queue {
 	struct list_head list;
 	spinlock_t lock;
-	__u64 to_expire;	/* current event expire time */
-	__u64 offset;		/* list offset to zero */
+	__u64 timer;		/* last programmed timer */
+	__u64 elapsed;		/* elapsed time of timer expire values */
 	__u64 idle;		/* temp var for idle */
+	int do_spt;		/* =1: reprogram cpu timer in idle */
 };

 extern void init_virt_timer(struct vtimer_list *timer);
@@ -48,8 +46,8 @@ extern int del_virt_timer(struct vtimer_list *timer);

 extern void init_cpu_vtimer(void);
 extern void vtime_init(void);

-extern void vtime_start_cpu_timer(void);
-extern void vtime_stop_cpu_timer(void);
+extern void vtime_stop_cpu(void);
+extern void vtime_start_leave(void);

 #endif /* __KERNEL__ */
...
@@ -583,8 +583,8 @@ kernel_per:

 	.globl io_int_handler
 io_int_handler:
-	stpt	__LC_ASYNC_ENTER_TIMER
 	stck	__LC_INT_CLOCK
+	stpt	__LC_ASYNC_ENTER_TIMER
 	SAVE_ALL_BASE __LC_SAVE_AREA+16
 	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
 	CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
@@ -723,8 +723,8 @@ io_notify_resume:

 	.globl ext_int_handler
 ext_int_handler:
-	stpt	__LC_ASYNC_ENTER_TIMER
 	stck	__LC_INT_CLOCK
+	stpt	__LC_ASYNC_ENTER_TIMER
 	SAVE_ALL_BASE __LC_SAVE_AREA+16
 	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
 	CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
@@ -750,6 +750,7 @@ __critical_end:

 	.globl mcck_int_handler
 mcck_int_handler:
+	stck	__LC_INT_CLOCK
 	spt	__LC_CPU_TIMER_SAVE_AREA	# revalidate cpu timer
 	lm	%r0,%r15,__LC_GPREGS_SAVE_AREA	# revalidate gprs
 	SAVE_ALL_BASE __LC_SAVE_AREA+32
...
@@ -559,8 +559,8 @@ kernel_per:
 */
 	.globl io_int_handler
 io_int_handler:
-	stpt	__LC_ASYNC_ENTER_TIMER
 	stck	__LC_INT_CLOCK
+	stpt	__LC_ASYNC_ENTER_TIMER
 	SAVE_ALL_BASE __LC_SAVE_AREA+32
 	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
 	CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
@@ -721,8 +721,8 @@ io_notify_resume:
 */
 	.globl ext_int_handler
 ext_int_handler:
-	stpt	__LC_ASYNC_ENTER_TIMER
 	stck	__LC_INT_CLOCK
+	stpt	__LC_ASYNC_ENTER_TIMER
 	SAVE_ALL_BASE __LC_SAVE_AREA+32
 	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
 	CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
@@ -746,6 +746,7 @@ __critical_end:
 */
 	.globl mcck_int_handler
 mcck_int_handler:
+	stck	__LC_INT_CLOCK
 	la	%r1,4095		# revalidate r1
 	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
 	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)	# revalidate gprs
...
@@ -46,7 +46,6 @@
 #include <asm/processor.h>
 #include <asm/irq.h>
 #include <asm/timer.h>
-#include <asm/cpu.h>
 #include "entry.h"

 asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
@@ -76,35 +75,12 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 	return sf->gprs[8];
 }

-DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = {
-	.lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock)
-};
-
-void s390_idle_leave(void)
-{
-	struct s390_idle_data *idle;
-	unsigned long long idle_time;
-
-	idle = &__get_cpu_var(s390_idle);
-	idle_time = S390_lowcore.int_clock - idle->idle_enter;
-	spin_lock(&idle->lock);
-	idle->idle_time += idle_time;
-	idle->idle_enter = 0ULL;
-	idle->idle_count++;
-	spin_unlock(&idle->lock);
-	vtime_start_cpu_timer();
-}
-
 extern void s390_handle_mcck(void);

 /*
  * The idle loop on a S390...
  */
 static void default_idle(void)
 {
-	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
-	unsigned long addr;
-	psw_t psw;
-
 	/* CPU is going idle. */
 	local_irq_disable();
 	if (need_resched()) {
@@ -120,7 +96,6 @@ static void default_idle(void)
 	local_mcck_disable();
 	if (test_thread_flag(TIF_MCCK_PENDING)) {
 		local_mcck_enable();
-		s390_idle_leave();
 		local_irq_enable();
 		s390_handle_mcck();
 		return;
@@ -128,42 +103,9 @@ static void default_idle(void)
 	trace_hardirqs_on();
 	/* Don't trace preempt off for idle. */
 	stop_critical_timings();
-	vtime_stop_cpu_timer();
-
-	/*
-	 * The inline assembly is equivalent to
-	 *	idle->idle_enter = get_clock();
-	 *	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
-	 *			PSW_MASK_IO | PSW_MASK_EXT);
-	 * The difference is that the inline assembly makes sure that
-	 * the stck instruction is right before the lpsw instruction.
-	 * This is done to increase the precision.
-	 */
-
-	/* Wait for external, I/O or machine check interrupt. */
-	psw.mask = psw_kernel_bits|PSW_MASK_WAIT|PSW_MASK_IO|PSW_MASK_EXT;
-#ifndef __s390x__
-	asm volatile(
-		"	basr	%0,0\n"
-		"0:	ahi	%0,1f-0b\n"
-		"	st	%0,4(%2)\n"
-		"	stck	0(%3)\n"
-		"	lpsw	0(%2)\n"
-		"1:"
-		: "=&d" (addr), "=m" (idle->idle_enter)
-		: "a" (&psw), "a" (&idle->idle_enter), "m" (psw)
-		: "memory", "cc");
-#else /* __s390x__ */
-	asm volatile(
-		"	larl	%0,1f\n"
-		"	stg	%0,8(%2)\n"
-		"	stck	0(%3)\n"
-		"	lpswe	0(%2)\n"
-		"1:"
-		: "=&d" (addr), "=m" (idle->idle_enter)
-		: "a" (&psw), "a" (&idle->idle_enter), "m" (psw)
-		: "memory", "cc");
-#endif /* __s390x__ */
+	/* Stop virtual timer and halt the cpu. */
+	vtime_stop_cpu();
+	/* Reenable preemption tracer. */
 	start_critical_timings();
 }
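The deleted comment above already spells out the C equivalent of the removed inline assembly; for readers not fluent in s390 assembly, here is a compilable rendering of that idle-entry step. The PSW bit values, get_clock() and __load_psw_mask() below are stubbed placeholders, not the real definitions, and after this patch the equivalent work presumably happens inside vtime_stop_cpu(), whose implementation is not shown on this page.

```c
#include <stdint.h>

/* Illustrative stand-ins for kernel primitives; not the real PSW layout. */
#define PSW_MASK_WAIT	(1ULL << 0)	/* dummy bit value for illustration */
#define PSW_MASK_IO	(1ULL << 1)	/* dummy bit value for illustration */
#define PSW_MASK_EXT	(1ULL << 2)	/* dummy bit value for illustration */
static const uint64_t psw_kernel_bits = 0;	/* placeholder */

static uint64_t get_clock(void) { return 1000; }	/* stands in for stck (TOD clock) */
static void __load_psw_mask(uint64_t mask) { (void)mask; }	/* would halt the CPU in a wait PSW */

struct s390_idle_data { uint64_t idle_enter; };

/* C equivalent of the inline assembly removed from default_idle(), as given
 * in its own deleted comment: timestamp the start of the idle period, then
 * load an enabled wait PSW.  The assembly only existed to guarantee that the
 * stck lands immediately before the lpsw/lpswe, for precision. */
static void enter_enabled_wait(struct s390_idle_data *idle)
{
	idle->idle_enter = get_clock();
	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT);
}

int main(void)
{
	struct s390_idle_data idle = { 0 };

	enter_enabled_wait(&idle);	/* idle.idle_enter now holds the entry timestamp */
	return (int)(idle.idle_enter == 0);
}
```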
...
@@ -119,8 +119,8 @@ void do_extint(struct pt_regs *regs, unsigned short code)
 	struct pt_regs *old_regs;

 	old_regs = set_irq_regs(regs);
-	irq_enter();
 	s390_idle_check();
+	irq_enter();
 	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
 		/* Serve timer interrupts first. */
 		clock_comparator_work();
...
This diff is collapsed.
@@ -632,8 +632,8 @@ do_IRQ (struct pt_regs *regs)
 	struct pt_regs *old_regs;

 	old_regs = set_irq_regs(regs);
-	irq_enter();
 	s390_idle_check();
+	irq_enter();
 	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
 		/* Serve timer interrupts first. */
 		clock_comparator_work();
...
@@ -18,6 +18,7 @@
 #include <asm/etr.h>
 #include <asm/lowcore.h>
 #include <asm/cio.h>
+#include <asm/cpu.h>
 #include "s390mach.h"

 static struct semaphore m_sem;
@@ -369,6 +370,8 @@ s390_do_machine_check(struct pt_regs *regs)

 	lockdep_off();

+	s390_idle_check();
+
 	mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
 	mcck = &__get_cpu_var(cpu_mcck);
 	umode = user_mode(regs);
...