Commit f927da17, authored by Isaku Yamahata, committed by Tony Luck

ia64/pv_ops/pv_time_ops: add sched_clock hook.

add sched_clock() hook to paravirtualize sched_clock().
ia64 sched_clock() is based on ar.itc which isn't stable
on virtualized environment because vcpu may move around on
pcpus. So it needs paravirtualization.
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 496203b1
...@@ -225,6 +225,8 @@ struct pv_time_ops { ...@@ -225,6 +225,8 @@ struct pv_time_ops {
int (*do_steal_accounting)(unsigned long *new_itm); int (*do_steal_accounting)(unsigned long *new_itm);
void (*clocksource_resume)(void); void (*clocksource_resume)(void);
unsigned long long (*sched_clock)(void);
}; };
extern struct pv_time_ops pv_time_ops; extern struct pv_time_ops pv_time_ops;
...@@ -242,6 +244,11 @@ paravirt_do_steal_accounting(unsigned long *new_itm) ...@@ -242,6 +244,11 @@ paravirt_do_steal_accounting(unsigned long *new_itm)
return pv_time_ops.do_steal_accounting(new_itm); return pv_time_ops.do_steal_accounting(new_itm);
} }
static inline unsigned long long paravirt_sched_clock(void)
{
return pv_time_ops.sched_clock();
}
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#else #else
......
...@@ -40,5 +40,6 @@ get_cycles (void) ...@@ -40,5 +40,6 @@ get_cycles (void)
} }
extern void ia64_cpu_local_tick (void); extern void ia64_cpu_local_tick (void);
extern unsigned long long ia64_native_sched_clock (void);
#endif /* _ASM_IA64_TIMEX_H */ #endif /* _ASM_IA64_TIMEX_H */
...@@ -1050,7 +1050,7 @@ END(ia64_delay_loop) ...@@ -1050,7 +1050,7 @@ END(ia64_delay_loop)
* except that the multiplication and the shift are done with 128-bit * except that the multiplication and the shift are done with 128-bit
* intermediate precision so that we can produce a full 64-bit result. * intermediate precision so that we can produce a full 64-bit result.
*/ */
GLOBAL_ENTRY(sched_clock) GLOBAL_ENTRY(ia64_native_sched_clock)
addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
mov.m r9=ar.itc // fetch cycle-counter (35 cyc) mov.m r9=ar.itc // fetch cycle-counter (35 cyc)
;; ;;
...@@ -1066,7 +1066,13 @@ GLOBAL_ENTRY(sched_clock) ...@@ -1066,7 +1066,13 @@ GLOBAL_ENTRY(sched_clock)
;; ;;
shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
br.ret.sptk.many rp br.ret.sptk.many rp
END(sched_clock) END(ia64_native_sched_clock)
#ifndef CONFIG_PARAVIRT
//unsigned long long
//sched_clock(void) __attribute__((alias("ia64_native_sched_clock")));
.global sched_clock
sched_clock = ia64_native_sched_clock
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING #ifdef CONFIG_VIRT_CPU_ACCOUNTING
GLOBAL_ENTRY(cycle_to_cputime) GLOBAL_ENTRY(cycle_to_cputime)
......
...@@ -366,4 +366,5 @@ ia64_native_do_steal_accounting(unsigned long *new_itm) ...@@ -366,4 +366,5 @@ ia64_native_do_steal_accounting(unsigned long *new_itm)
struct pv_time_ops pv_time_ops = { struct pv_time_ops pv_time_ops = {
.do_steal_accounting = ia64_native_do_steal_accounting, .do_steal_accounting = ia64_native_do_steal_accounting,
.sched_clock = ia64_native_sched_clock,
}; };
...@@ -49,6 +49,15 @@ EXPORT_SYMBOL(last_cli_ip); ...@@ -49,6 +49,15 @@ EXPORT_SYMBOL(last_cli_ip);
#endif #endif
#ifdef CONFIG_PARAVIRT
/* We need to define a real function for sched_clock, to override the
weak default version */
unsigned long long sched_clock(void)
{
return paravirt_sched_clock();
}
#endif
#ifdef CONFIG_PARAVIRT #ifdef CONFIG_PARAVIRT
static void static void
paravirt_clocksource_resume(void) paravirt_clocksource_resume(void)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.