Commit af2f859d authored by David Mosberger's avatar David Mosberger

ia64: Improve layout of cpuinfo_ia64

Stephane made some measurements of the access patterns of the cpuinfo_ia64
members and, based on those results, this patch reorganizes the structure
for better cache-line sharing.
parent 08422cec
...@@ -91,9 +91,6 @@ handle_IPI (int irq, void *dev_id, struct pt_regs *regs) ...@@ -91,9 +91,6 @@ handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation); unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
unsigned long ops; unsigned long ops;
/* Count this now; we may make a call that never returns. */
local_cpu_data->ipi_count++;
mb(); /* Order interrupt and bit testing. */ mb(); /* Order interrupt and bit testing. */
while ((ops = xchg(pending_ipis, 0)) != 0) { while ((ops = xchg(pending_ipis, 0)) != 0) {
mb(); /* Order bit clearing and data access. */ mb(); /* Order bit clearing and data access. */
...@@ -338,17 +335,6 @@ smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wai ...@@ -338,17 +335,6 @@ smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wai
} }
EXPORT_SYMBOL(smp_call_function); EXPORT_SYMBOL(smp_call_function);
void
smp_do_timer (struct pt_regs *regs)
{
int user = user_mode(regs);
if (--local_cpu_data->prof_counter <= 0) {
local_cpu_data->prof_counter = local_cpu_data->prof_multiplier;
update_process_times(user);
}
}
/* /*
* this function calls the 'stop' function on all other CPUs in the system. * this function calls the 'stop' function on all other CPUs in the system.
*/ */
......
...@@ -265,8 +265,6 @@ ia64_sync_itc (unsigned int master) ...@@ -265,8 +265,6 @@ ia64_sync_itc (unsigned int master)
static inline void __init static inline void __init
smp_setup_percpu_timer (void) smp_setup_percpu_timer (void)
{ {
local_cpu_data->prof_counter = 1;
local_cpu_data->prof_multiplier = 1;
} }
static void __init static void __init
......
...@@ -255,9 +255,13 @@ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs) ...@@ -255,9 +255,13 @@ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
ia64_do_profile(regs); ia64_do_profile(regs);
while (1) { while (1) {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
smp_do_timer(regs); /*
* For UP, this is done in do_timer(). Weird, but
* fixing that would require updates to all
* platforms.
*/
update_process_times(user_mode(regs));
#endif #endif
new_itm += local_cpu_data->itm_delta; new_itm += local_cpu_data->itm_delta;
...@@ -280,17 +284,19 @@ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs) ...@@ -280,17 +284,19 @@ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
} }
do { do {
/* /*
* If we're too close to the next clock tick for comfort, we increase the * If we're too close to the next clock tick for
* safety margin by intentionally dropping the next tick(s). We do NOT update * comfort, we increase the safety margin by
* itm.next because that would force us to call do_timer() which in turn would * intentionally dropping the next tick(s). We do NOT
* let our clock run too fast (with the potentially devastating effect of * update itm.next because that would force us to call
* losing monotony of time). * do_timer() which in turn would let our clock run
*/ * too fast (with the potentially devastating effect
while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2)) * of losing monotony of time).
new_itm += local_cpu_data->itm_delta; */
ia64_set_itm(new_itm); while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
/* double check, in case we got hit by a (slow) PMI: */ new_itm += local_cpu_data->itm_delta;
ia64_set_itm(new_itm);
/* double check, in case we got hit by a (slow) PMI: */
} while (time_after_eq(ia64_get_itc(), new_itm)); } while (time_after_eq(ia64_get_itc(), new_itm));
return IRQ_HANDLED; return IRQ_HANDLED;
} }
......
...@@ -148,35 +148,35 @@ struct cpuinfo_ia64 { ...@@ -148,35 +148,35 @@ struct cpuinfo_ia64 {
__u32 softirq_pending; __u32 softirq_pending;
__u64 itm_delta; /* # of clock cycles between clock ticks */ __u64 itm_delta; /* # of clock cycles between clock ticks */
__u64 itm_next; /* interval timer mask value to use for next clock tick */ __u64 itm_next; /* interval timer mask value to use for next clock tick */
__u64 nsec_per_cyc; /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
__u64 unimpl_va_mask; /* mask of unimplemented virtual address bits (from PAL) */
__u64 unimpl_pa_mask; /* mask of unimplemented physical address bits (from PAL) */
__u64 *pgd_quick; __u64 *pgd_quick;
__u64 *pmd_quick; __u64 *pmd_quick;
__u64 pgtable_cache_sz; __u64 pgtable_cache_sz;
/* CPUID-derived information: */
__u64 ppn;
__u64 features;
__u8 number;
__u8 revision;
__u8 model;
__u8 family;
__u8 archrev;
char vendor[16];
__u64 itc_freq; /* frequency of ITC counter */ __u64 itc_freq; /* frequency of ITC counter */
__u64 proc_freq; /* frequency of processor */ __u64 proc_freq; /* frequency of processor */
__u64 cyc_per_usec; /* itc_freq/1000000 */ __u64 cyc_per_usec; /* itc_freq/1000000 */
__u64 nsec_per_cyc; /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
__u64 unimpl_va_mask; /* mask of unimplemented virtual address bits (from PAL) */
__u64 unimpl_pa_mask; /* mask of unimplemented physical address bits (from PAL) */
__u64 ptce_base; __u64 ptce_base;
__u32 ptce_count[2]; __u32 ptce_count[2];
__u32 ptce_stride[2]; __u32 ptce_stride[2];
struct task_struct *ksoftirqd; /* kernel softirq daemon for this CPU */ struct task_struct *ksoftirqd; /* kernel softirq daemon for this CPU */
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
int cpu;
__u64 loops_per_jiffy; __u64 loops_per_jiffy;
__u64 ipi_count; int cpu;
__u64 prof_counter;
__u64 prof_multiplier;
#endif #endif
/* CPUID-derived information: */
__u64 ppn;
__u64 features;
__u8 number;
__u8 revision;
__u8 model;
__u8 family;
__u8 archrev;
char vendor[16];
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
struct ia64_node_data *node_data; struct ia64_node_data *node_data;
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment