Commit af2f859d authored by David Mosberger's avatar David Mosberger

ia64: Improve layout of cpuinfo_ia64

Stephane made some measurements on the access-pattern of the cpuinfo_ia64
members and based on those results, this patch reorganizes the structure
for better cache-line sharing.
parent 08422cec
......@@ -91,9 +91,6 @@ handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
unsigned long ops;
/* Count this now; we may make a call that never returns. */
local_cpu_data->ipi_count++;
mb(); /* Order interrupt and bit testing. */
while ((ops = xchg(pending_ipis, 0)) != 0) {
mb(); /* Order bit clearing and data access. */
......@@ -338,17 +335,6 @@ smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wai
}
EXPORT_SYMBOL(smp_call_function);
void
smp_do_timer (struct pt_regs *regs)
{
	/*
	 * Account one timer tick against the current task.  The per-CPU
	 * prof_counter is decremented on every call; when it expires we
	 * rearm it from prof_multiplier and deliver the tick, so process
	 * times are updated once every prof_multiplier interrupts.
	 */
	int in_user_mode = user_mode(regs);

	if (--local_cpu_data->prof_counter > 0)
		return;

	local_cpu_data->prof_counter = local_cpu_data->prof_multiplier;
	update_process_times(in_user_mode);
}
/*
* this function calls the 'stop' function on all other CPUs in the system.
*/
......
......@@ -265,8 +265,6 @@ ia64_sync_itc (unsigned int master)
static inline void __init
smp_setup_percpu_timer (void)
{
	/*
	 * Initialize this CPU's profiling state: with a multiplier of 1
	 * every timer interrupt delivers a tick, and a counter of 1 makes
	 * the first interrupt fire one immediately.
	 */
	local_cpu_data->prof_multiplier = 1;
	local_cpu_data->prof_counter = 1;
}
static void __init
......
......@@ -255,9 +255,13 @@ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
ia64_do_profile(regs);
while (1) {
#ifdef CONFIG_SMP
smp_do_timer(regs);
/*
* For UP, this is done in do_timer(). Weird, but
* fixing that would require updates to all
* platforms.
*/
update_process_times(user_mode(regs));
#endif
new_itm += local_cpu_data->itm_delta;
......@@ -281,11 +285,13 @@ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
do {
/*
* If we're too close to the next clock tick for comfort, we increase the
* safety margin by intentionally dropping the next tick(s). We do NOT update
* itm.next because that would force us to call do_timer() which in turn would
* let our clock run too fast (with the potentially devastating effect of
* losing monotony of time).
* If we're too close to the next clock tick for
* comfort, we increase the safety margin by
* intentionally dropping the next tick(s). We do NOT
* update itm.next because that would force us to call
* do_timer() which in turn would let our clock run
* too fast (with the potentially devastating effect
* of losing monotony of time).
*/
while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
new_itm += local_cpu_data->itm_delta;
......
......@@ -148,35 +148,35 @@ struct cpuinfo_ia64 {
__u32 softirq_pending;
__u64 itm_delta; /* # of clock cycles between clock ticks */
__u64 itm_next; /* interval timer mask value to use for next clock tick */
__u64 nsec_per_cyc; /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
__u64 unimpl_va_mask; /* mask of unimplemented virtual address bits (from PAL) */
__u64 unimpl_pa_mask; /* mask of unimplemented physical address bits (from PAL) */
__u64 *pgd_quick;
__u64 *pmd_quick;
__u64 pgtable_cache_sz;
/* CPUID-derived information: */
__u64 ppn;
__u64 features;
__u8 number;
__u8 revision;
__u8 model;
__u8 family;
__u8 archrev;
char vendor[16];
__u64 itc_freq; /* frequency of ITC counter */
__u64 proc_freq; /* frequency of processor */
__u64 cyc_per_usec; /* itc_freq/1000000 */
__u64 nsec_per_cyc; /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
__u64 unimpl_va_mask; /* mask of unimplemented virtual address bits (from PAL) */
__u64 unimpl_pa_mask; /* mask of unimplemented physical address bits (from PAL) */
__u64 ptce_base;
__u32 ptce_count[2];
__u32 ptce_stride[2];
struct task_struct *ksoftirqd; /* kernel softirq daemon for this CPU */
#ifdef CONFIG_SMP
int cpu;
__u64 loops_per_jiffy;
__u64 ipi_count;
__u64 prof_counter;
__u64 prof_multiplier;
int cpu;
#endif
/* CPUID-derived information: */
__u64 ppn;
__u64 features;
__u8 number;
__u8 revision;
__u8 model;
__u8 family;
__u8 archrev;
char vendor[16];
#ifdef CONFIG_NUMA
struct ia64_node_data *node_data;
#endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment