Commit 3ad0e83c authored by Linus Torvalds

Merge branch 'parisc-4.9-4' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc fixes from Helge Deller:
 "On parisc we were still seeing occasional random segmentation faults
  and memory corruption on SMP machines. Dave Anglin then looked again
  at the TLB related code and found two issues in the PCI DMA and
  generic TLB flush functions.

  Then, in our startup code we timed the cache and TLB flush functions
  to calculate a threshold for when to use a complete TLB/cache flush
  rather than flushing just a specific range. This code raced with
  newly started CPUs and thus led to occasional kernel crashes (due to
  stale TLB/cache entries). The patch by Dave fixes this by flushing
  the local caches before starting secondary CPUs and by removing the
  race.

  The last problem fixed by this series is that we quite often suffered
  from hung tasks and self-detected stalls on the CPUs. It was fairly
  clear that this was related to the cr16 clocksource (newly introduced
  in v4.7) and our own implementation of sched_clock(). I dropped the
  open-coded sched_clock() function and switched to the generic
  sched_clock() implementation, which seems to have fixed this issue as
  well.

  All patches have been successfully tested on a variety of machines,
  including our Debian buildd servers.

  All patches (besides the small pr_cont fix) are tagged for stable
  releases"

* 'parisc-4.9-4' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Also flush data TLB in flush_icache_page_asm
  parisc: Fix race in pci-dma.c
  parisc: Switch to generic sched_clock implementation
  parisc: Fix races in parisc_setup_cache_timing()
  parisc: Fix printk continuations in system detection
parents 86b01b54 5035b230
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -34,7 +34,9 @@ config PARISC
 	select HAVE_ARCH_HASH
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
-	select HAVE_UNSTABLE_SCHED_CLOCK if (SMP || !64BIT)
+	select GENERIC_SCHED_CLOCK
+	select HAVE_UNSTABLE_SCHED_CLOCK if SMP
+	select GENERIC_CLOCKEVENTS
 	select ARCH_NO_COHERENT_DMA_MMAP
 	select CPU_NO_EFFICIENT_FFS
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -369,6 +369,7 @@ void __init parisc_setup_cache_timing(void)
 {
 	unsigned long rangetime, alltime;
 	unsigned long size, start;
+	unsigned long threshold;
 
 	alltime = mfctl(16);
 	flush_data_cache();
@@ -382,17 +383,12 @@ void __init parisc_setup_cache_timing(void)
 	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
 		alltime, size, rangetime);
 
-	/* Racy, but if we see an intermediate value, it's ok too... */
-	parisc_cache_flush_threshold = size * alltime / rangetime;
-
-	parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold);
-	if (!parisc_cache_flush_threshold)
-		parisc_cache_flush_threshold = FLUSH_THRESHOLD;
-
-	if (parisc_cache_flush_threshold > cache_info.dc_size)
-		parisc_cache_flush_threshold = cache_info.dc_size;
-
-	printk(KERN_INFO "Setting cache flush threshold to %lu kB\n",
+	threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
+	if (threshold > cache_info.dc_size)
+		threshold = cache_info.dc_size;
+	if (threshold)
+		parisc_cache_flush_threshold = threshold;
+	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
 		parisc_cache_flush_threshold/1024);
 
 	/* calculate TLB flush threshold */
@@ -401,7 +397,7 @@ void __init parisc_setup_cache_timing(void)
 	flush_tlb_all();
 	alltime = mfctl(16) - alltime;
 
-	size = PAGE_SIZE;
+	size = 0;
 	start = (unsigned long) _text;
 	rangetime = mfctl(16);
 	while (start < (unsigned long) _end) {
@@ -414,13 +410,10 @@ void __init parisc_setup_cache_timing(void)
 	printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
 		alltime, size, rangetime);
 
-	parisc_tlb_flush_threshold = size * alltime / rangetime;
-	parisc_tlb_flush_threshold *= num_online_cpus();
-	parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold);
-	if (!parisc_tlb_flush_threshold)
-		parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
-
-	printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n",
+	threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
+	if (threshold)
+		parisc_tlb_flush_threshold = threshold;
+	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
 		parisc_tlb_flush_threshold/1024);
 }
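The arithmetic behind the new threshold is simple: the boot code times one flush of the whole cache (alltime cycles) against a ranged flush of size bytes (rangetime cycles), and the break-even point is size * alltime / rangetime bytes. Below is a small stand-alone sketch of that calculation; the line-size constant, helper names and example numbers are illustrative assumptions, not values from the patch.

#include <stdio.h>

#define L1_CACHE_BYTES 64UL   /* assumed cache line size, for illustration only */

/* Round up to a whole cache line, like the kernel's L1_CACHE_ALIGN(). */
static unsigned long l1_cache_align(unsigned long x)
{
        return (x + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
}

/*
 * Break-even size between a ranged flush and a full flush: flushing
 * 'size' bytes by range cost 'rangetime' cycles, a full flush cost
 * 'alltime' cycles, so ranged flushing stops paying off at roughly
 * size * alltime / rangetime bytes (clamped to the cache size).
 */
static unsigned long flush_threshold(unsigned long alltime, unsigned long size,
                                     unsigned long rangetime,
                                     unsigned long cache_size)
{
        unsigned long threshold = l1_cache_align(size * alltime / rangetime);

        if (threshold > cache_size)
                threshold = cache_size;
        return threshold;
}

int main(void)
{
        /* e.g. full flush 200000 cycles, ranged flush of 4 MiB 800000 cycles */
        printf("threshold = %lu KiB\n",
               flush_threshold(200000, 4UL << 20, 800000, 1UL << 20) / 1024);
        return 0;
}

The race fix itself is visible in the hunk above: the threshold is computed in a local variable and the global parisc_cache_flush_threshold is written exactly once with the final, clamped value, so a secondary CPU starting up mid-calculation can no longer observe an intermediate value.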
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -58,7 +58,7 @@ void __init setup_pdc(void)
 	status = pdc_system_map_find_mods(&module_result, &module_path, 0);
 	if (status == PDC_OK) {
 		pdc_type = PDC_TYPE_SYSTEM_MAP;
-		printk("System Map.\n");
+		pr_cont("System Map.\n");
 		return;
 	}
 
@@ -77,7 +77,7 @@ void __init setup_pdc(void)
 	status = pdc_pat_cell_get_number(&cell_info);
 	if (status == PDC_OK) {
 		pdc_type = PDC_TYPE_PAT;
-		printk("64 bit PAT.\n");
+		pr_cont("64 bit PAT.\n");
 		return;
 	}
 #endif
@@ -97,12 +97,12 @@ void __init setup_pdc(void)
 	case 0xC:		/* 715/64, at least */
 		pdc_type = PDC_TYPE_SNAKE;
-		printk("Snake.\n");
+		pr_cont("Snake.\n");
 		return;
 
 	default:		/* Everything else */
-		printk("Unsupported.\n");
+		pr_cont("Unsupported.\n");
 		panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
 	}
 }
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -96,7 +96,7 @@ fitmanyloop:				/* Loop if LOOP >= 2 */
 
 fitmanymiddle:				/* Loop if LOOP >= 2 */
 	addib,COND(>)	-1, %r31, fitmanymiddle	/* Adjusted inner loop decr */
-	pitlbe		0(%sr1, %r28)
+	pitlbe		%r0(%sr1, %r28)
 	pitlbe,m	%arg1(%sr1, %r28)	/* Last pitlbe and addr adjust */
 	addib,COND(>)	-1, %r29, fitmanymiddle	/* Middle loop decr */
 	copy		%arg3, %r31		/* Re-init inner loop count */
@@ -139,7 +139,7 @@ fdtmanyloop:				/* Loop if LOOP >= 2 */
 
 fdtmanymiddle:				/* Loop if LOOP >= 2 */
 	addib,COND(>)	-1, %r31, fdtmanymiddle	/* Adjusted inner loop decr */
-	pdtlbe		0(%sr1, %r28)
+	pdtlbe		%r0(%sr1, %r28)
 	pdtlbe,m	%arg1(%sr1, %r28)	/* Last pdtlbe and addr adjust */
 	addib,COND(>)	-1, %r29, fdtmanymiddle	/* Middle loop decr */
 	copy		%arg3, %r31		/* Re-init inner loop count */
@@ -626,12 +626,12 @@ ENTRY_CFI(copy_user_page_asm)
 	/* Purge any old translations */
 
 #ifdef CONFIG_PA20
-	pdtlb,l		0(%r28)
-	pdtlb,l		0(%r29)
+	pdtlb,l		%r0(%r28)
+	pdtlb,l		%r0(%r29)
 #else
 	tlb_lock	%r20,%r21,%r22
-	pdtlb		0(%r28)
-	pdtlb		0(%r29)
+	pdtlb		%r0(%r28)
+	pdtlb		%r0(%r29)
 	tlb_unlock	%r20,%r21,%r22
 #endif
@@ -774,10 +774,10 @@ ENTRY_CFI(clear_user_page_asm)
 	/* Purge any old translation */
 
 #ifdef CONFIG_PA20
-	pdtlb,l		0(%r28)
+	pdtlb,l		%r0(%r28)
 #else
 	tlb_lock	%r20,%r21,%r22
-	pdtlb		0(%r28)
+	pdtlb		%r0(%r28)
 	tlb_unlock	%r20,%r21,%r22
 #endif
@@ -858,10 +858,10 @@ ENTRY_CFI(flush_dcache_page_asm)
 	/* Purge any old translation */
 
 #ifdef CONFIG_PA20
-	pdtlb,l		0(%r28)
+	pdtlb,l		%r0(%r28)
 #else
 	tlb_lock	%r20,%r21,%r22
-	pdtlb		0(%r28)
+	pdtlb		%r0(%r28)
 	tlb_unlock	%r20,%r21,%r22
 #endif
@@ -898,10 +898,10 @@ ENTRY_CFI(flush_dcache_page_asm)
 	sync
 
 #ifdef CONFIG_PA20
-	pdtlb,l		0(%r25)
+	pdtlb,l		%r0(%r25)
 #else
 	tlb_lock	%r20,%r21,%r22
-	pdtlb		0(%r25)
+	pdtlb		%r0(%r25)
 	tlb_unlock	%r20,%r21,%r22
 #endif
@@ -931,13 +931,18 @@ ENTRY_CFI(flush_icache_page_asm)
 	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
 #endif
 
-	/* Purge any old translation */
+	/* Purge any old translation.  Note that the FIC instruction
+	 * may use either the instruction or data TLB.  Given that we
+	 * have a flat address space, it's not clear which TLB will be
+	 * used.  So, we purge both entries.  */
 
 #ifdef CONFIG_PA20
+	pdtlb,l		%r0(%r28)
 	pitlb,l		%r0(%sr4,%r28)
 #else
 	tlb_lock	%r20,%r21,%r22
-	pitlb		(%sr4,%r28)
+	pdtlb		%r0(%r28)
+	pitlb		%r0(%sr4,%r28)
 	tlb_unlock	%r20,%r21,%r22
 #endif
@@ -976,10 +981,12 @@ ENTRY_CFI(flush_icache_page_asm)
 	sync
 
 #ifdef CONFIG_PA20
+	pdtlb,l		%r0(%r28)
 	pitlb,l		%r0(%sr4,%r25)
 #else
 	tlb_lock	%r20,%r21,%r22
-	pitlb		(%sr4,%r25)
+	pdtlb		%r0(%r28)
+	pitlb		%r0(%sr4,%r25)
 	tlb_unlock	%r20,%r21,%r22
 #endif
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -95,8 +95,8 @@ static inline int map_pte_uncached(pte_t * pte,
 
 		if (!pte_none(*pte))
 			printk(KERN_ERR "map_pte_uncached: page already exists\n");
-		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
 		purge_tlb_start(flags);
+		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
 		pdtlb_kernel(orig_vaddr);
 		purge_tlb_end(flags);
 		vaddr += PAGE_SIZE;
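The one-line move above puts the PTE update inside the purge_tlb_start()/purge_tlb_end() section instead of just before it. A minimal sketch of the resulting ordering, using the helpers from the surrounding code; the wrapper function and its parameter names are illustrative only, not part of the patch:

/* Illustrative only: the ordering map_pte_uncached() now follows. */
static void set_uncached_pte(pte_t *pte, unsigned long paddr,
                             unsigned long vaddr)
{
        unsigned long flags;

        purge_tlb_start(flags);   /* take the TLB purge lock, IRQs off */
        set_pte(pte, __mk_pte(paddr, PAGE_KERNEL_UNC));
        pdtlb_kernel(vaddr);      /* purge any stale kernel TLB entry */
        purge_tlb_end(flags);
}

The point is that the page-table write is now covered by the same lock that serializes TLB purges, rather than happening before that lock is taken.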
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -334,6 +334,10 @@ static int __init parisc_init(void)
 	/* tell PDC we're Linux. Nevermind failure. */
 	pdc_stable_write(0x40, &osid, sizeof(osid));
 
+	/* start with known state */
+	flush_cache_all_local();
+	flush_tlb_all_local(NULL);
+
 	processor_init();
 #ifdef CONFIG_SMP
 	pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n",
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/rtc.h>
 #include <linux/sched.h>
+#include <linux/sched_clock.h>
 #include <linux/kernel.h>
 #include <linux/param.h>
 #include <linux/string.h>
@@ -39,18 +40,6 @@
 
 static unsigned long clocktick __read_mostly;	/* timer cycles per tick */
 
-#ifndef CONFIG_64BIT
-/*
- * The processor-internal cycle counter (Control Register 16) is used as time
- * source for the sched_clock() function.  This register is 64bit wide on a
- * 64-bit kernel and 32bit on a 32-bit kernel. Since sched_clock() always
- * requires a 64bit counter we emulate on the 32-bit kernel the higher 32bits
- * with a per-cpu variable which we increase every time the counter
- * wraps-around (which happens every ~4 secounds).
- */
-static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits);
-#endif
-
 /*
  * We keep time on PA-RISC Linux by using the Interval Timer which is
  * a pair of registers; one is read-only and one is write-only; both
@@ -121,12 +110,6 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 	 */
 	mtctl(next_tick, 16);
 
-#if !defined(CONFIG_64BIT)
-	/* check for overflow on a 32bit kernel (every ~4 seconds). */
-	if (unlikely(next_tick < now))
-		this_cpu_inc(cr16_high_32_bits);
-#endif
-
 	/* Skip one clocktick on purpose if we missed next_tick.
 	 * The new CR16 must be "later" than current CR16 otherwise
 	 * itimer would not fire until CR16 wrapped - e.g 4 seconds
@@ -208,7 +191,7 @@ EXPORT_SYMBOL(profile_pc);
 
 /* clock source code */
 
-static cycle_t read_cr16(struct clocksource *cs)
+static cycle_t notrace read_cr16(struct clocksource *cs)
 {
 	return get_cycles();
 }
@@ -287,26 +270,9 @@ void read_persistent_clock(struct timespec *ts)
 }
 
 
-/*
- * sched_clock() framework
- */
-
-static u32 cyc2ns_mul __read_mostly;
-static u32 cyc2ns_shift __read_mostly;
-
-u64 sched_clock(void)
+static u64 notrace read_cr16_sched_clock(void)
 {
-	u64 now;
-
-	/* Get current cycle counter (Control Register 16). */
-#ifdef CONFIG_64BIT
-	now = mfctl(16);
-#else
-	now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32);
-#endif
-
-	/* return the value in ns (cycles_2_ns) */
-	return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift);
+	return get_cycles();
 }
@@ -316,17 +282,16 @@ u64 sched_clock(void)
 
 void __init time_init(void)
 {
-	unsigned long current_cr16_khz;
+	unsigned long cr16_hz;
 
-	current_cr16_khz = PAGE0->mem_10msec/10;	/* kHz */
 	clocktick = (100 * PAGE0->mem_10msec) / HZ;
-
-	/* calculate mult/shift values for cr16 */
-	clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
-				NSEC_PER_MSEC, 0);
-
 	start_cpu_itimer();	/* get CPU 0 started */
 
+	cr16_hz = 100 * PAGE0->mem_10msec;	/* Hz */
+
 	/* register at clocksource framework */
-	clocksource_register_khz(&clocksource_cr16, current_cr16_khz);
+	clocksource_register_hz(&clocksource_cr16, cr16_hz);
+
+	/* register as sched_clock source */
+	sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
 }
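The time.c conversion removes two pieces of hand-rolled machinery: the per-cpu cr16_high_32_bits counter that extended the 32-bit CR16 value to 64 bits on 32-bit kernels, and the cyc2ns_mul/cyc2ns_shift pair used to convert cycles to nanoseconds. The generic sched_clock layer does both once a read function and its frequency are registered. The stand-alone sketch below models that behaviour in plain C; it is a deliberately simplified assumption of what kernel/time/sched_clock.c does (no seqcount protection, no automatic mult/shift selection), not the kernel code itself.

#include <stdint.h>
#include <stdio.h>

static uint64_t epoch_ns;      /* nanoseconds accumulated at the last update */
static uint32_t epoch_cyc;     /* raw counter value at the last update */
static uint32_t cyc2ns_mul;
static uint32_t cyc2ns_shift;

/* Convert a wrap-safe 32-bit cycle delta to nanoseconds. */
static uint64_t cyc_to_ns(uint32_t cyc)
{
        return ((uint64_t)cyc * cyc2ns_mul) >> cyc2ns_shift;
}

/* Called at least once per counter wrap; the kernel arms a timer for this. */
static void update_epoch(uint32_t now)
{
        epoch_ns += cyc_to_ns(now - epoch_cyc);
        epoch_cyc = now;
}

/* 64-bit monotonic time built from a 32-bit wrapping counter. */
static uint64_t sched_clock_ns(uint32_t now)
{
        return epoch_ns + cyc_to_ns(now - epoch_cyc);
}

int main(void)
{
        /* Assume a 250 MHz counter: 4 ns per cycle, i.e. mul = 4 << shift. */
        cyc2ns_shift = 10;
        cyc2ns_mul = 4u << cyc2ns_shift;

        update_epoch(0xfffffff0u);                 /* just before a wrap */
        printf("%llu ns\n",
               (unsigned long long)sched_clock_ns(0x00000010u));  /* just after */
        return 0;
}

On parisc the registered read function is simply get_cycles() (CR16), the counter width is BITS_PER_LONG, and the rate is 100 * PAGE0->mem_10msec Hz, as the time_init() hunk above shows.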