Commit e82a3b75 authored by Helge Deller's avatar Helge Deller Committed by Kyle McMartin

parisc: ensure broadcast tlb purge runs single threaded

The TLB flushing functions on hppa, which cause PxTLB broadcasts on the system
bus, need to be protected by irq-safe spinlocks to avoid irq handlers deadlocking
the kernel. The deadlocks only happened during I/O-intensive loads and triggered
fairly rarely, which is why this bug went unnoticed for so long.
Signed-off-by: Helge Deller <deller@gmx.de>
[edited to use spin_lock_irqsave on UP as well since we'd been locking there
 all this time anyway, --kyle]
Signed-off-by: Kyle McMartin <kyle@mcmartin.ca>
parent 84be31be
...@@ -12,14 +12,12 @@ ...@@ -12,14 +12,12 @@
* N class systems, only one PxTLB inter processor broadcast can be * N class systems, only one PxTLB inter processor broadcast can be
* active at any one time on the Merced bus. This tlb purge * active at any one time on the Merced bus. This tlb purge
* synchronisation is fairly lightweight and harmless so we activate * synchronisation is fairly lightweight and harmless so we activate
* it on all SMP systems not just the N class. We also need to have * it on all systems not just the N class.
* preemption disabled on uniprocessor machines, and spin_lock does that
* nicely.
*/ */
extern spinlock_t pa_tlb_lock; extern spinlock_t pa_tlb_lock;
#define purge_tlb_start(x) spin_lock(&pa_tlb_lock) #define purge_tlb_start(flags) spin_lock_irqsave(&pa_tlb_lock, flags)
#define purge_tlb_end(x) spin_unlock(&pa_tlb_lock) #define purge_tlb_end(flags) spin_unlock_irqrestore(&pa_tlb_lock, flags)
extern void flush_tlb_all(void); extern void flush_tlb_all(void);
extern void flush_tlb_all_local(void *); extern void flush_tlb_all_local(void *);
...@@ -63,14 +61,16 @@ static inline void flush_tlb_mm(struct mm_struct *mm) ...@@ -63,14 +61,16 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
static inline void flush_tlb_page(struct vm_area_struct *vma, static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr) unsigned long addr)
{ {
unsigned long flags;
/* For one page, it's not worth testing the split_tlb variable */ /* For one page, it's not worth testing the split_tlb variable */
mb(); mb();
mtsp(vma->vm_mm->context,1); mtsp(vma->vm_mm->context,1);
purge_tlb_start(); purge_tlb_start(flags);
pdtlb(addr); pdtlb(addr);
pitlb(addr); pitlb(addr);
purge_tlb_end(); purge_tlb_end(flags);
} }
void __flush_tlb_range(unsigned long sid, void __flush_tlb_range(unsigned long sid,
......
...@@ -397,12 +397,13 @@ EXPORT_SYMBOL(flush_kernel_icache_range_asm); ...@@ -397,12 +397,13 @@ EXPORT_SYMBOL(flush_kernel_icache_range_asm);
/*
 * Clear a user page via the assembly helper, holding the TLB purge lock
 * (irq-safe as of this patch) around the call. The helper touches the TLB
 * internally, so it must be serialized against other PxTLB broadcasts.
 */
void clear_user_page_asm(void *page, unsigned long vaddr)
{
	unsigned long flags;
	/* This function is implemented in assembly in pacache.S */
	extern void __clear_user_page_asm(void *page, unsigned long vaddr);

	purge_tlb_start(flags);
	__clear_user_page_asm(page, vaddr);
	purge_tlb_end(flags);
}
#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */ #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
...@@ -443,20 +444,24 @@ extern void clear_user_page_asm(void *page, unsigned long vaddr); ...@@ -443,20 +444,24 @@ extern void clear_user_page_asm(void *page, unsigned long vaddr);
/*
 * Clear a page destined for userspace: purge the kernel dcache alias,
 * drop the kernel TLB entry under the irq-safe purge lock, then clear
 * the page through its user-space alias.
 */
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	unsigned long flags;

	purge_kernel_dcache_page((unsigned long)page);
	purge_tlb_start(flags);
	pdtlb_kernel(page);
	purge_tlb_end(flags);
	clear_user_page_asm(page, vaddr);
}
EXPORT_SYMBOL(clear_user_page); EXPORT_SYMBOL(clear_user_page);
/*
 * Flush the kernel dcache for one page and purge its kernel TLB entry.
 * The TLB purge is done under the irq-safe pa_tlb_lock so the PxTLB
 * broadcast stays single-threaded system-wide (see commit message).
 */
void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr); EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
...@@ -489,8 +494,10 @@ void __flush_tlb_range(unsigned long sid, unsigned long start, ...@@ -489,8 +494,10 @@ void __flush_tlb_range(unsigned long sid, unsigned long start,
if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */ if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */
flush_tlb_all(); flush_tlb_all();
else { else {
unsigned long flags;
mtsp(sid, 1); mtsp(sid, 1);
purge_tlb_start(); purge_tlb_start(flags);
if (split_tlb) { if (split_tlb) {
while (npages--) { while (npages--) {
pdtlb(start); pdtlb(start);
...@@ -503,7 +510,7 @@ void __flush_tlb_range(unsigned long sid, unsigned long start, ...@@ -503,7 +510,7 @@ void __flush_tlb_range(unsigned long sid, unsigned long start,
start += PAGE_SIZE; start += PAGE_SIZE;
} }
} }
purge_tlb_end(); purge_tlb_end(flags);
} }
} }
......
...@@ -90,12 +90,14 @@ static inline int map_pte_uncached(pte_t * pte, ...@@ -90,12 +90,14 @@ static inline int map_pte_uncached(pte_t * pte,
if (end > PMD_SIZE) if (end > PMD_SIZE)
end = PMD_SIZE; end = PMD_SIZE;
do { do {
unsigned long flags;
if (!pte_none(*pte)) if (!pte_none(*pte))
printk(KERN_ERR "map_pte_uncached: page already exists\n"); printk(KERN_ERR "map_pte_uncached: page already exists\n");
set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC)); set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
purge_tlb_start(); purge_tlb_start(flags);
pdtlb_kernel(orig_vaddr); pdtlb_kernel(orig_vaddr);
purge_tlb_end(); purge_tlb_end(flags);
vaddr += PAGE_SIZE; vaddr += PAGE_SIZE;
orig_vaddr += PAGE_SIZE; orig_vaddr += PAGE_SIZE;
(*paddr_ptr) += PAGE_SIZE; (*paddr_ptr) += PAGE_SIZE;
...@@ -168,11 +170,13 @@ static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr, ...@@ -168,11 +170,13 @@ static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
if (end > PMD_SIZE) if (end > PMD_SIZE)
end = PMD_SIZE; end = PMD_SIZE;
do { do {
unsigned long flags;
pte_t page = *pte; pte_t page = *pte;
pte_clear(&init_mm, vaddr, pte); pte_clear(&init_mm, vaddr, pte);
purge_tlb_start(); purge_tlb_start(flags);
pdtlb_kernel(orig_vaddr); pdtlb_kernel(orig_vaddr);
purge_tlb_end(); purge_tlb_end(flags);
vaddr += PAGE_SIZE; vaddr += PAGE_SIZE;
orig_vaddr += PAGE_SIZE; orig_vaddr += PAGE_SIZE;
pte++; pte++;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.