Commit 4dd5b673 authored by John David Anglin, committed by Helge Deller

parisc: Purge TLB entries after updating page table entry and set page accessed flag in TLB handler

This patch may resolve some races in TLB handling. Hopefully, TLB
inserts count as accesses and are protected by the spin lock.

If not, we may need to use IPI calls and do local purges on PA 2.0.
Signed-off-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
parent d27dfa13
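
The ordering change is the same in every pgtable.h hunk below: the page table
entry is written first, and any stale TLB entry is purged afterwards, all while
pa_tlb_lock is held. As a rough C sketch of that pattern (it mirrors the new
set_pte_at() body in the diff; the parisc definitions of pa_tlb_lock, set_pte(),
pte_inserted() and purge_tlb_entries() are assumed, and the helper name is
illustrative only):

	/*
	 * Minimal sketch of the update-then-purge ordering adopted by this
	 * patch.  Not the kernel's actual helper; it restates the macro body
	 * from the diff as a function for readability.
	 */
	static inline void pte_update_then_purge(struct mm_struct *mm,
						 unsigned long addr,
						 pte_t *ptep, pte_t pteval)
	{
		pte_t old_pte;
		unsigned long flags;

		spin_lock_irqsave(&pa_tlb_lock, flags);	/* serialize with TLB inserts */
		old_pte = *ptep;
		set_pte(ptep, pteval);			/* 1: update the page table entry */
		if (pte_inserted(old_pte))
			purge_tlb_entries(mm, addr);	/* 2: then purge any stale TLB entry */
		spin_unlock_irqrestore(&pa_tlb_lock, flags);
	}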
@@ -66,9 +66,9 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 		unsigned long flags;				\
 		spin_lock_irqsave(&pa_tlb_lock, flags);		\
 		old_pte = *ptep;				\
+		set_pte(ptep, pteval);				\
 		if (pte_inserted(old_pte))			\
 			purge_tlb_entries(mm, addr);		\
-		set_pte(ptep, pteval);				\
 		spin_unlock_irqrestore(&pa_tlb_lock, flags);	\
 	} while (0)
@@ -202,7 +202,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define _PAGE_HUGE	(1 << xlate_pabit(_PAGE_HPAGE_BIT))
 #define _PAGE_USER	(1 << xlate_pabit(_PAGE_USER_BIT))
 #define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
 #define _PAGE_KERNEL_RO	(_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
 #define _PAGE_KERNEL_EXEC	(_PAGE_KERNEL_RO | _PAGE_EXEC)
@@ -227,22 +227,22 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #ifndef __ASSEMBLY__
-#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
+#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER)
+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE)
 /* Others seem to make this executable, I don't know if that's correct
    or not. The stack is mapped this way though so this is necessary
    in the short term - dhd@linuxcare.com, 2000-08-08 */
-#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
-#define PAGE_WRITEONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE | _PAGE_ACCESSED)
-#define PAGE_EXECREAD	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ)
+#define PAGE_WRITEONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE)
+#define PAGE_EXECREAD	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC)
 #define PAGE_COPY	PAGE_EXECREAD
-#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
+#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
 #define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)
 #define PAGE_KERNEL_RWX	__pgprot(_PAGE_KERNEL_RWX)
 #define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL_RO)
 #define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
-#define PAGE_GATEWAY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ)
+#define PAGE_GATEWAY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_GATEWAY| _PAGE_READ)
 /*
@@ -479,8 +479,8 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
 		spin_unlock_irqrestore(&pa_tlb_lock, flags);
 		return 0;
 	}
-	purge_tlb_entries(vma->vm_mm, addr);
 	set_pte(ptep, pte_mkold(pte));
+	purge_tlb_entries(vma->vm_mm, addr);
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 	return 1;
 }
@@ -493,9 +493,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	spin_lock_irqsave(&pa_tlb_lock, flags);
 	old_pte = *ptep;
+	set_pte(ptep, __pte(0));
 	if (pte_inserted(old_pte))
 		purge_tlb_entries(mm, addr);
-	set_pte(ptep, __pte(0));
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 	return old_pte;
@@ -505,8 +505,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 {
 	unsigned long flags;
 	spin_lock_irqsave(&pa_tlb_lock, flags);
-	purge_tlb_entries(mm, addr);
 	set_pte(ptep, pte_wrprotect(*ptep));
+	purge_tlb_entries(mm, addr);
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 }
...
@@ -483,9 +483,7 @@
 	.macro	tlb_unlock0	spc,tmp
 #ifdef CONFIG_SMP
 	or,COND(=)	%r0,\spc,%r0
-	sync
-	or,COND(=)	%r0,\spc,%r0
-	stw	\spc,0(\tmp)
+	stw,ma	\spc,0(\tmp)
 #endif
 	.endm
...