Commit 6b9c9b8a, authored by Thomas Gleixner, committed by Benjamin Herrenschmidt

powerpc: Convert native_tlbie_lock to raw_spinlock

native_tlbie_lock needs to be a real spinlock in RT. Convert it to
raw_spinlock.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 5181e790
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
#define HPTE_LOCK_BIT 3 #define HPTE_LOCK_BIT 3
static DEFINE_SPINLOCK(native_tlbie_lock); static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
static inline void __tlbie(unsigned long va, int psize, int ssize) static inline void __tlbie(unsigned long va, int psize, int ssize)
{ {
...@@ -104,7 +104,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local) ...@@ -104,7 +104,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
if (use_local) if (use_local)
use_local = mmu_psize_defs[psize].tlbiel; use_local = mmu_psize_defs[psize].tlbiel;
if (lock_tlbie && !use_local) if (lock_tlbie && !use_local)
spin_lock(&native_tlbie_lock); raw_spin_lock(&native_tlbie_lock);
asm volatile("ptesync": : :"memory"); asm volatile("ptesync": : :"memory");
if (use_local) { if (use_local) {
__tlbiel(va, psize, ssize); __tlbiel(va, psize, ssize);
...@@ -114,7 +114,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local) ...@@ -114,7 +114,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
asm volatile("eieio; tlbsync; ptesync": : :"memory"); asm volatile("eieio; tlbsync; ptesync": : :"memory");
} }
if (lock_tlbie && !use_local) if (lock_tlbie && !use_local)
spin_unlock(&native_tlbie_lock); raw_spin_unlock(&native_tlbie_lock);
} }
static inline void native_lock_hpte(struct hash_pte *hptep) static inline void native_lock_hpte(struct hash_pte *hptep)
...@@ -433,7 +433,7 @@ static void native_hpte_clear(void) ...@@ -433,7 +433,7 @@ static void native_hpte_clear(void)
/* we take the tlbie lock and hold it. Some hardware will /* we take the tlbie lock and hold it. Some hardware will
* deadlock if we try to tlbie from two processors at once. * deadlock if we try to tlbie from two processors at once.
*/ */
spin_lock(&native_tlbie_lock); raw_spin_lock(&native_tlbie_lock);
slots = pteg_count * HPTES_PER_GROUP; slots = pteg_count * HPTES_PER_GROUP;
...@@ -457,7 +457,7 @@ static void native_hpte_clear(void) ...@@ -457,7 +457,7 @@ static void native_hpte_clear(void)
} }
asm volatile("eieio; tlbsync; ptesync":::"memory"); asm volatile("eieio; tlbsync; ptesync":::"memory");
spin_unlock(&native_tlbie_lock); raw_spin_unlock(&native_tlbie_lock);
local_irq_restore(flags); local_irq_restore(flags);
} }
...@@ -520,7 +520,7 @@ static void native_flush_hash_range(unsigned long number, int local) ...@@ -520,7 +520,7 @@ static void native_flush_hash_range(unsigned long number, int local)
int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE); int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie) if (lock_tlbie)
spin_lock(&native_tlbie_lock); raw_spin_lock(&native_tlbie_lock);
asm volatile("ptesync":::"memory"); asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) { for (i = 0; i < number; i++) {
...@@ -535,7 +535,7 @@ static void native_flush_hash_range(unsigned long number, int local) ...@@ -535,7 +535,7 @@ static void native_flush_hash_range(unsigned long number, int local)
asm volatile("eieio; tlbsync; ptesync":::"memory"); asm volatile("eieio; tlbsync; ptesync":::"memory");
if (lock_tlbie) if (lock_tlbie)
spin_unlock(&native_tlbie_lock); raw_spin_unlock(&native_tlbie_lock);
} }
local_irq_restore(flags); local_irq_restore(flags);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment