Commit 890d643b authored by Benjamin Herrenschmidt, committed by Linus Torvalds

[PATCH] ppc64: Fix some bogus warnings & cleanup tlbie code path

This patch fixes some warnings that popped up with the removal of
-Wno-uninitialized around the code doing tlbies: the CPU_FTR_LOCKLESS_TLBIE
feature test is now evaluated once into a local lock_tlbie variable instead
of being repeated around each tlbie lock/unlock pair.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 4289d36e
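For readers less familiar with the pattern, here is a minimal user-space sketch (not the kernel code) of the change the hunks below make: the feature test that decides whether the global tlbie lock is needed is evaluated once into a local, so the conditional lock and unlock are visibly paired and the compiler has no reason to warn about the conditionally-initialized flags. The names feature_lockless, flush_one and the pthread mutex are purely illustrative stand-ins.

/*
 * Illustrative sketch only: a lock that is taken and released under
 * the same condition, with the condition cached in a local variable
 * as the patch does with lock_tlbie.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tlbie_lock = PTHREAD_MUTEX_INITIALIZER;
static int feature_lockless;	/* stands in for CPU_FTR_LOCKLESS_TLBIE */

static void flush_one(unsigned long va)
{
	/* Evaluate the feature test once, as the patch does. */
	int lock_tlbie = !feature_lockless;

	if (lock_tlbie)
		pthread_mutex_lock(&tlbie_lock);

	printf("tlbie 0x%lx\n", va);	/* stands in for the real tlbie */

	if (lock_tlbie)
		pthread_mutex_unlock(&tlbie_lock);
}

int main(void)
{
	flush_one(0x1000);
	return 0;
}

Compile with "cc -pthread"; because lock_tlbie is a single local, the lock and unlock are obviously taken on the same condition, which is also why the kernel variant no longer trips the uninitialized-variable warnings on flags.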
@@ -220,10 +220,12 @@ static long pSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
 		tlbiel(va);
 	} else {
-		if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
+		int lock_tlbie = !(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE);
+
+		if (lock_tlbie)
 			spin_lock(&pSeries_tlbie_lock);
 		tlbie(va, large);
-		if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
+		if (lock_tlbie)
 			spin_unlock(&pSeries_tlbie_lock);
 	}
@@ -243,6 +245,7 @@ static void pSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
 	unsigned long vsid, va, vpn, flags;
 	long slot;
 	HPTE *hptep;
+	int lock_tlbie = !(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE);
 
 	vsid = get_kernel_vsid(ea);
 	va = (vsid << 28) | (ea & 0x0fffffff);
@@ -256,10 +259,10 @@ static void pSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
 	set_pp_bit(newpp, hptep);
 
 	/* Ensure it is out of the tlb too */
-	if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
+	if (lock_tlbie)
 		spin_lock_irqsave(&pSeries_tlbie_lock, flags);
 	tlbie(va, 0);
-	if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
+	if (lock_tlbie)
 		spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
 }
@@ -270,6 +273,7 @@ static void pSeries_hpte_invalidate(unsigned long slot, unsigned long va,
 	Hpte_dword0 dw0;
 	unsigned long avpn = va >> 23;
 	unsigned long flags;
+	int lock_tlbie = !(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE);
 
 	if (large)
 		avpn &= ~0x1UL;
@@ -291,10 +295,10 @@ static void pSeries_hpte_invalidate(unsigned long slot, unsigned long va,
 	if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
 		tlbiel(va);
 	} else {
-		if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
+		if (lock_tlbie)
 			spin_lock(&pSeries_tlbie_lock);
 		tlbie(va, large);
-		if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
+		if (lock_tlbie)
 			spin_unlock(&pSeries_tlbie_lock);
 	}
 	local_irq_restore(flags);
@@ -364,8 +368,9 @@ static void pSeries_flush_hash_range(unsigned long context,
 		asm volatile("ptesync":::"memory");
 	} else {
-		/* XXX double check that it is safe to take this late */
-		if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
+		int lock_tlbie = !(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE);
+
+		if (lock_tlbie)
 			spin_lock(&pSeries_tlbie_lock);
 		asm volatile("ptesync":::"memory");
@@ -375,7 +380,7 @@ static void pSeries_flush_hash_range(unsigned long context,
 		asm volatile("eieio; tlbsync; ptesync":::"memory");
-		if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
+		if (lock_tlbie)
 			spin_unlock(&pSeries_tlbie_lock);
 	}
@@ -660,14 +660,15 @@ void pSeries_lpar_flush_hash_range(unsigned long context, unsigned long number,
 	int i;
 	unsigned long flags;
 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	int lock_tlbie = !(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE);
 
-	if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
+	if (lock_tlbie)
 		spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
 	for (i = 0; i < number; i++)
 		flush_hash_page(context, batch->addr[i], batch->pte[i], local);
-	if (!(cur_cpu_spec->cpu_features & CPU_FTR_LOCKLESS_TLBIE))
+	if (lock_tlbie)
 		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
 }