Commit 96e28449 authored by David Gibson, committed by Linus Torvalds

[PATCH] ppc64: kill bitfields in ppc64 hash code

This patch removes the use of bitfield types from the ppc64 hash table
manipulation code.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Acked-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent f13487c6
...@@ -38,11 +38,12 @@ static inline void iSeries_hunlock(unsigned long slot) ...@@ -38,11 +38,12 @@ static inline void iSeries_hunlock(unsigned long slot)
} }
static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va, static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
unsigned long prpn, int secondary, unsigned long prpn, unsigned long vflags,
unsigned long hpteflags, int bolted, int large) unsigned long rflags)
{ {
long slot; long slot;
HPTE lhpte; hpte_t lhpte;
int secondary = 0;
/* /*
* The hypervisor tries both primary and secondary. * The hypervisor tries both primary and secondary.
...@@ -50,13 +51,13 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va, ...@@ -50,13 +51,13 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
* it means we have already tried both primary and secondary, * it means we have already tried both primary and secondary,
* so we return failure immediately. * so we return failure immediately.
*/ */
if (secondary) if (vflags & HPTE_V_SECONDARY)
return -1; return -1;
iSeries_hlock(hpte_group); iSeries_hlock(hpte_group);
slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT); slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
BUG_ON(lhpte.dw0.dw0.v); BUG_ON(lhpte.v & HPTE_V_VALID);
if (slot == -1) { /* No available entry found in either group */ if (slot == -1) { /* No available entry found in either group */
iSeries_hunlock(hpte_group); iSeries_hunlock(hpte_group);
...@@ -64,19 +65,13 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va, ...@@ -64,19 +65,13 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
} }
if (slot < 0) { /* MSB set means secondary group */ if (slot < 0) { /* MSB set means secondary group */
vflags |= HPTE_V_VALID;
secondary = 1; secondary = 1;
slot &= 0x7fffffffffffffff; slot &= 0x7fffffffffffffff;
} }
lhpte.dw1.dword1 = 0; lhpte.v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID;
lhpte.dw1.dw1.rpn = physRpn_to_absRpn(prpn); lhpte.r = (physRpn_to_absRpn(prpn) << HPTE_R_RPN_SHIFT) | rflags;
lhpte.dw1.flags.flags = hpteflags;
lhpte.dw0.dword0 = 0;
lhpte.dw0.dw0.avpn = va >> 23;
lhpte.dw0.dw0.h = secondary;
lhpte.dw0.dw0.bolted = bolted;
lhpte.dw0.dw0.v = 1;
/* Now fill in the actual HPTE */ /* Now fill in the actual HPTE */
HvCallHpt_addValidate(slot, secondary, &lhpte); HvCallHpt_addValidate(slot, secondary, &lhpte);
...@@ -88,20 +83,17 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va, ...@@ -88,20 +83,17 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
static unsigned long iSeries_hpte_getword0(unsigned long slot) static unsigned long iSeries_hpte_getword0(unsigned long slot)
{ {
unsigned long dword0; hpte_t hpte;
HPTE hpte;
HvCallHpt_get(&hpte, slot); HvCallHpt_get(&hpte, slot);
dword0 = hpte.dw0.dword0; return hpte.v;
return dword0;
} }
static long iSeries_hpte_remove(unsigned long hpte_group) static long iSeries_hpte_remove(unsigned long hpte_group)
{ {
unsigned long slot_offset; unsigned long slot_offset;
int i; int i;
HPTE lhpte; unsigned long hpte_v;
/* Pick a random slot to start at */ /* Pick a random slot to start at */
slot_offset = mftb() & 0x7; slot_offset = mftb() & 0x7;
...@@ -109,10 +101,9 @@ static long iSeries_hpte_remove(unsigned long hpte_group) ...@@ -109,10 +101,9 @@ static long iSeries_hpte_remove(unsigned long hpte_group)
iSeries_hlock(hpte_group); iSeries_hlock(hpte_group);
for (i = 0; i < HPTES_PER_GROUP; i++) { for (i = 0; i < HPTES_PER_GROUP; i++) {
lhpte.dw0.dword0 = hpte_v = iSeries_hpte_getword0(hpte_group + slot_offset);
iSeries_hpte_getword0(hpte_group + slot_offset);
if (!lhpte.dw0.dw0.bolted) { if (! (hpte_v & HPTE_V_BOLTED)) {
HvCallHpt_invalidateSetSwBitsGet(hpte_group + HvCallHpt_invalidateSetSwBitsGet(hpte_group +
slot_offset, 0, 0); slot_offset, 0, 0);
iSeries_hunlock(hpte_group); iSeries_hunlock(hpte_group);
...@@ -137,13 +128,13 @@ static long iSeries_hpte_remove(unsigned long hpte_group) ...@@ -137,13 +128,13 @@ static long iSeries_hpte_remove(unsigned long hpte_group)
static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp, static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
unsigned long va, int large, int local) unsigned long va, int large, int local)
{ {
HPTE hpte; hpte_t hpte;
unsigned long avpn = va >> 23; unsigned long avpn = va >> 23;
iSeries_hlock(slot); iSeries_hlock(slot);
HvCallHpt_get(&hpte, slot); HvCallHpt_get(&hpte, slot);
if ((hpte.dw0.dw0.avpn == avpn) && (hpte.dw0.dw0.v)) { if ((HPTE_V_AVPN_VAL(hpte.v) == avpn) && (hpte.v & HPTE_V_VALID)) {
/* /*
* Hypervisor expects bits as NPPP, which is * Hypervisor expects bits as NPPP, which is
* different from how they are mapped in our PP. * different from how they are mapped in our PP.
...@@ -167,7 +158,7 @@ static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp, ...@@ -167,7 +158,7 @@ static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
*/ */
static long iSeries_hpte_find(unsigned long vpn) static long iSeries_hpte_find(unsigned long vpn)
{ {
HPTE hpte; hpte_t hpte;
long slot; long slot;
/* /*
...@@ -177,7 +168,7 @@ static long iSeries_hpte_find(unsigned long vpn) ...@@ -177,7 +168,7 @@ static long iSeries_hpte_find(unsigned long vpn)
* 0x80000000xxxxxxxx : Entry found in secondary group, slot x * 0x80000000xxxxxxxx : Entry found in secondary group, slot x
*/ */
slot = HvCallHpt_findValid(&hpte, vpn); slot = HvCallHpt_findValid(&hpte, vpn);
if (hpte.dw0.dw0.v) { if (hpte.v & HPTE_V_VALID) {
if (slot < 0) { if (slot < 0) {
slot &= 0x7fffffffffffffff; slot &= 0x7fffffffffffffff;
slot = -slot; slot = -slot;
...@@ -212,7 +203,7 @@ static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea) ...@@ -212,7 +203,7 @@ static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va, static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
int large, int local) int large, int local)
{ {
HPTE lhpte; unsigned long hpte_v;
unsigned long avpn = va >> 23; unsigned long avpn = va >> 23;
unsigned long flags; unsigned long flags;
...@@ -220,9 +211,9 @@ static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va, ...@@ -220,9 +211,9 @@ static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
iSeries_hlock(slot); iSeries_hlock(slot);
lhpte.dw0.dword0 = iSeries_hpte_getword0(slot); hpte_v = iSeries_hpte_getword0(slot);
if ((lhpte.dw0.dw0.avpn == avpn) && lhpte.dw0.dw0.v) if ((HPTE_V_AVPN_VAL(hpte_v) == avpn) && (hpte_v & HPTE_V_VALID))
HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0); HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);
iSeries_hunlock(slot); iSeries_hunlock(slot);
......
...@@ -503,7 +503,7 @@ static void __init build_iSeries_Memory_Map(void) ...@@ -503,7 +503,7 @@ static void __init build_iSeries_Memory_Map(void)
/* Fill in the hashed page table hash mask */ /* Fill in the hashed page table hash mask */
num_ptegs = hptSizePages * num_ptegs = hptSizePages *
(PAGE_SIZE / (sizeof(HPTE) * HPTES_PER_GROUP)); (PAGE_SIZE / (sizeof(hpte_t) * HPTES_PER_GROUP));
htab_hash_mask = num_ptegs - 1; htab_hash_mask = num_ptegs - 1;
/* /*
...@@ -618,25 +618,23 @@ static void __init setup_iSeries_cache_sizes(void) ...@@ -618,25 +618,23 @@ static void __init setup_iSeries_cache_sizes(void)
static void iSeries_make_pte(unsigned long va, unsigned long pa, static void iSeries_make_pte(unsigned long va, unsigned long pa,
int mode) int mode)
{ {
HPTE local_hpte, rhpte; hpte_t local_hpte, rhpte;
unsigned long hash, vpn; unsigned long hash, vpn;
long slot; long slot;
vpn = va >> PAGE_SHIFT; vpn = va >> PAGE_SHIFT;
hash = hpt_hash(vpn, 0); hash = hpt_hash(vpn, 0);
local_hpte.dw1.dword1 = pa | mode; local_hpte.r = pa | mode;
local_hpte.dw0.dword0 = 0; local_hpte.v = ((va >> 23) << HPTE_V_AVPN_SHIFT)
local_hpte.dw0.dw0.avpn = va >> 23; | HPTE_V_BOLTED | HPTE_V_VALID;
local_hpte.dw0.dw0.bolted = 1; /* bolted */
local_hpte.dw0.dw0.v = 1;
slot = HvCallHpt_findValid(&rhpte, vpn); slot = HvCallHpt_findValid(&rhpte, vpn);
if (slot < 0) { if (slot < 0) {
/* Must find space in primary group */ /* Must find space in primary group */
panic("hash_page: hpte already exists\n"); panic("hash_page: hpte already exists\n");
} }
HvCallHpt_addValidate(slot, 0, (HPTE *)&local_hpte ); HvCallHpt_addValidate(slot, 0, &local_hpte);
} }
/* /*
...@@ -646,7 +644,7 @@ static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr) ...@@ -646,7 +644,7 @@ static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
{ {
unsigned long pa; unsigned long pa;
unsigned long mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX; unsigned long mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
HPTE hpte; hpte_t hpte;
for (pa = saddr; pa < eaddr ;pa += PAGE_SIZE) { for (pa = saddr; pa < eaddr ;pa += PAGE_SIZE) {
unsigned long ea = (unsigned long)__va(pa); unsigned long ea = (unsigned long)__va(pa);
...@@ -659,7 +657,7 @@ static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr) ...@@ -659,7 +657,7 @@ static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
if (!in_kernel_text(ea)) if (!in_kernel_text(ea))
mode_rw |= HW_NO_EXEC; mode_rw |= HW_NO_EXEC;
if (hpte.dw0.dw0.v) { if (hpte.v & HPTE_V_VALID) {
/* HPTE exists, so just bolt it */ /* HPTE exists, so just bolt it */
HvCallHpt_setSwBits(slot, 0x10, 0); HvCallHpt_setSwBits(slot, 0x10, 0);
/* And make sure the pp bits are correct */ /* And make sure the pp bits are correct */
......
...@@ -277,31 +277,20 @@ void vpa_init(int cpu) ...@@ -277,31 +277,20 @@ void vpa_init(int cpu)
long pSeries_lpar_hpte_insert(unsigned long hpte_group, long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long va, unsigned long prpn, unsigned long va, unsigned long prpn,
int secondary, unsigned long hpteflags, unsigned long vflags, unsigned long rflags)
int bolted, int large)
{ {
unsigned long arpn = physRpn_to_absRpn(prpn); unsigned long arpn = physRpn_to_absRpn(prpn);
unsigned long lpar_rc; unsigned long lpar_rc;
unsigned long flags; unsigned long flags;
unsigned long slot; unsigned long slot;
HPTE lhpte; unsigned long hpte_v, hpte_r;
unsigned long dummy0, dummy1; unsigned long dummy0, dummy1;
/* Fill in the local HPTE with absolute rpn, avpn and flags */ hpte_v = ((va >> 23) << HPTE_V_AVPN_SHIFT) | vflags | HPTE_V_VALID;
lhpte.dw1.dword1 = 0; if (vflags & HPTE_V_LARGE)
lhpte.dw1.dw1.rpn = arpn; hpte_v &= ~(1UL << HPTE_V_AVPN_SHIFT);
lhpte.dw1.flags.flags = hpteflags;
lhpte.dw0.dword0 = 0; hpte_r = (arpn << HPTE_R_RPN_SHIFT) | rflags;
lhpte.dw0.dw0.avpn = va >> 23;
lhpte.dw0.dw0.h = secondary;
lhpte.dw0.dw0.bolted = bolted;
lhpte.dw0.dw0.v = 1;
if (large) {
lhpte.dw0.dw0.l = 1;
lhpte.dw0.dw0.avpn &= ~0x1UL;
}
/* Now fill in the actual HPTE */ /* Now fill in the actual HPTE */
/* Set CEC cookie to 0 */ /* Set CEC cookie to 0 */
...@@ -312,11 +301,11 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group, ...@@ -312,11 +301,11 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
flags = 0; flags = 0;
/* XXX why is this here? - Anton */ /* XXX why is this here? - Anton */
if (hpteflags & (_PAGE_GUARDED|_PAGE_NO_CACHE)) if (rflags & (_PAGE_GUARDED|_PAGE_NO_CACHE))
lhpte.dw1.flags.flags &= ~_PAGE_COHERENT; hpte_r &= ~_PAGE_COHERENT;
lpar_rc = plpar_hcall(H_ENTER, flags, hpte_group, lhpte.dw0.dword0, lpar_rc = plpar_hcall(H_ENTER, flags, hpte_group, hpte_v,
lhpte.dw1.dword1, &slot, &dummy0, &dummy1); hpte_r, &slot, &dummy0, &dummy1);
if (unlikely(lpar_rc == H_PTEG_Full)) if (unlikely(lpar_rc == H_PTEG_Full))
return -1; return -1;
...@@ -332,7 +321,7 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group, ...@@ -332,7 +321,7 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
/* Because of iSeries, we have to pass down the secondary /* Because of iSeries, we have to pass down the secondary
* bucket bit here as well * bucket bit here as well
*/ */
return (slot & 7) | (secondary << 3); return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
} }
static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock); static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);
...@@ -427,22 +416,18 @@ static long pSeries_lpar_hpte_find(unsigned long vpn) ...@@ -427,22 +416,18 @@ static long pSeries_lpar_hpte_find(unsigned long vpn)
unsigned long hash; unsigned long hash;
unsigned long i, j; unsigned long i, j;
long slot; long slot;
union { unsigned long hpte_v;
unsigned long dword0;
Hpte_dword0 dw0;
} hpte_dw0;
Hpte_dword0 dw0;
hash = hpt_hash(vpn, 0); hash = hpt_hash(vpn, 0);
for (j = 0; j < 2; j++) { for (j = 0; j < 2; j++) {
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
for (i = 0; i < HPTES_PER_GROUP; i++) { for (i = 0; i < HPTES_PER_GROUP; i++) {
hpte_dw0.dword0 = pSeries_lpar_hpte_getword0(slot); hpte_v = pSeries_lpar_hpte_getword0(slot);
dw0 = hpte_dw0.dw0;
if ((dw0.avpn == (vpn >> 11)) && dw0.v && if ((HPTE_V_AVPN_VAL(hpte_v) == (vpn >> 11))
(dw0.h == j)) { && (hpte_v & HPTE_V_VALID)
&& (!!(hpte_v & HPTE_V_SECONDARY) == j)) {
/* HPTE matches */ /* HPTE matches */
if (j) if (j)
slot = -slot; slot = -slot;
......
...@@ -170,9 +170,7 @@ htab_insert_pte: ...@@ -170,9 +170,7 @@ htab_insert_pte:
/* Call ppc_md.hpte_insert */ /* Call ppc_md.hpte_insert */
ld r7,STK_PARM(r4)(r1) /* Retreive new pp bits */ ld r7,STK_PARM(r4)(r1) /* Retreive new pp bits */
mr r4,r29 /* Retreive va */ mr r4,r29 /* Retreive va */
li r6,0 /* primary slot */ li r6,0 /* no vflags */
li r8,0 /* not bolted and not large */
li r9,0
_GLOBAL(htab_call_hpte_insert1) _GLOBAL(htab_call_hpte_insert1)
bl . /* Will be patched by htab_finish_init() */ bl . /* Will be patched by htab_finish_init() */
cmpdi 0,r3,0 cmpdi 0,r3,0
...@@ -192,9 +190,7 @@ _GLOBAL(htab_call_hpte_insert1) ...@@ -192,9 +190,7 @@ _GLOBAL(htab_call_hpte_insert1)
/* Call ppc_md.hpte_insert */ /* Call ppc_md.hpte_insert */
ld r7,STK_PARM(r4)(r1) /* Retreive new pp bits */ ld r7,STK_PARM(r4)(r1) /* Retreive new pp bits */
mr r4,r29 /* Retreive va */ mr r4,r29 /* Retreive va */
li r6,1 /* secondary slot */ li r6,HPTE_V_SECONDARY@l /* secondary slot */
li r8,0 /* not bolted and not large */
li r9,0
_GLOBAL(htab_call_hpte_insert2) _GLOBAL(htab_call_hpte_insert2)
bl . /* Will be patched by htab_finish_init() */ bl . /* Will be patched by htab_finish_init() */
cmpdi 0,r3,0 cmpdi 0,r3,0
......
...@@ -27,9 +27,9 @@ ...@@ -27,9 +27,9 @@
static DEFINE_SPINLOCK(native_tlbie_lock); static DEFINE_SPINLOCK(native_tlbie_lock);
static inline void native_lock_hpte(HPTE *hptep) static inline void native_lock_hpte(hpte_t *hptep)
{ {
unsigned long *word = &hptep->dw0.dword0; unsigned long *word = &hptep->v;
while (1) { while (1) {
if (!test_and_set_bit(HPTE_LOCK_BIT, word)) if (!test_and_set_bit(HPTE_LOCK_BIT, word))
...@@ -39,32 +39,28 @@ static inline void native_lock_hpte(HPTE *hptep) ...@@ -39,32 +39,28 @@ static inline void native_lock_hpte(HPTE *hptep)
} }
} }
static inline void native_unlock_hpte(HPTE *hptep) static inline void native_unlock_hpte(hpte_t *hptep)
{ {
unsigned long *word = &hptep->dw0.dword0; unsigned long *word = &hptep->v;
asm volatile("lwsync":::"memory"); asm volatile("lwsync":::"memory");
clear_bit(HPTE_LOCK_BIT, word); clear_bit(HPTE_LOCK_BIT, word);
} }
long native_hpte_insert(unsigned long hpte_group, unsigned long va, long native_hpte_insert(unsigned long hpte_group, unsigned long va,
unsigned long prpn, int secondary, unsigned long prpn, unsigned long vflags,
unsigned long hpteflags, int bolted, int large) unsigned long rflags)
{ {
unsigned long arpn = physRpn_to_absRpn(prpn); unsigned long arpn = physRpn_to_absRpn(prpn);
HPTE *hptep = htab_address + hpte_group; hpte_t *hptep = htab_address + hpte_group;
Hpte_dword0 dw0; unsigned long hpte_v, hpte_r;
HPTE lhpte;
int i; int i;
for (i = 0; i < HPTES_PER_GROUP; i++) { for (i = 0; i < HPTES_PER_GROUP; i++) {
dw0 = hptep->dw0.dw0; if (! (hptep->v & HPTE_V_VALID)) {
if (!dw0.v) {
/* retry with lock held */ /* retry with lock held */
native_lock_hpte(hptep); native_lock_hpte(hptep);
dw0 = hptep->dw0.dw0; if (! (hptep->v & HPTE_V_VALID))
if (!dw0.v)
break; break;
native_unlock_hpte(hptep); native_unlock_hpte(hptep);
} }
...@@ -75,56 +71,45 @@ long native_hpte_insert(unsigned long hpte_group, unsigned long va, ...@@ -75,56 +71,45 @@ long native_hpte_insert(unsigned long hpte_group, unsigned long va,
if (i == HPTES_PER_GROUP) if (i == HPTES_PER_GROUP)
return -1; return -1;
lhpte.dw1.dword1 = 0; hpte_v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID;
lhpte.dw1.dw1.rpn = arpn; if (vflags & HPTE_V_LARGE)
lhpte.dw1.flags.flags = hpteflags; va &= ~(1UL << HPTE_V_AVPN_SHIFT);
hpte_r = (arpn << HPTE_R_RPN_SHIFT) | rflags;
lhpte.dw0.dword0 = 0;
lhpte.dw0.dw0.avpn = va >> 23;
lhpte.dw0.dw0.h = secondary;
lhpte.dw0.dw0.bolted = bolted;
lhpte.dw0.dw0.v = 1;
if (large) {
lhpte.dw0.dw0.l = 1;
lhpte.dw0.dw0.avpn &= ~0x1UL;
}
hptep->dw1.dword1 = lhpte.dw1.dword1;
hptep->r = hpte_r;
/* Guarantee the second dword is visible before the valid bit */ /* Guarantee the second dword is visible before the valid bit */
__asm__ __volatile__ ("eieio" : : : "memory"); __asm__ __volatile__ ("eieio" : : : "memory");
/* /*
* Now set the first dword including the valid bit * Now set the first dword including the valid bit
* NOTE: this also unlocks the hpte * NOTE: this also unlocks the hpte
*/ */
hptep->dw0.dword0 = lhpte.dw0.dword0; hptep->v = hpte_v;
__asm__ __volatile__ ("ptesync" : : : "memory"); __asm__ __volatile__ ("ptesync" : : : "memory");
return i | (secondary << 3); return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
} }
static long native_hpte_remove(unsigned long hpte_group) static long native_hpte_remove(unsigned long hpte_group)
{ {
HPTE *hptep; hpte_t *hptep;
Hpte_dword0 dw0;
int i; int i;
int slot_offset; int slot_offset;
unsigned long hpte_v;
/* pick a random entry to start at */ /* pick a random entry to start at */
slot_offset = mftb() & 0x7; slot_offset = mftb() & 0x7;
for (i = 0; i < HPTES_PER_GROUP; i++) { for (i = 0; i < HPTES_PER_GROUP; i++) {
hptep = htab_address + hpte_group + slot_offset; hptep = htab_address + hpte_group + slot_offset;
dw0 = hptep->dw0.dw0; hpte_v = hptep->v;
if (dw0.v && !dw0.bolted) { if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
/* retry with lock held */ /* retry with lock held */
native_lock_hpte(hptep); native_lock_hpte(hptep);
dw0 = hptep->dw0.dw0; hpte_v = hptep->v;
if (dw0.v && !dw0.bolted) if ((hpte_v & HPTE_V_VALID)
&& !(hpte_v & HPTE_V_BOLTED))
break; break;
native_unlock_hpte(hptep); native_unlock_hpte(hptep);
} }
...@@ -137,15 +122,15 @@ static long native_hpte_remove(unsigned long hpte_group) ...@@ -137,15 +122,15 @@ static long native_hpte_remove(unsigned long hpte_group)
return -1; return -1;
/* Invalidate the hpte. NOTE: this also unlocks it */ /* Invalidate the hpte. NOTE: this also unlocks it */
hptep->dw0.dword0 = 0; hptep->v = 0;
return i; return i;
} }
static inline void set_pp_bit(unsigned long pp, HPTE *addr) static inline void set_pp_bit(unsigned long pp, hpte_t *addr)
{ {
unsigned long old; unsigned long old;
unsigned long *p = &addr->dw1.dword1; unsigned long *p = &addr->r;
__asm__ __volatile__( __asm__ __volatile__(
"1: ldarx %0,0,%3\n\ "1: ldarx %0,0,%3\n\
...@@ -163,11 +148,11 @@ static inline void set_pp_bit(unsigned long pp, HPTE *addr) ...@@ -163,11 +148,11 @@ static inline void set_pp_bit(unsigned long pp, HPTE *addr)
*/ */
static long native_hpte_find(unsigned long vpn) static long native_hpte_find(unsigned long vpn)
{ {
HPTE *hptep; hpte_t *hptep;
unsigned long hash; unsigned long hash;
unsigned long i, j; unsigned long i, j;
long slot; long slot;
Hpte_dword0 dw0; unsigned long hpte_v;
hash = hpt_hash(vpn, 0); hash = hpt_hash(vpn, 0);
...@@ -175,10 +160,11 @@ static long native_hpte_find(unsigned long vpn) ...@@ -175,10 +160,11 @@ static long native_hpte_find(unsigned long vpn)
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
for (i = 0; i < HPTES_PER_GROUP; i++) { for (i = 0; i < HPTES_PER_GROUP; i++) {
hptep = htab_address + slot; hptep = htab_address + slot;
dw0 = hptep->dw0.dw0; hpte_v = hptep->v;
if ((dw0.avpn == (vpn >> 11)) && dw0.v && if ((HPTE_V_AVPN_VAL(hpte_v) == (vpn >> 11))
(dw0.h == j)) { && (hpte_v & HPTE_V_VALID)
&& ( !!(hpte_v & HPTE_V_SECONDARY) == j)) {
/* HPTE matches */ /* HPTE matches */
if (j) if (j)
slot = -slot; slot = -slot;
...@@ -195,20 +181,21 @@ static long native_hpte_find(unsigned long vpn) ...@@ -195,20 +181,21 @@ static long native_hpte_find(unsigned long vpn)
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
unsigned long va, int large, int local) unsigned long va, int large, int local)
{ {
HPTE *hptep = htab_address + slot; hpte_t *hptep = htab_address + slot;
Hpte_dword0 dw0; unsigned long hpte_v;
unsigned long avpn = va >> 23; unsigned long avpn = va >> 23;
int ret = 0; int ret = 0;
if (large) if (large)
avpn &= ~0x1UL; avpn &= ~1;
native_lock_hpte(hptep); native_lock_hpte(hptep);
dw0 = hptep->dw0.dw0; hpte_v = hptep->v;
/* Even if we miss, we need to invalidate the TLB */ /* Even if we miss, we need to invalidate the TLB */
if ((dw0.avpn != avpn) || !dw0.v) { if ((HPTE_V_AVPN_VAL(hpte_v) != avpn)
|| !(hpte_v & HPTE_V_VALID)) {
native_unlock_hpte(hptep); native_unlock_hpte(hptep);
ret = -1; ret = -1;
} else { } else {
...@@ -244,7 +231,7 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea) ...@@ -244,7 +231,7 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
{ {
unsigned long vsid, va, vpn, flags = 0; unsigned long vsid, va, vpn, flags = 0;
long slot; long slot;
HPTE *hptep; hpte_t *hptep;
int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE); int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
vsid = get_kernel_vsid(ea); vsid = get_kernel_vsid(ea);
...@@ -269,26 +256,27 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea) ...@@ -269,26 +256,27 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
static void native_hpte_invalidate(unsigned long slot, unsigned long va, static void native_hpte_invalidate(unsigned long slot, unsigned long va,
int large, int local) int large, int local)
{ {
HPTE *hptep = htab_address + slot; hpte_t *hptep = htab_address + slot;
Hpte_dword0 dw0; unsigned long hpte_v;
unsigned long avpn = va >> 23; unsigned long avpn = va >> 23;
unsigned long flags; unsigned long flags;
int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE); int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
if (large) if (large)
avpn &= ~0x1UL; avpn &= ~1;
local_irq_save(flags); local_irq_save(flags);
native_lock_hpte(hptep); native_lock_hpte(hptep);
dw0 = hptep->dw0.dw0; hpte_v = hptep->v;
/* Even if we miss, we need to invalidate the TLB */ /* Even if we miss, we need to invalidate the TLB */
if ((dw0.avpn != avpn) || !dw0.v) { if ((HPTE_V_AVPN_VAL(hpte_v) != avpn)
|| !(hpte_v & HPTE_V_VALID)) {
native_unlock_hpte(hptep); native_unlock_hpte(hptep);
} else { } else {
/* Invalidate the hpte. NOTE: this also unlocks it */ /* Invalidate the hpte. NOTE: this also unlocks it */
hptep->dw0.dword0 = 0; hptep->v = 0;
} }
/* Invalidate the tlb */ /* Invalidate the tlb */
...@@ -315,8 +303,8 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va, ...@@ -315,8 +303,8 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
static void native_hpte_clear(void) static void native_hpte_clear(void)
{ {
unsigned long slot, slots, flags; unsigned long slot, slots, flags;
HPTE *hptep = htab_address; hpte_t *hptep = htab_address;
Hpte_dword0 dw0; unsigned long hpte_v;
unsigned long pteg_count; unsigned long pteg_count;
pteg_count = htab_hash_mask + 1; pteg_count = htab_hash_mask + 1;
...@@ -336,11 +324,11 @@ static void native_hpte_clear(void) ...@@ -336,11 +324,11 @@ static void native_hpte_clear(void)
* running, right? and for crash dump, we probably * running, right? and for crash dump, we probably
* don't want to wait for a maybe bad cpu. * don't want to wait for a maybe bad cpu.
*/ */
dw0 = hptep->dw0.dw0; hpte_v = hptep->v;
if (dw0.v) { if (hpte_v & HPTE_V_VALID) {
hptep->dw0.dword0 = 0; hptep->v = 0;
tlbie(slot2va(dw0.avpn, dw0.l, dw0.h, slot), dw0.l); tlbie(slot2va(hpte_v, slot), hpte_v & HPTE_V_LARGE);
} }
} }
...@@ -353,8 +341,8 @@ static void native_flush_hash_range(unsigned long context, ...@@ -353,8 +341,8 @@ static void native_flush_hash_range(unsigned long context,
{ {
unsigned long vsid, vpn, va, hash, secondary, slot, flags, avpn; unsigned long vsid, vpn, va, hash, secondary, slot, flags, avpn;
int i, j; int i, j;
HPTE *hptep; hpte_t *hptep;
Hpte_dword0 dw0; unsigned long hpte_v;
struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
/* XXX fix for large ptes */ /* XXX fix for large ptes */
...@@ -390,14 +378,15 @@ static void native_flush_hash_range(unsigned long context, ...@@ -390,14 +378,15 @@ static void native_flush_hash_range(unsigned long context,
native_lock_hpte(hptep); native_lock_hpte(hptep);
dw0 = hptep->dw0.dw0; hpte_v = hptep->v;
/* Even if we miss, we need to invalidate the TLB */ /* Even if we miss, we need to invalidate the TLB */
if ((dw0.avpn != avpn) || !dw0.v) { if ((HPTE_V_AVPN_VAL(hpte_v) != avpn)
|| !(hpte_v & HPTE_V_VALID)) {
native_unlock_hpte(hptep); native_unlock_hpte(hptep);
} else { } else {
/* Invalidate the hpte. NOTE: this also unlocks it */ /* Invalidate the hpte. NOTE: this also unlocks it */
hptep->dw0.dword0 = 0; hptep->v = 0;
} }
j++; j++;
......
...@@ -75,8 +75,8 @@ ...@@ -75,8 +75,8 @@
extern unsigned long dart_tablebase; extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */ #endif /* CONFIG_U3_DART */
HPTE *htab_address; hpte_t *htab_address;
unsigned long htab_hash_mask; unsigned long htab_hash_mask;
extern unsigned long _SDR1; extern unsigned long _SDR1;
...@@ -97,11 +97,15 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end, ...@@ -97,11 +97,15 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end,
unsigned long addr; unsigned long addr;
unsigned int step; unsigned int step;
unsigned long tmp_mode; unsigned long tmp_mode;
unsigned long vflags;
if (large) if (large) {
step = 16*MB; step = 16*MB;
else vflags = HPTE_V_BOLTED | HPTE_V_LARGE;
} else {
step = 4*KB; step = 4*KB;
vflags = HPTE_V_BOLTED;
}
for (addr = start; addr < end; addr += step) { for (addr = start; addr < end; addr += step) {
unsigned long vpn, hash, hpteg; unsigned long vpn, hash, hpteg;
...@@ -129,12 +133,12 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end, ...@@ -129,12 +133,12 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end,
if (systemcfg->platform & PLATFORM_LPAR) if (systemcfg->platform & PLATFORM_LPAR)
ret = pSeries_lpar_hpte_insert(hpteg, va, ret = pSeries_lpar_hpte_insert(hpteg, va,
virt_to_abs(addr) >> PAGE_SHIFT, virt_to_abs(addr) >> PAGE_SHIFT,
0, tmp_mode, 1, large); vflags, tmp_mode);
else else
#endif /* CONFIG_PPC_PSERIES */ #endif /* CONFIG_PPC_PSERIES */
ret = native_hpte_insert(hpteg, va, ret = native_hpte_insert(hpteg, va,
virt_to_abs(addr) >> PAGE_SHIFT, virt_to_abs(addr) >> PAGE_SHIFT,
0, tmp_mode, 1, large); vflags, tmp_mode);
if (ret == -1) { if (ret == -1) {
ppc64_terminate_msg(0x20, "create_pte_mapping"); ppc64_terminate_msg(0x20, "create_pte_mapping");
......
...@@ -583,7 +583,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access, ...@@ -583,7 +583,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
pte_t *ptep; pte_t *ptep;
unsigned long va, vpn; unsigned long va, vpn;
pte_t old_pte, new_pte; pte_t old_pte, new_pte;
unsigned long hpteflags, prpn; unsigned long rflags, prpn;
long slot; long slot;
int err = 1; int err = 1;
...@@ -626,9 +626,9 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access, ...@@ -626,9 +626,9 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
old_pte = *ptep; old_pte = *ptep;
new_pte = old_pte; new_pte = old_pte;
hpteflags = 0x2 | (! (pte_val(new_pte) & _PAGE_RW)); rflags = 0x2 | (! (pte_val(new_pte) & _PAGE_RW));
/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */ /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
hpteflags |= ((pte_val(new_pte) & _PAGE_EXEC) ? 0 : HW_NO_EXEC); rflags |= ((pte_val(new_pte) & _PAGE_EXEC) ? 0 : HW_NO_EXEC);
/* Check if pte already has an hpte (case 2) */ /* Check if pte already has an hpte (case 2) */
if (unlikely(pte_val(old_pte) & _PAGE_HASHPTE)) { if (unlikely(pte_val(old_pte) & _PAGE_HASHPTE)) {
...@@ -641,7 +641,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access, ...@@ -641,7 +641,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += (pte_val(old_pte) & _PAGE_GROUP_IX) >> 12; slot += (pte_val(old_pte) & _PAGE_GROUP_IX) >> 12;
if (ppc_md.hpte_updatepp(slot, hpteflags, va, 1, local) == -1) if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1)
pte_val(old_pte) &= ~_PAGE_HPTEFLAGS; pte_val(old_pte) &= ~_PAGE_HPTEFLAGS;
} }
...@@ -661,10 +661,10 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access, ...@@ -661,10 +661,10 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
/* Add in WIMG bits */ /* Add in WIMG bits */
/* XXX We should store these in the pte */ /* XXX We should store these in the pte */
hpteflags |= _PAGE_COHERENT; rflags |= _PAGE_COHERENT;
slot = ppc_md.hpte_insert(hpte_group, va, prpn, 0, slot = ppc_md.hpte_insert(hpte_group, va, prpn,
hpteflags, 0, 1); HPTE_V_LARGE, rflags);
/* Primary is full, try the secondary */ /* Primary is full, try the secondary */
if (unlikely(slot == -1)) { if (unlikely(slot == -1)) {
...@@ -672,7 +672,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access, ...@@ -672,7 +672,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
hpte_group = ((~hash & htab_hash_mask) * hpte_group = ((~hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL; HPTES_PER_GROUP) & ~0x7UL;
slot = ppc_md.hpte_insert(hpte_group, va, prpn, slot = ppc_md.hpte_insert(hpte_group, va, prpn,
1, hpteflags, 0, 1); HPTE_V_LARGE, rflags);
if (slot == -1) { if (slot == -1) {
if (mftb() & 0x1) if (mftb() & 0x1)
hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
......
...@@ -180,9 +180,10 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags) ...@@ -180,9 +180,10 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
/* Panic if a pte grpup is full */ /* Panic if a pte grpup is full */
if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT, 0, if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
_PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX, HPTE_V_BOLTED,
1, 0) == -1) { _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
== -1) {
panic("map_io_page: could not insert mapping"); panic("map_io_page: could not insert mapping");
} }
} }
......
...@@ -77,27 +77,26 @@ static inline u64 HvCallHpt_invalidateSetSwBitsGet(u32 hpteIndex, u8 bitson, ...@@ -77,27 +77,26 @@ static inline u64 HvCallHpt_invalidateSetSwBitsGet(u32 hpteIndex, u8 bitson,
return compressedStatus; return compressedStatus;
} }
static inline u64 HvCallHpt_findValid(HPTE *hpte, u64 vpn) static inline u64 HvCallHpt_findValid(hpte_t *hpte, u64 vpn)
{ {
return HvCall3Ret16(HvCallHptFindValid, hpte, vpn, 0, 0); return HvCall3Ret16(HvCallHptFindValid, hpte, vpn, 0, 0);
} }
static inline u64 HvCallHpt_findNextValid(HPTE *hpte, u32 hpteIndex, static inline u64 HvCallHpt_findNextValid(hpte_t *hpte, u32 hpteIndex,
u8 bitson, u8 bitsoff) u8 bitson, u8 bitsoff)
{ {
return HvCall3Ret16(HvCallHptFindNextValid, hpte, hpteIndex, return HvCall3Ret16(HvCallHptFindNextValid, hpte, hpteIndex,
bitson, bitsoff); bitson, bitsoff);
} }
static inline void HvCallHpt_get(HPTE *hpte, u32 hpteIndex) static inline void HvCallHpt_get(hpte_t *hpte, u32 hpteIndex)
{ {
HvCall2Ret16(HvCallHptGet, hpte, hpteIndex, 0); HvCall2Ret16(HvCallHptGet, hpte, hpteIndex, 0);
} }
static inline void HvCallHpt_addValidate(u32 hpteIndex, u32 hBit, HPTE *hpte) static inline void HvCallHpt_addValidate(u32 hpteIndex, u32 hBit, hpte_t *hpte)
{ {
HvCall4(HvCallHptAddValidate, hpteIndex, hBit, (*((u64 *)hpte)), HvCall4(HvCallHptAddValidate, hpteIndex, hBit, hpte->v, hpte->r);
(*(((u64 *)hpte)+1)));
} }
#endif /* _HVCALLHPT_H */ #endif /* _HVCALLHPT_H */
...@@ -53,10 +53,8 @@ struct machdep_calls { ...@@ -53,10 +53,8 @@ struct machdep_calls {
long (*hpte_insert)(unsigned long hpte_group, long (*hpte_insert)(unsigned long hpte_group,
unsigned long va, unsigned long va,
unsigned long prpn, unsigned long prpn,
int secondary, unsigned long vflags,
unsigned long hpteflags, unsigned long rflags);
int bolted,
int large);
long (*hpte_remove)(unsigned long hpte_group); long (*hpte_remove)(unsigned long hpte_group);
void (*flush_hash_range)(unsigned long context, void (*flush_hash_range)(unsigned long context,
unsigned long number, unsigned long number,
......
...@@ -60,6 +60,22 @@ ...@@ -60,6 +60,22 @@
#define HPTES_PER_GROUP 8 #define HPTES_PER_GROUP 8
#define HPTE_V_AVPN_SHIFT 7
#define HPTE_V_AVPN ASM_CONST(0xffffffffffffff80)
#define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_BOLTED ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID ASM_CONST(0x0000000000000001)
#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)
#define HPTE_R_TS ASM_CONST(0x4000000000000000)
#define HPTE_R_RPN_SHIFT 12
#define HPTE_R_RPN ASM_CONST(0x3ffffffffffff000)
#define HPTE_R_FLAGS ASM_CONST(0x00000000000003ff)
#define HPTE_R_PP ASM_CONST(0x0000000000000003)
/* Values for PP (assumes Ks=0, Kp=1) */ /* Values for PP (assumes Ks=0, Kp=1) */
/* pp0 will always be 0 for linux */ /* pp0 will always be 0 for linux */
#define PP_RWXX 0 /* Supervisor read/write, User none */ #define PP_RWXX 0 /* Supervisor read/write, User none */
...@@ -69,54 +85,13 @@ ...@@ -69,54 +85,13 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
/* Hardware Page Table Entry */
typedef struct {
unsigned long avpn:57; /* vsid | api == avpn */
unsigned long : 2; /* Software use */
unsigned long bolted: 1; /* HPTE is "bolted" */
unsigned long lock: 1; /* lock on pSeries SMP */
unsigned long l: 1; /* Virtual page is large (L=1) or 4 KB (L=0) */
unsigned long h: 1; /* Hash function identifier */
unsigned long v: 1; /* Valid (v=1) or invalid (v=0) */
} Hpte_dword0;
typedef struct {
unsigned long pp0: 1; /* Page protection bit 0 */
unsigned long ts: 1; /* Tag set bit */
unsigned long rpn: 50; /* Real page number */
unsigned long : 2; /* Reserved */
unsigned long ac: 1; /* Address compare */
unsigned long r: 1; /* Referenced */
unsigned long c: 1; /* Changed */
unsigned long w: 1; /* Write-thru cache mode */
unsigned long i: 1; /* Cache inhibited */
unsigned long m: 1; /* Memory coherence required */
unsigned long g: 1; /* Guarded */
unsigned long n: 1; /* No-execute */
unsigned long pp: 2; /* Page protection bits 1:2 */
} Hpte_dword1;
typedef struct {
char padding[6]; /* padding */
unsigned long : 6; /* padding */
unsigned long flags: 10; /* HPTE flags */
} Hpte_dword1_flags;
typedef struct { typedef struct {
union { unsigned long v;
unsigned long dword0; unsigned long r;
Hpte_dword0 dw0; } hpte_t;
} dw0;
union {
unsigned long dword1;
Hpte_dword1 dw1;
Hpte_dword1_flags flags;
} dw1;
} HPTE;
extern HPTE * htab_address; extern hpte_t *htab_address;
extern unsigned long htab_hash_mask; extern unsigned long htab_hash_mask;
static inline unsigned long hpt_hash(unsigned long vpn, int large) static inline unsigned long hpt_hash(unsigned long vpn, int large)
{ {
...@@ -181,18 +156,18 @@ static inline void tlbiel(unsigned long va) ...@@ -181,18 +156,18 @@ static inline void tlbiel(unsigned long va)
asm volatile("ptesync": : :"memory"); asm volatile("ptesync": : :"memory");
} }
static inline unsigned long slot2va(unsigned long avpn, unsigned long large, static inline unsigned long slot2va(unsigned long hpte_v, unsigned long slot)
unsigned long secondary, unsigned long slot)
{ {
unsigned long avpn = HPTE_V_AVPN_VAL(hpte_v);
unsigned long va; unsigned long va;
va = avpn << 23; va = avpn << 23;
if (!large) { if (! (hpte_v & HPTE_V_LARGE)) {
unsigned long vpi, pteg; unsigned long vpi, pteg;
pteg = slot / HPTES_PER_GROUP; pteg = slot / HPTES_PER_GROUP;
if (secondary) if (hpte_v & HPTE_V_SECONDARY)
pteg = ~pteg; pteg = ~pteg;
vpi = ((va >> 28) ^ pteg) & htab_hash_mask; vpi = ((va >> 28) ^ pteg) & htab_hash_mask;
...@@ -219,11 +194,11 @@ extern void hpte_init_iSeries(void); ...@@ -219,11 +194,11 @@ extern void hpte_init_iSeries(void);
extern long pSeries_lpar_hpte_insert(unsigned long hpte_group, extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long va, unsigned long prpn, unsigned long va, unsigned long prpn,
int secondary, unsigned long hpteflags, unsigned long vflags,
int bolted, int large); unsigned long rflags);
extern long native_hpte_insert(unsigned long hpte_group, unsigned long va, extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
unsigned long prpn, int secondary, unsigned long prpn,
unsigned long hpteflags, int bolted, int large); unsigned long vflags, unsigned long rflags);
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment