Commit 7025776e authored by Benjamin Herrenschmidt, committed by Michael Ellerman

powerpc/mm: Move hash table ops to a separate structure

Moving probe_machine() to after mmu init will cause the ppc_md
fields relative to the hash table management to be overwritten.

Since we have essentially disconnected the machine type from
the hash backend ops, finish the job by moving them to a different
structure.

The only callback that didn't quite fit is update_partition_table,
since it is not specific to hash, so I moved it to a standalone
variable for now. We can revisit later if needed.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[mpe: Fix ppc64e build failure in kexec]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent b521f576
...@@ -124,6 +124,45 @@ ...@@ -124,6 +124,45 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
/*
 * Hash page table (HPT) management callbacks, split out of ppc_md so
 * the hash MMU backend is decoupled from the machine type.  Populated
 * by the backend init functions (hpte_init_native(), hpte_init_lpar(),
 * ps3_hpte_init()); callbacks not provided by a backend stay NULL and
 * callers must check before use.
 */
struct mmu_hash_ops {
/* Invalidate the HPTE at @slot for virtual page number @vpn.
 * NOTE(review): bpsize/apsize appear to be base/actual page size
 * indices (MMU_PAGE_*) and @ssize the segment size — confirm. */
void (*hpte_invalidate)(unsigned long slot,
unsigned long vpn,
int bpsize, int apsize,
int ssize, int local);
/* Update protection bits of the HPTE at @slot; returns -1 if the
 * entry was not found (callers then fall back to an insert). */
long (*hpte_updatepp)(unsigned long slot,
unsigned long newpp,
unsigned long vpn,
int bpsize, int apsize,
int ssize, unsigned long flags);
/* Update protection bits of a bolted HPTE mapping @ea. */
void (*hpte_updateboltedpp)(unsigned long newpp,
unsigned long ea,
int psize, int ssize);
/* Insert an HPTE into @hpte_group; returns the slot index (which may
 * carry _PTEIDX_SECONDARY) or -1 if the group is full. */
long (*hpte_insert)(unsigned long hpte_group,
unsigned long vpn,
unsigned long prpn,
unsigned long rflags,
unsigned long vflags,
int psize, int apsize,
int ssize);
/* Evict an entry from @hpte_group to make room for an insert. */
long (*hpte_remove)(unsigned long hpte_group);
/* Remove the bolted HPTE mapping @ea; optional (checked for NULL). */
int (*hpte_removebolted)(unsigned long ea,
int psize, int ssize);
/* Flush @number entries from the current TLB batch; optional —
 * callers fall back to per-page invalidation when NULL. */
void (*flush_hash_range)(unsigned long number, int local);
/* Invalidate the HPTEs backing a hugepage, using the per-slot array;
 * optional (checked for NULL). */
void (*hugepage_invalidate)(unsigned long vsid,
unsigned long addr,
unsigned char *hpte_slot_array,
int psize, int ssize, int local);
/*
 * Special for kexec.
 * To be called in real mode with interrupts disabled. No locks are
 * taken as such, concurrent access on pre POWER5 hardware could result
 * in a deadlock.
 * The linear mapping is destroyed as well.
 */
void (*hpte_clear_all)(void);
};
/* Single global ops instance, filled in by the active hash backend. */
extern struct mmu_hash_ops mmu_hash_ops;
struct hash_pte { struct hash_pte {
__be64 v; __be64 v;
__be64 r; __be64 r;
......
...@@ -34,42 +34,6 @@ struct pci_host_bridge; ...@@ -34,42 +34,6 @@ struct pci_host_bridge;
struct machdep_calls { struct machdep_calls {
char *name; char *name;
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
void (*hpte_invalidate)(unsigned long slot,
unsigned long vpn,
int bpsize, int apsize,
int ssize, int local);
long (*hpte_updatepp)(unsigned long slot,
unsigned long newpp,
unsigned long vpn,
int bpsize, int apsize,
int ssize, unsigned long flags);
void (*hpte_updateboltedpp)(unsigned long newpp,
unsigned long ea,
int psize, int ssize);
long (*hpte_insert)(unsigned long hpte_group,
unsigned long vpn,
unsigned long prpn,
unsigned long rflags,
unsigned long vflags,
int psize, int apsize,
int ssize);
long (*hpte_remove)(unsigned long hpte_group);
int (*hpte_removebolted)(unsigned long ea,
int psize, int ssize);
void (*flush_hash_range)(unsigned long number, int local);
void (*hugepage_invalidate)(unsigned long vsid,
unsigned long addr,
unsigned char *hpte_slot_array,
int psize, int ssize, int local);
/*
* Special for kexec.
* To be called in real mode with interrupts disabled. No locks are
* taken as such, concurrent access on pre POWER5 hardware could result
* in a deadlock.
* The linear mapping is destroyed as well.
*/
void (*hpte_clear_all)(void);
void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size, void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size,
unsigned long flags, void *caller); unsigned long flags, void *caller);
void (*iounmap)(volatile void __iomem *token); void (*iounmap)(volatile void __iomem *token);
......
...@@ -55,7 +55,7 @@ int default_machine_kexec_prepare(struct kimage *image) ...@@ -55,7 +55,7 @@ int default_machine_kexec_prepare(struct kimage *image)
const unsigned long *basep; const unsigned long *basep;
const unsigned int *sizep; const unsigned int *sizep;
if (!ppc_md.hpte_clear_all) if (!mmu_hash_ops.hpte_clear_all)
return -ENOENT; return -ENOENT;
/* /*
...@@ -380,7 +380,12 @@ void default_machine_kexec(struct kimage *image) ...@@ -380,7 +380,12 @@ void default_machine_kexec(struct kimage *image)
*/ */
kexec_sequence(&kexec_stack, image->start, image, kexec_sequence(&kexec_stack, image->start, image,
page_address(image->control_code_page), page_address(image->control_code_page),
ppc_md.hpte_clear_all); #ifdef CONFIG_PPC_STD_MMU
mmu_hash_ops.hpte_clear_all
#else
NULL
#endif
);
/* NOTREACHED */ /* NOTREACHED */
} }
......
...@@ -667,7 +667,7 @@ _GLOBAL(kexec_sequence) ...@@ -667,7 +667,7 @@ _GLOBAL(kexec_sequence)
mr r12,r27 mr r12,r27
#endif #endif
mtctr r12 mtctr r12
bctrl /* ppc_md.hpte_clear_all(void); */ bctrl /* mmu_hash_ops.hpte_clear_all(void); */
#endif /* !CONFIG_PPC_BOOK3E */ #endif /* !CONFIG_PPC_BOOK3E */
/* /*
......
...@@ -34,9 +34,9 @@ ...@@ -34,9 +34,9 @@
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{ {
ppc_md.hpte_invalidate(pte->slot, pte->host_vpn, mmu_hash_ops.hpte_invalidate(pte->slot, pte->host_vpn,
pte->pagesize, pte->pagesize, MMU_SEGSIZE_256M, pte->pagesize, pte->pagesize,
false); MMU_SEGSIZE_256M, false);
} }
/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using /* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
...@@ -169,13 +169,13 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, ...@@ -169,13 +169,13 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
/* In case we tried normal mapping already, let's nuke old entries */ /* In case we tried normal mapping already, let's nuke old entries */
if (attempt > 1) if (attempt > 1)
if (ppc_md.hpte_remove(hpteg) < 0) { if (mmu_hash_ops.hpte_remove(hpteg) < 0) {
r = -1; r = -1;
goto out_unlock; goto out_unlock;
} }
ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags, ret = mmu_hash_ops.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
hpsize, hpsize, MMU_SEGSIZE_256M); hpsize, hpsize, MMU_SEGSIZE_256M);
if (ret < 0) { if (ret < 0) {
/* If we couldn't map a primary PTE, try a secondary */ /* If we couldn't map a primary PTE, try a secondary */
...@@ -187,8 +187,10 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, ...@@ -187,8 +187,10 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
trace_kvm_book3s_64_mmu_map(rflags, hpteg, trace_kvm_book3s_64_mmu_map(rflags, hpteg,
vpn, hpaddr, orig_pte); vpn, hpaddr, orig_pte);
/* The ppc_md code may give us a secondary entry even though we /*
asked for a primary. Fix up. */ * The mmu_hash_ops code may give us a secondary entry even
* though we asked for a primary. Fix up.
*/
if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) { if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
hash = ~hash; hash = ~hash;
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
......
...@@ -70,8 +70,8 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, ...@@ -70,8 +70,8 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += (old_pte & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT; slot += (old_pte & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT;
if (ppc_md.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_4K, if (mmu_hash_ops.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_4K,
MMU_PAGE_4K, ssize, flags) == -1) MMU_PAGE_4K, ssize, flags) == -1)
old_pte &= ~_PAGE_HPTEFLAGS; old_pte &= ~_PAGE_HPTEFLAGS;
} }
...@@ -84,21 +84,23 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, ...@@ -84,21 +84,23 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
/* Insert into the hash table, primary slot */ /* Insert into the hash table, primary slot */
slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0, slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
MMU_PAGE_4K, MMU_PAGE_4K, ssize); MMU_PAGE_4K, MMU_PAGE_4K, ssize);
/* /*
* Primary is full, try the secondary * Primary is full, try the secondary
*/ */
if (unlikely(slot == -1)) { if (unlikely(slot == -1)) {
hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
slot = ppc_md.hpte_insert(hpte_group, vpn, pa, slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
rflags, HPTE_V_SECONDARY, rflags,
MMU_PAGE_4K, MMU_PAGE_4K, ssize); HPTE_V_SECONDARY,
MMU_PAGE_4K,
MMU_PAGE_4K, ssize);
if (slot == -1) { if (slot == -1) {
if (mftb() & 0x1) if (mftb() & 0x1)
hpte_group = ((hash & htab_hash_mask) * hpte_group = ((hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL; HPTES_PER_GROUP) & ~0x7UL;
ppc_md.hpte_remove(hpte_group); mmu_hash_ops.hpte_remove(hpte_group);
/* /*
* FIXME!! Should be try the group from which we removed ? * FIXME!! Should be try the group from which we removed ?
*/ */
......
...@@ -133,9 +133,9 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, ...@@ -133,9 +133,9 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX; slot += hidx & _PTEIDX_GROUP_IX;
ret = ppc_md.hpte_updatepp(slot, rflags, vpn, ret = mmu_hash_ops.hpte_updatepp(slot, rflags, vpn,
MMU_PAGE_4K, MMU_PAGE_4K, MMU_PAGE_4K, MMU_PAGE_4K,
ssize, flags); ssize, flags);
/* /*
*if we failed because typically the HPTE wasn't really here *if we failed because typically the HPTE wasn't really here
* we try an insertion. * we try an insertion.
...@@ -166,21 +166,22 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, ...@@ -166,21 +166,22 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
/* Insert into the hash table, primary slot */ /* Insert into the hash table, primary slot */
slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0, slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
MMU_PAGE_4K, MMU_PAGE_4K, ssize); MMU_PAGE_4K, MMU_PAGE_4K, ssize);
/* /*
* Primary is full, try the secondary * Primary is full, try the secondary
*/ */
if (unlikely(slot == -1)) { if (unlikely(slot == -1)) {
hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
slot = ppc_md.hpte_insert(hpte_group, vpn, pa, slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
rflags, HPTE_V_SECONDARY, rflags, HPTE_V_SECONDARY,
MMU_PAGE_4K, MMU_PAGE_4K, ssize); MMU_PAGE_4K, MMU_PAGE_4K,
ssize);
if (slot == -1) { if (slot == -1) {
if (mftb() & 0x1) if (mftb() & 0x1)
hpte_group = ((hash & htab_hash_mask) * hpte_group = ((hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL; HPTES_PER_GROUP) & ~0x7UL;
ppc_md.hpte_remove(hpte_group); mmu_hash_ops.hpte_remove(hpte_group);
/* /*
* FIXME!! Should be try the group from which we removed ? * FIXME!! Should be try the group from which we removed ?
*/ */
...@@ -272,8 +273,9 @@ int __hash_page_64K(unsigned long ea, unsigned long access, ...@@ -272,8 +273,9 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += (old_pte & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT; slot += (old_pte & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT;
if (ppc_md.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_64K, if (mmu_hash_ops.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_64K,
MMU_PAGE_64K, ssize, flags) == -1) MMU_PAGE_64K, ssize,
flags) == -1)
old_pte &= ~_PAGE_HPTEFLAGS; old_pte &= ~_PAGE_HPTEFLAGS;
} }
...@@ -286,21 +288,24 @@ int __hash_page_64K(unsigned long ea, unsigned long access, ...@@ -286,21 +288,24 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
/* Insert into the hash table, primary slot */ /* Insert into the hash table, primary slot */
slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0, slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
MMU_PAGE_64K, MMU_PAGE_64K, ssize); MMU_PAGE_64K, MMU_PAGE_64K,
ssize);
/* /*
* Primary is full, try the secondary * Primary is full, try the secondary
*/ */
if (unlikely(slot == -1)) { if (unlikely(slot == -1)) {
hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
slot = ppc_md.hpte_insert(hpte_group, vpn, pa, slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
rflags, HPTE_V_SECONDARY, rflags,
MMU_PAGE_64K, MMU_PAGE_64K, ssize); HPTE_V_SECONDARY,
MMU_PAGE_64K,
MMU_PAGE_64K, ssize);
if (slot == -1) { if (slot == -1) {
if (mftb() & 0x1) if (mftb() & 0x1)
hpte_group = ((hash & htab_hash_mask) * hpte_group = ((hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL; HPTES_PER_GROUP) & ~0x7UL;
ppc_md.hpte_remove(hpte_group); mmu_hash_ops.hpte_remove(hpte_group);
/* /*
* FIXME!! Should be try the group from which we removed ? * FIXME!! Should be try the group from which we removed ?
*/ */
......
...@@ -739,14 +739,14 @@ static int native_register_proc_table(unsigned long base, unsigned long page_siz ...@@ -739,14 +739,14 @@ static int native_register_proc_table(unsigned long base, unsigned long page_siz
void __init hpte_init_native(void) void __init hpte_init_native(void)
{ {
ppc_md.hpte_invalidate = native_hpte_invalidate; mmu_hash_ops.hpte_invalidate = native_hpte_invalidate;
ppc_md.hpte_updatepp = native_hpte_updatepp; mmu_hash_ops.hpte_updatepp = native_hpte_updatepp;
ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp; mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
ppc_md.hpte_insert = native_hpte_insert; mmu_hash_ops.hpte_insert = native_hpte_insert;
ppc_md.hpte_remove = native_hpte_remove; mmu_hash_ops.hpte_remove = native_hpte_remove;
ppc_md.hpte_clear_all = native_hpte_clear; mmu_hash_ops.hpte_clear_all = native_hpte_clear;
ppc_md.flush_hash_range = native_flush_hash_range; mmu_hash_ops.flush_hash_range = native_flush_hash_range;
ppc_md.hugepage_invalidate = native_hugepage_invalidate; mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate;
if (cpu_has_feature(CPU_FTR_ARCH_300)) if (cpu_has_feature(CPU_FTR_ARCH_300))
ppc_md.register_process_table = native_register_proc_table; ppc_md.register_process_table = native_register_proc_table;
......
...@@ -118,6 +118,8 @@ static u8 *linear_map_hash_slots; ...@@ -118,6 +118,8 @@ static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count; static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock); static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */ #endif /* CONFIG_DEBUG_PAGEALLOC */
struct mmu_hash_ops mmu_hash_ops;
EXPORT_SYMBOL(mmu_hash_ops);
/* There are definitions of page sizes arrays to be used when none /* There are definitions of page sizes arrays to be used when none
* is provided by the firmware. * is provided by the firmware.
...@@ -276,9 +278,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend, ...@@ -276,9 +278,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
hash = hpt_hash(vpn, shift, ssize); hash = hpt_hash(vpn, shift, ssize);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
BUG_ON(!ppc_md.hpte_insert); BUG_ON(!mmu_hash_ops.hpte_insert);
ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot, ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
HPTE_V_BOLTED, psize, psize, ssize); HPTE_V_BOLTED, psize, psize,
ssize);
if (ret < 0) if (ret < 0)
break; break;
...@@ -303,11 +306,11 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend, ...@@ -303,11 +306,11 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
shift = mmu_psize_defs[psize].shift; shift = mmu_psize_defs[psize].shift;
step = 1 << shift; step = 1 << shift;
if (!ppc_md.hpte_removebolted) if (!mmu_hash_ops.hpte_removebolted)
return -ENODEV; return -ENODEV;
for (vaddr = vstart; vaddr < vend; vaddr += step) { for (vaddr = vstart; vaddr < vend; vaddr += step) {
rc = ppc_md.hpte_removebolted(vaddr, psize, ssize); rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize);
if (rc == -ENOENT) { if (rc == -ENOENT) {
ret = -ENOENT; ret = -ENOENT;
continue; continue;
...@@ -789,8 +792,8 @@ static void __init htab_initialize(void) ...@@ -789,8 +792,8 @@ static void __init htab_initialize(void)
* Clear the htab if firmware assisted dump is active so * Clear the htab if firmware assisted dump is active so
* that we dont end up using old mappings. * that we dont end up using old mappings.
*/ */
if (is_fadump_active() && ppc_md.hpte_clear_all) if (is_fadump_active() && mmu_hash_ops.hpte_clear_all)
ppc_md.hpte_clear_all(); mmu_hash_ops.hpte_clear_all();
#endif #endif
} else { } else {
unsigned long limit = MEMBLOCK_ALLOC_ANYWHERE; unsigned long limit = MEMBLOCK_ALLOC_ANYWHERE;
...@@ -1480,7 +1483,8 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize, ...@@ -1480,7 +1483,8 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
* We use same base page size and actual psize, because we don't * We use same base page size and actual psize, because we don't
* use these functions for hugepage * use these functions for hugepage
*/ */
ppc_md.hpte_invalidate(slot, vpn, psize, psize, ssize, local); mmu_hash_ops.hpte_invalidate(slot, vpn, psize, psize,
ssize, local);
} pte_iterate_hashed_end(); } pte_iterate_hashed_end();
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
...@@ -1521,9 +1525,9 @@ void flush_hash_hugepage(unsigned long vsid, unsigned long addr, ...@@ -1521,9 +1525,9 @@ void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
if (!hpte_slot_array) if (!hpte_slot_array)
return; return;
if (ppc_md.hugepage_invalidate) { if (mmu_hash_ops.hugepage_invalidate) {
ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array, mmu_hash_ops.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
psize, ssize, local); psize, ssize, local);
goto tm_abort; goto tm_abort;
} }
/* /*
...@@ -1550,8 +1554,8 @@ void flush_hash_hugepage(unsigned long vsid, unsigned long addr, ...@@ -1550,8 +1554,8 @@ void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX; slot += hidx & _PTEIDX_GROUP_IX;
ppc_md.hpte_invalidate(slot, vpn, psize, mmu_hash_ops.hpte_invalidate(slot, vpn, psize,
MMU_PAGE_16M, ssize, local); MMU_PAGE_16M, ssize, local);
} }
tm_abort: tm_abort:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
...@@ -1575,8 +1579,8 @@ void flush_hash_hugepage(unsigned long vsid, unsigned long addr, ...@@ -1575,8 +1579,8 @@ void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
void flush_hash_range(unsigned long number, int local) void flush_hash_range(unsigned long number, int local)
{ {
if (ppc_md.flush_hash_range) if (mmu_hash_ops.flush_hash_range)
ppc_md.flush_hash_range(number, local); mmu_hash_ops.flush_hash_range(number, local);
else { else {
int i; int i;
struct ppc64_tlb_batch *batch = struct ppc64_tlb_batch *batch =
...@@ -1621,22 +1625,22 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn, ...@@ -1621,22 +1625,22 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
HPTES_PER_GROUP) & ~0x7UL; HPTES_PER_GROUP) & ~0x7UL;
/* Insert into the hash table, primary slot */ /* Insert into the hash table, primary slot */
slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, vflags, slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, vflags,
psize, psize, ssize); psize, psize, ssize);
/* Primary is full, try the secondary */ /* Primary is full, try the secondary */
if (unlikely(slot == -1)) { if (unlikely(slot == -1)) {
hpte_group = ((~hash & htab_hash_mask) * hpte_group = ((~hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL; HPTES_PER_GROUP) & ~0x7UL;
slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags,
vflags | HPTE_V_SECONDARY, vflags | HPTE_V_SECONDARY,
psize, psize, ssize); psize, psize, ssize);
if (slot == -1) { if (slot == -1) {
if (mftb() & 0x1) if (mftb() & 0x1)
hpte_group = ((hash & htab_hash_mask) * hpte_group = ((hash & htab_hash_mask) *
HPTES_PER_GROUP)&~0x7UL; HPTES_PER_GROUP)&~0x7UL;
ppc_md.hpte_remove(hpte_group); mmu_hash_ops.hpte_remove(hpte_group);
goto repeat; goto repeat;
} }
} }
...@@ -1686,8 +1690,9 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi) ...@@ -1686,8 +1690,9 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
hash = ~hash; hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX; slot += hidx & _PTEIDX_GROUP_IX;
ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_linear_psize, mmu_hash_ops.hpte_invalidate(slot, vpn, mmu_linear_psize,
mmu_kernel_ssize, 0); mmu_linear_psize,
mmu_kernel_ssize, 0);
} }
void __kernel_map_pages(struct page *page, int numpages, int enable) void __kernel_map_pages(struct page *page, int numpages, int enable)
......
...@@ -103,8 +103,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, ...@@ -103,8 +103,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX; slot += hidx & _PTEIDX_GROUP_IX;
ret = ppc_md.hpte_updatepp(slot, rflags, vpn, ret = mmu_hash_ops.hpte_updatepp(slot, rflags, vpn,
psize, lpsize, ssize, flags); psize, lpsize, ssize, flags);
/* /*
* We failed to update, try to insert a new entry. * We failed to update, try to insert a new entry.
*/ */
...@@ -131,23 +131,24 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, ...@@ -131,23 +131,24 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
/* Insert into the hash table, primary slot */ /* Insert into the hash table, primary slot */
slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0, slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
psize, lpsize, ssize); psize, lpsize, ssize);
/* /*
* Primary is full, try the secondary * Primary is full, try the secondary
*/ */
if (unlikely(slot == -1)) { if (unlikely(slot == -1)) {
hpte_group = ((~hash & htab_hash_mask) * hpte_group = ((~hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL; HPTES_PER_GROUP) & ~0x7UL;
slot = ppc_md.hpte_insert(hpte_group, vpn, pa, slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
rflags, HPTE_V_SECONDARY, rflags,
psize, lpsize, ssize); HPTE_V_SECONDARY,
psize, lpsize, ssize);
if (slot == -1) { if (slot == -1) {
if (mftb() & 0x1) if (mftb() & 0x1)
hpte_group = ((hash & htab_hash_mask) * hpte_group = ((hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL; HPTES_PER_GROUP) & ~0x7UL;
ppc_md.hpte_remove(hpte_group); mmu_hash_ops.hpte_remove(hpte_group);
goto repeat; goto repeat;
} }
} }
......
...@@ -79,8 +79,8 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, ...@@ -79,8 +79,8 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += (old_pte & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT; slot += (old_pte & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT;
if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize, if (mmu_hash_ops.hpte_updatepp(slot, rflags, vpn, mmu_psize,
mmu_psize, ssize, flags) == -1) mmu_psize, ssize, flags) == -1)
old_pte &= ~_PAGE_HPTEFLAGS; old_pte &= ~_PAGE_HPTEFLAGS;
} }
......
...@@ -195,12 +195,12 @@ static void ps3_hpte_clear(void) ...@@ -195,12 +195,12 @@ static void ps3_hpte_clear(void)
void __init ps3_hpte_init(unsigned long htab_size) void __init ps3_hpte_init(unsigned long htab_size)
{ {
ppc_md.hpte_invalidate = ps3_hpte_invalidate; mmu_hash_ops.hpte_invalidate = ps3_hpte_invalidate;
ppc_md.hpte_updatepp = ps3_hpte_updatepp; mmu_hash_ops.hpte_updatepp = ps3_hpte_updatepp;
ppc_md.hpte_updateboltedpp = ps3_hpte_updateboltedpp; mmu_hash_ops.hpte_updateboltedpp = ps3_hpte_updateboltedpp;
ppc_md.hpte_insert = ps3_hpte_insert; mmu_hash_ops.hpte_insert = ps3_hpte_insert;
ppc_md.hpte_remove = ps3_hpte_remove; mmu_hash_ops.hpte_remove = ps3_hpte_remove;
ppc_md.hpte_clear_all = ps3_hpte_clear; mmu_hash_ops.hpte_clear_all = ps3_hpte_clear;
ppc64_pft_size = __ilog2(htab_size); ppc64_pft_size = __ilog2(htab_size);
} }
......
...@@ -591,15 +591,15 @@ __setup("bulk_remove=", disable_bulk_remove); ...@@ -591,15 +591,15 @@ __setup("bulk_remove=", disable_bulk_remove);
void __init hpte_init_lpar(void) void __init hpte_init_lpar(void)
{ {
ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate; mmu_hash_ops.hpte_invalidate = pSeries_lpar_hpte_invalidate;
ppc_md.hpte_updatepp = pSeries_lpar_hpte_updatepp; mmu_hash_ops.hpte_updatepp = pSeries_lpar_hpte_updatepp;
ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp; mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
ppc_md.hpte_insert = pSeries_lpar_hpte_insert; mmu_hash_ops.hpte_insert = pSeries_lpar_hpte_insert;
ppc_md.hpte_remove = pSeries_lpar_hpte_remove; mmu_hash_ops.hpte_remove = pSeries_lpar_hpte_remove;
ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted; mmu_hash_ops.hpte_removebolted = pSeries_lpar_hpte_removebolted;
ppc_md.flush_hash_range = pSeries_lpar_flush_hash_range; mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range;
ppc_md.hpte_clear_all = pSeries_lpar_hptab_clear; mmu_hash_ops.hpte_clear_all = pSeries_lpar_hptab_clear;
ppc_md.hugepage_invalidate = pSeries_lpar_hugepage_invalidate; mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
} }
#ifdef CONFIG_PPC_SMLPAR #ifdef CONFIG_PPC_SMLPAR
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment