Commit 47d99948 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/mm: Move book3s64 specifics in subdirectory mm/book3s64

Many files in arch/powerpc/mm are only for book3S64. This patch
creates a subdirectory for them.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
[mpe: Update the selftest sym links, shorten new filenames, cleanup some
      whitespace and formatting in the new files.]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 9d9f2ccc
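As background for the Makefile diff below: the move relies on the standard Kbuild pattern where a parent Makefile descends into a subdirectory via an "obj-$(CONFIG_FOO) += subdir/" entry, and the subdirectory's own Makefile then lists the objects the parent used to name directly. A minimal, hypothetical sketch of that pattern (the config symbols, directory and object names here are illustrative, not the ones from this patch):

# parent Makefile: descend into the subdirectory when the option is set;
# Kbuild then reads example/Makefile for the object list
obj-$(CONFIG_EXAMPLE_FEATURE) += example/

# example/Makefile: objects formerly listed in the parent now live here
obj-y                         += core.o helpers.o
obj-$(CONFIG_EXAMPLE_EXTRA)   += extra.o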
@@ -5,53 +5,34 @@
 ccflags-$(CONFIG_PPC64)	:= $(NO_MINIMAL_TOC)
 
-CFLAGS_REMOVE_slb.o = $(CC_FLAGS_FTRACE)
-
 obj-y				:= fault.o mem.o pgtable.o mmap.o \
 				   init_$(BITS).o pgtable_$(BITS).o \
 				   init-common.o mmu_context.o drmem.o
 obj-$(CONFIG_PPC_MMU_NOHASH)	+= mmu_context_nohash.o tlb_nohash.o \
 				   tlb_nohash_low.o
 obj-$(CONFIG_PPC_BOOK3E)	+= tlb_low_$(BITS)e.o
-hash64-$(CONFIG_PPC_NATIVE)	:= hash_native_64.o
 obj-$(CONFIG_PPC_BOOK3E_64)	+= pgtable-book3e.o
-obj-$(CONFIG_PPC_BOOK3S_64)	+= pgtable-hash64.o hash_utils_64.o slb.o \
-				   $(hash64-y) mmu_context_book3s64.o \
-				   pgtable-book3s64.o pgtable-frag.o
+obj-$(CONFIG_PPC_BOOK3S_64)	+= book3s64/
+obj-$(CONFIG_PPC_BOOK3S_64)	+= pgtable-frag.o
 obj-$(CONFIG_PPC32)		+= pgtable-frag.o
-obj-$(CONFIG_PPC_RADIX_MMU)	+= pgtable-radix.o tlb-radix.o
 obj-$(CONFIG_PPC_BOOK3S_32)	+= ppc_mmu_32.o hash_low_32.o mmu_context_hash32.o
-obj-$(CONFIG_PPC_BOOK3S)	+= tlb_hash$(BITS).o
-ifdef CONFIG_PPC_BOOK3S_64
-obj-$(CONFIG_PPC_4K_PAGES)	+= hash64_4k.o
-obj-$(CONFIG_PPC_64K_PAGES)	+= hash64_64k.o
-endif
+obj-$(CONFIG_PPC_BOOK3S_32)	+= tlb_hash32.o
 obj-$(CONFIG_40x)		+= 40x_mmu.o
 obj-$(CONFIG_44x)		+= 44x_mmu.o
 obj-$(CONFIG_PPC_8xx)		+= 8xx_mmu.o
 obj-$(CONFIG_PPC_FSL_BOOK3E)	+= fsl_booke_mmu.o
 obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
-obj-$(CONFIG_PPC_SPLPAR)	+= vphn.o
 obj-$(CONFIG_PPC_MM_SLICES)	+= slice.o
 obj-y				+= hugetlbpage.o
 ifdef CONFIG_HUGETLB_PAGE
-obj-$(CONFIG_PPC_BOOK3S_64)	+= hugetlbpage-hash64.o
-obj-$(CONFIG_PPC_RADIX_MMU)	+= hugetlbpage-radix.o
 obj-$(CONFIG_PPC_BOOK3E_MMU)	+= hugetlbpage-book3e.o
 endif
-obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hugepage-hash64.o
-obj-$(CONFIG_PPC_SUBPAGE_PROT)	+= subpage-prot.o
 obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
 obj-$(CONFIG_HIGHMEM)		+= highmem.o
 obj-$(CONFIG_PPC_COPRO_BASE)	+= copro_fault.o
-obj-$(CONFIG_SPAPR_TCE_IOMMU)	+= mmu_context_iommu.o
 obj-$(CONFIG_PPC_PTDUMP)	+= ptdump/
-obj-$(CONFIG_PPC_MEM_KEYS)	+= pkeys.o
 
 # Disable kcov instrumentation on sensitive code
 # This is necessary for booting with kcov enabled on book3e machines
 KCOV_INSTRUMENT_tlb_nohash.o := n
 KCOV_INSTRUMENT_fsl_booke_mmu.o := n
-
-# Instrumenting the SLB fault path can lead to duplicate SLB entries
-KCOV_INSTRUMENT_slb.o := n
# SPDX-License-Identifier: GPL-2.0
ccflags-y := $(NO_MINIMAL_TOC)
CFLAGS_REMOVE_slb.o = $(CC_FLAGS_FTRACE)
obj-y += hash_pgtable.o hash_utils.o slb.o \
mmu_context.o pgtable.o hash_tlb.o
obj-$(CONFIG_PPC_NATIVE) += hash_native.o
obj-$(CONFIG_PPC_RADIX_MMU) += radix_pgtable.o radix_tlb.o
obj-$(CONFIG_PPC_4K_PAGES) += hash_4k.o
obj-$(CONFIG_PPC_64K_PAGES) += hash_64k.o
obj-$(CONFIG_PPC_SPLPAR) += vphn.o
obj-$(CONFIG_HUGETLB_PAGE) += hash_hugetlbpage.o
ifdef CONFIG_HUGETLB_PAGE
obj-$(CONFIG_PPC_RADIX_MMU) += radix_hugetlbpage.o
endif
obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hash_hugepage.o
obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage_prot.o
obj-$(CONFIG_SPAPR_TCE_IOMMU) += iommu_api.o
obj-$(CONFIG_PPC_MEM_KEYS) += pkeys.o
# Instrumenting the SLB fault path can lead to duplicate SLB entries
KCOV_INSTRUMENT_slb.o := n
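The per-object overrides carried into the new Makefile above follow the usual Kbuild convention: "CFLAGS_REMOVE_<object>.o" strips particular compiler flags from a single object file, and "KCOV_INSTRUMENT_<object>.o := n" excludes a single object from kcov coverage instrumentation. A small hypothetical fragment showing the same pattern (the object name is illustrative only):

# drop ftrace instrumentation flags from one object only
CFLAGS_REMOVE_early_boot.o = $(CC_FLAGS_FTRACE)
# opt that same object out of kcov coverage instrumentation
KCOV_INSTRUMENT_early_boot.o := n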
 /*
  * Copyright IBM Corporation, 2015
- * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU Lesser General Public License
...
 /*
  * Copyright IBM Corporation, 2015
- * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU Lesser General Public License
...
 /*
  * Copyright IBM Corporation, 2013
- * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2.1 of the GNU Lesser General Public License
...
@@ -34,7 +34,8 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 	/* Search the Linux page table for a match with va */
 	vpn = hpt_vpn(ea, vsid, ssize);
 
-	/* At this point, we have a pte (old_pte) which can be used to build
+	/*
+	 * At this point, we have a pte (old_pte) which can be used to build
 	 * or update an HPTE. There are 2 cases:
 	 *
 	 * 1. There is a valid (present) pte with no associated HPTE (this is
@@ -55,8 +56,10 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 	if (unlikely(!check_pte_access(access, old_pte)))
 		return 1;
 
-	/* Try to lock the PTE, add ACCESSED and DIRTY if it was
-	 * a write access */
+	/*
+	 * Try to lock the PTE, add ACCESSED and DIRTY if it was
+	 * a write access
+	 */
 	new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED;
 	if (access & _PAGE_WRITE)
 		new_pte |= _PAGE_DIRTY;
@@ -74,8 +77,10 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 	rpte = __real_pte(__pte(old_pte), ptep, offset);
 	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
-		/* No CPU has hugepages but lacks no execute, so we
-		 * don't need to worry about that case */
+		/*
+		 * No CPU has hugepages but lacks no execute, so we
+		 * don't need to worry about that case
+		 */
 		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
 
 	/* Check if pte already has an hpte (case 2) */
...
@@ -55,7 +55,8 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	i = batch->index;
 
-	/* Get page size (maybe move back to caller).
+	/*
+	 * Get page size (maybe move back to caller).
 	 *
 	 * NOTE: when using special 64K mappings in 4K environment like
 	 * for SPEs, we obtain the page size from the slice, which thus
@@ -77,10 +78,12 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 #endif
 	} else {
 		psize = pte_pagesize_index(mm, addr, pte);
-		/* Mask the address for the standard page size. If we
+		/*
+		 * Mask the address for the standard page size. If we
 		 * have a 64k page kernel, but the hardware does not
 		 * support 64k pages, this might be different from the
-		 * hardware page size encoded in the slice table. */
+		 * hardware page size encoded in the slice table.
+		 */
 		addr &= PAGE_MASK;
 		offset = PTRS_PER_PTE;
 	}
@@ -161,7 +164,8 @@ void hash__tlb_flush(struct mmu_gather *tlb)
 {
 	struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);
 
-	/* If there's a TLB batch pending, then we must flush it because the
+	/*
+	 * If there's a TLB batch pending, then we must flush it because the
 	 * pages are going to be freed and we really don't want to have a CPU
 	 * access a freed page because it has a stale TLB
 	 */
@@ -201,7 +205,8 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 	BUG_ON(!mm->pgd);
 
-	/* Note: Normally, we should only ever use a batch within a
+	/*
+	 * Note: Normally, we should only ever use a batch within a
 	 * PTE locked section. This violates the rule, but will work
 	 * since we don't actually modify the PTEs, we just flush the
 	 * hash while leaving the PTEs intact (including their reference
@@ -238,7 +243,8 @@ void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
 	unsigned long flags;
 
 	addr = _ALIGN_DOWN(addr, PMD_SIZE);
-	/* Note: Normally, we should only ever use a batch within a
+	/*
+	 * Note: Normally, we should only ever use a batch within a
 	 * PTE locked section. This violates the rule, but will work
 	 * since we don't actually modify the PTEs, we just flush the
 	 * hash while leaving the PTEs intact (including their reference
...
@@ -128,7 +128,8 @@ static DEFINE_SPINLOCK(linear_map_hash_lock);
 struct mmu_hash_ops mmu_hash_ops;
 EXPORT_SYMBOL(mmu_hash_ops);
 
-/* There are definitions of page sizes arrays to be used when none
+/*
+ * These are definitions of page sizes arrays to be used when none
  * is provided by the firmware.
  */
@@ -145,7 +146,8 @@ static struct mmu_psize_def mmu_psize_defaults[] = {
 	},
 };
 
-/* POWER4, GPUL, POWER5
+/*
+ * POWER4, GPUL, POWER5
  *
  * Support for 16Mb large pages
  */
@@ -479,7 +481,8 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
 }
 
 #ifdef CONFIG_HUGETLB_PAGE
-/* Scan for 16G memory blocks that have been set aside for huge pages
+/*
+ * Scan for 16G memory blocks that have been set aside for huge pages
  * and reserve those blocks for 16G huge pages.
  */
 static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
@@ -496,8 +499,10 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
 	if (type == NULL || strcmp(type, "memory") != 0)
 		return 0;
 
-	/* This property is the log base 2 of the number of virtual pages that
-	 * will represent this memory block. */
+	/*
+	 * This property is the log base 2 of the number of virtual pages that
+	 * will represent this memory block.
+	 */
 	page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
 	if (page_count_prop == NULL)
 		return 0;
@@ -673,7 +678,8 @@ static void __init htab_init_page_sizes(void)
 #endif /* CONFIG_PPC_64K_PAGES */
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-	/* We try to use 16M pages for vmemmap if that is supported
+	/*
+	 * We try to use 16M pages for vmemmap if that is supported
 	 * and we have at least 1G of RAM at boot
 	 */
 	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
@@ -742,7 +748,8 @@ unsigned htab_shift_for_mem_size(unsigned long mem_size)
 static unsigned long __init htab_get_table_size(void)
 {
-	/* If hash size isn't already provided by the platform, we try to
+	/*
+	 * If hash size isn't already provided by the platform, we try to
 	 * retrieve it from the device-tree. If it's not there neither, we
 	 * calculate it now based on the total RAM size
 	 */
@@ -1043,7 +1050,8 @@ void __init hash__early_init_mmu(void)
 	if (!mmu_hash_ops.hpte_insert)
 		panic("hash__early_init_mmu: No MMU hash ops defined!\n");
 
-	/* Initialize the MMU Hash table and create the linear mapping
+	/*
+	 * Initialize the MMU Hash table and create the linear mapping
 	 * of memory. Has to be done before SLB initialization as this is
 	 * currently where the page size encoding is obtained.
 	 */
@@ -1228,7 +1236,8 @@ static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
 	}
 }
 
-/* Result code is:
+/*
+ * Result code is:
  *  0 - handled
  *  1 - normal page fault
  * -1 - critical hash insertion error
@@ -1276,8 +1285,9 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 		ssize = mmu_kernel_ssize;
 		break;
 	default:
-		/* Not a valid range
-		 * Send the problem up to do_page_fault
+		/*
+		 * Not a valid range
+		 * Send the problem up to do_page_fault()
 		 */
 		rc = 1;
 		goto bail;
@@ -1302,7 +1312,8 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 		flags |= HPTE_LOCAL_UPDATE;
 
 #ifndef CONFIG_PPC_64K_PAGES
-	/* If we use 4K pages and our psize is not 4K, then we might
+	/*
+	 * If we use 4K pages and our psize is not 4K, then we might
 	 * be hitting a special driver mapping, and need to align the
 	 * address before we fetch the PTE.
 	 *
@@ -1324,7 +1335,8 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 	/* Add _PAGE_PRESENT to the required access perm */
 	access |= _PAGE_PRESENT;
 
-	/* Pre-check access permissions (will be re-checked atomically
+	/*
+	 * Pre-check access permissions (will be re-checked atomically
 	 * in __hash_page_XX but this pre-check is a fast path
 	 */
 	if (!check_pte_access(access, pte_val(*ptep))) {
@@ -1371,7 +1383,8 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 			psize = MMU_PAGE_4K;
 	}
 
-	/* If this PTE is non-cacheable and we have restrictions on
+	/*
+	 * If this PTE is non-cacheable and we have restrictions on
 	 * using non cacheable large pages, then we switch to 4k
 	 */
 	if (mmu_ci_restrictions && psize == MMU_PAGE_64K && pte_ci(*ptep)) {
@@ -1412,7 +1425,8 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 				    flags, ssize, spp);
 	}
 
-	/* Dump some info in case of hash insertion failure, they should
+	/*
+	 * Dump some info in case of hash insertion failure, they should
 	 * never happen so it is really useful to know if/when they do
 	 */
 	if (rc == -1)
@@ -1653,7 +1667,8 @@ unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift,
 	return gslot;
 }
 
-/* WARNING: This is called from hash_low_64.S, if you change this prototype,
+/*
+ * WARNING: This is called from hash_low_64.S, if you change this prototype,
  * do not forget to update the assembly call site !
  */
 void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
@@ -1874,7 +1889,8 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
 void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				      phys_addr_t first_memblock_size)
 {
-	/* We don't currently support the first MEMBLOCK not mapping 0
+	/*
+	 * We don't currently support the first MEMBLOCK not mapping 0
 	 * physical on those processors
 	 */
 	BUG_ON(first_memblock_base != 0);
...
@@ -681,7 +681,8 @@ void radix__mmu_cleanup_all(void)
 void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				       phys_addr_t first_memblock_size)
 {
-	/* We don't currently support the first MEMBLOCK not mapping 0
+	/*
+	 * We don't currently support the first MEMBLOCK not mapping 0
 	 * physical on those processors
 	 */
 	BUG_ON(first_memblock_base != 0);
@@ -1003,45 +1004,44 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre
 void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 				       pgtable_t pgtable)
 {
 	struct list_head *lh = (struct list_head *) pgtable;
 
 	assert_spin_locked(pmd_lockptr(mm, pmdp));
 
 	/* FIFO */
 	if (!pmd_huge_pte(mm, pmdp))
 		INIT_LIST_HEAD(lh);
 	else
 		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
 	pmd_huge_pte(mm, pmdp) = pgtable;
 }
 
 pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
 {
 	pte_t *ptep;
 	pgtable_t pgtable;
 	struct list_head *lh;
 
-assert_spin_locked(pmd_lockptr(mm, pmdp));
-/* FIFO */
-pgtable = pmd_huge_pte(mm, pmdp);
-lh = (struct list_head *) pgtable;
-if (list_empty(lh))
-pmd_huge_pte(mm, pmdp) = NULL;
-else {
-pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
-list_del(lh);
-}
-ptep = (pte_t *) pgtable;
-*ptep = __pte(0);
-ptep++;
-*ptep = __pte(0);
-return pgtable;
-}
+	assert_spin_locked(pmd_lockptr(mm, pmdp));
+	/* FIFO */
+	pgtable = pmd_huge_pte(mm, pmdp);
+	lh = (struct list_head *) pgtable;
+	if (list_empty(lh))
+		pmd_huge_pte(mm, pmdp) = NULL;
+	else {
+		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
+		list_del(lh);
+	}
+	ptep = (pte_t *) pgtable;
+	*ptep = __pte(0);
+	ptep++;
+	*ptep = __pte(0);
+	return pgtable;
+}
 
 pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
 				     unsigned long addr, pmd_t *pmdp)
 {
 	pmd_t old_pmd;
 	unsigned long old;
...
@@ -554,7 +554,8 @@ void slb_initialize(void)
 	asm volatile("isync; slbia; isync":::"memory");
 	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX);
 
-	/* For the boot cpu, we're running on the stack in init_thread_union,
+	/*
+	 * For the boot cpu, we're running on the stack in init_thread_union,
 	 * which is in the first segment of the linear mapping, and also
 	 * get_paca()->kstack hasn't been initialized yet.
 	 * For secondary cpus, we need to bolt the kernel stack entry now.
...
@@ -42,7 +42,8 @@ int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
 		u16 new = be16_to_cpup(field++);
 
 		if (is_32bit) {
-			/* Let's concatenate the 16 bits of this field to the
+			/*
+			 * Let's concatenate the 16 bits of this field to the
 			 * 15 lower bits of the previous field
 			 */
 			unpacked[++nr_assoc_doms] =
@@ -56,7 +57,8 @@ int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
 			unpacked[++nr_assoc_doms] =
 				cpu_to_be32(new & VPHN_FIELD_MASK);
 		} else {
-			/* Data is in the lower 15 bits of this field
+			/*
+			 * Data is in the lower 15 bits of this field
 			 * concatenated with the next 16 bit field
 			 */
 			last = new;
...
@@ -2,8 +2,7 @@
 #ifndef _ARCH_POWERPC_MM_VPHN_H_
 #define _ARCH_POWERPC_MM_VPHN_H_
 
-/* The H_HOME_NODE_ASSOCIATIVITY h_call returns 6 64-bit registers.
- */
+/* The H_HOME_NODE_ASSOCIATIVITY h_call returns 6 64-bit registers. */
 #define VPHN_REGISTER_COUNT 6
 
 /*
...
@@ -1068,7 +1068,7 @@ u64 memory_hotplug_max(void)
 /* Virtual Processor Home Node (VPHN) support */
 #ifdef CONFIG_PPC_SPLPAR
 
-#include "vphn.h"
+#include "book3s64/vphn.h"
 
 struct topology_update_data {
 	struct topology_update_data *next;
...
-../../../../../arch/powerpc/mm/vphn.c
+../../../../../arch/powerpc/mm/book3s64/vphn.c
\ No newline at end of file
-../../../../../arch/powerpc/mm/vphn.h
+../../../../../arch/powerpc/mm/book3s64/vphn.h
\ No newline at end of file