Commit 25078dc1 authored by Christoph Hellwig, committed by Michael Ellerman

powerpc: use mm zones more sensibly

Powerpc has somewhat odd usage where ZONE_DMA is used for all memory on
common 64-bit configurations, and ZONE_DMA32 is used for 31-bit schemes.

Move to a scheme closer to what other architectures use (and I dare to
say the intent of the system):

 - ZONE_DMA: optionally for memory < 31-bit (64-bit embedded only)
 - ZONE_NORMAL: everything addressable by the kernel
 - ZONE_HIGHMEM: memory above the kernel's direct-mapped lowmem (32-bit kernels only)
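
To make the new layout concrete, here is a minimal sketch (my numbers, for a
hypothetical 64-bit Book3E board with 8 GiB of RAM and 4 KiB pages, not taken
from the patch) of the zone bounds that paging_init() computes below:

	/*
	 * Illustrative only: hypothetical CONFIG_PPC_BOOK3E_64 system,
	 * 8 GiB of RAM, PAGE_SHIFT == 12.
	 */
	max_zone_pfns[ZONE_DMA]    = 0x7fffffffUL  >> 12; /* 0x7ffff: low 2 GiB  */
	max_zone_pfns[ZONE_NORMAL] = 0x200000000UL >> 12; /* 0x200000: all 8 GiB */

Everything below 2 GiB is reachable via GFP_DMA; the rest of RAM sits in
ZONE_NORMAL, and ZONE_HIGHMEM stays empty on 64-bit.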

Also provide information on how ZONE_DMA is used by defining
ARCH_ZONE_DMA_BITS.
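
For context on the ARCH_ZONE_DMA_BITS side: the generic direct-mapping code
keys its GFP zone choice off that constant. A rough sketch of that logic
(paraphrased from kernel/dma/direct.c of this era; the helper name
dma_direct_gfp_for_mask is invented here for illustration):

	/* Generic default when an architecture defines nothing (ISA's 16 MiB). */
	#ifndef ARCH_ZONE_DMA_BITS
	#define ARCH_ZONE_DMA_BITS 24
	#endif

	/* Hypothetical helper: pick a GFP zone modifier for a device DMA mask. */
	static gfp_t dma_direct_gfp_for_mask(u64 dma_mask)
	{
		if (IS_ENABLED(CONFIG_ZONE_DMA) &&
		    dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
			return GFP_DMA;
		if (IS_ENABLED(CONFIG_ZONE_DMA32) && dma_mask <= DMA_BIT_MASK(32))
			return GFP_DMA32;
		return 0; /* the device can reach any zone */
	}

With ARCH_ZONE_DMA_BITS set to 31 here, a device with a 31-bit mask is steered
to GFP_DMA, while wider masks allocate from ZONE_NORMAL first.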

Contains various fixes from Benjamin Herrenschmidt.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent f3e5a857
arch/powerpc/Kconfig
@@ -375,9 +375,9 @@ config PPC_ADV_DEBUG_DAC_RANGE
 	depends on PPC_ADV_DEBUG_REGS && 44x
 	default y
 
-config ZONE_DMA32
+config ZONE_DMA
 	bool
-	default y if PPC64
+	default y if PPC_BOOK3E_64
 
 config PGTABLE_LEVELS
 	int
@@ -870,10 +870,6 @@ config ISA
 	  have an IBM RS/6000 or pSeries machine, say Y.  If you have an
 	  embedded board, consult your board documentation.
 
-config ZONE_DMA
-	bool
-	default y
-
 config GENERIC_ISA_DMA
 	bool
 	depends on ISA_DMA_API
arch/powerpc/include/asm/page.h
@@ -340,4 +340,6 @@ struct vm_area_struct;
 #endif /* __ASSEMBLY__ */
 #include <asm/slice.h>
 
+#define ARCH_ZONE_DMA_BITS 31
+
 #endif /* _ASM_POWERPC_PAGE_H */
arch/powerpc/include/asm/pgtable.h
@@ -66,7 +66,6 @@ extern unsigned long empty_zero_page[];
 extern pgd_t swapper_pg_dir[];
 
-void limit_zone_pfn(enum zone_type zone, unsigned long max_pfn);
 int dma_pfn_limit_to_zone(u64 pfn_limit);
 extern void paging_init(void);
arch/powerpc/kernel/dma-swiotlb.c
@@ -108,12 +108,8 @@ int __init swiotlb_setup_bus_notifier(void)
 
 void __init swiotlb_detect_4g(void)
 {
-	if ((memblock_end_of_DRAM() - 1) > 0xffffffff) {
+	if ((memblock_end_of_DRAM() - 1) > 0xffffffff)
 		ppc_swiotlb_enable = 1;
-#ifdef CONFIG_ZONE_DMA32
-		limit_zone_pfn(ZONE_DMA32, (1ULL << 32) >> PAGE_SHIFT);
-#endif
-	}
 }
 
 static int __init check_swiotlb_enabled(void)
arch/powerpc/kernel/dma.c
@@ -50,7 +50,8 @@ static int dma_nommu_dma_supported(struct device *dev, u64 mask)
 		return 1;
 
 #ifdef CONFIG_FSL_SOC
-	/* Freescale gets another chance via ZONE_DMA/ZONE_DMA32, however
+	/*
+	 * Freescale gets another chance via ZONE_DMA, however
 	 * that will have to be refined if/when they support iommus
 	 */
 	return 1;
@@ -88,13 +89,10 @@ void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
 	}
 
 	switch (zone) {
+#ifdef CONFIG_ZONE_DMA
 	case ZONE_DMA:
 		flag |= GFP_DMA;
 		break;
-#ifdef CONFIG_ZONE_DMA32
-	case ZONE_DMA32:
-		flag |= GFP_DMA32;
-		break;
 #endif
 	};
 #endif /* CONFIG_FSL_SOC */
arch/powerpc/mm/mem.c
@@ -246,35 +246,19 @@ static int __init mark_nonram_nosave(void)
 }
 #endif
 
-static bool zone_limits_final;
-
 /*
- * The memory zones past TOP_ZONE are managed by generic mm code.
- * These should be set to zero since that's what every other
- * architecture does.
+ * Zones usage:
+ *
+ * We setup ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be
+ * everything else. GFP_DMA32 page allocations automatically fall back to
+ * ZONE_DMA.
+ *
+ * By using 31-bit unconditionally, we can exploit ARCH_ZONE_DMA_BITS to
+ * inform the generic DMA mapping code.  32-bit only devices (if not handled
+ * by an IOMMU anyway) will take a first dip into ZONE_NORMAL and get
+ * otherwise served by ZONE_DMA.
  */
-static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
-	[0            ... TOP_ZONE        ] = ~0UL,
-	[TOP_ZONE + 1 ... MAX_NR_ZONES - 1] = 0
-};
+static unsigned long max_zone_pfns[MAX_NR_ZONES];
 
-/*
- * Restrict the specified zone and all more restrictive zones
- * to be below the specified pfn.  May not be called after
- * paging_init().
- */
-void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
-{
-	int i;
-
-	if (WARN_ON(zone_limits_final))
-		return;
-
-	for (i = zone; i >= 0; i--) {
-		if (max_zone_pfns[i] > pfn_limit)
-			max_zone_pfns[i] = pfn_limit;
-	}
-}
-
 /*
  * Find the least restrictive zone that is entirely below the
@@ -324,11 +308,14 @@ void __init paging_init(void)
 	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 	       (long int)((top_of_ram - total_ram) >> 20));
 
+#ifdef CONFIG_ZONE_DMA
+	max_zone_pfns[ZONE_DMA] = min(max_low_pfn, 0x7fffffffUL >> PAGE_SHIFT);
+#endif
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 #ifdef CONFIG_HIGHMEM
-	limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
+	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
 #endif
-	limit_zone_pfn(TOP_ZONE, top_of_ram >> PAGE_SHIFT);
-	zone_limits_final = true;
+
 	free_area_init_nodes(max_zone_pfns);
 
 	mark_nonram_nosave();
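For reference on the "GFP_DMA32 page allocations automatically fall back to
ZONE_DMA" claim in the new comment: that behaviour comes from the OPT_ZONE_*
mapping in include/linux/gfp.h, which (simplified sketch, from memory) reads:

	/* Simplified: gfp_zone() resolves GFP_DMA32 through these macros. */
	#ifdef CONFIG_ZONE_DMA
	#define OPT_ZONE_DMA ZONE_DMA
	#else
	#define OPT_ZONE_DMA ZONE_NORMAL
	#endif

	#ifdef CONFIG_ZONE_DMA32
	#define OPT_ZONE_DMA32 ZONE_DMA32
	#else
	#define OPT_ZONE_DMA32 OPT_ZONE_DMA
	#endif

So with ZONE_DMA32 gone on powerpc, GFP_DMA32 allocations resolve to ZONE_DMA
on Book3E 64-bit (where CONFIG_ZONE_DMA is set) and to ZONE_NORMAL elsewhere.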
arch/powerpc/platforms/85xx/corenet_generic.c
@@ -68,16 +68,6 @@ void __init corenet_gen_setup_arch(void)
 
 	swiotlb_detect_4g();
 
-#if defined(CONFIG_FSL_PCI) && defined(CONFIG_ZONE_DMA32)
-	/*
-	 * Inbound windows don't cover the full lower 4 GiB
-	 * due to conflicts with PCICSRBAR and outbound windows,
-	 * so limit the DMA32 zone to 2 GiB, to allow consistent
-	 * allocations to succeed.
-	 */
-	limit_zone_pfn(ZONE_DMA32, 1UL << (31 - PAGE_SHIFT));
-#endif
-
 	pr_info("%s board\n", ppc_md.name);
 
 	mpc85xx_qe_init();
arch/powerpc/platforms/85xx/qemu_e500.c
@@ -45,15 +45,6 @@ static void __init qemu_e500_setup_arch(void)
 
 	fsl_pci_assign_primary();
 	swiotlb_detect_4g();
-#if defined(CONFIG_FSL_PCI) && defined(CONFIG_ZONE_DMA32)
-	/*
-	 * Inbound windows don't cover the full lower 4 GiB
-	 * due to conflicts with PCICSRBAR and outbound windows,
-	 * so limit the DMA32 zone to 2 GiB, to allow consistent
-	 * allocations to succeed.
-	 */
-	limit_zone_pfn(ZONE_DMA32, 1UL << (31 - PAGE_SHIFT));
-#endif
 	mpc85xx_smp_init();
 }
include/linux/mmzone.h
@@ -314,7 +314,7 @@ enum zone_type {
 	 * Architecture		Limit
 	 * ---------------------------
	 * parisc, ia64, sparc	<4G
-	 * s390			<2G
+	 * s390, powerpc	<2G
 	 * arm			Various
 	 * alpha		Unlimited or 0-16MB.
 	 *