Commit 4b51d669 authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] optional ZONE_DMA: optional ZONE_DMA in the VM

Make ZONE_DMA optional in core code.

- #ifdef all code for ZONE_DMA and related definitions, following the example
  of ZONE_DMA32 and ZONE_HIGHMEM.

- With none of ZONE_DMA, ZONE_DMA32 and ZONE_HIGHMEM configured, we get down
  to a ZONES_SHIFT of 0.

- Modify the VM statistics to work correctly without a DMA zone.

- Modify slab to not create DMA slabs if there is no ZONE_DMA.

[akpm@osdl.org: cleanup]
[jdike@addtoit.com: build fix]
[apw@shadowen.org: Simplify calculation of the number of bits we need for ZONES_SHIFT]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Andi Kleen <ak@suse.de>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Matthew Wilcox <willy@debian.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 66701b14
@@ -85,8 +85,10 @@ struct vm_area_struct;
 
 static inline enum zone_type gfp_zone(gfp_t flags)
 {
+#ifdef CONFIG_ZONE_DMA
 	if (flags & __GFP_DMA)
 		return ZONE_DMA;
+#endif
 #ifdef CONFIG_ZONE_DMA32
 	if (flags & __GFP_DMA32)
 		return ZONE_DMA32;
...
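For orientation: once CONFIG_ZONE_DMA, CONFIG_ZONE_DMA32 and CONFIG_HIGHMEM are all disabled, every branch above is stripped by the preprocessor. A minimal sketch of what gfp_zone() then reduces to, assuming the elided tail of the function follows the same pattern as the visible branches:

/*
 * Hedged sketch, not part of the patch: the one-zone configuration
 * where only ZONE_NORMAL exists and ZONES_SHIFT can drop to 0.
 */
static inline enum zone_type gfp_zone(gfp_t flags)
{
	return ZONE_NORMAL;
}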
@@ -96,6 +96,7 @@ struct per_cpu_pageset {
 #endif
 
 enum zone_type {
+#ifdef CONFIG_ZONE_DMA
 	/*
 	 * ZONE_DMA is used when there are devices that are not able
 	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
@@ -116,6 +117,7 @@ enum zone_type {
 	 * <16M.
 	 */
 	ZONE_DMA,
+#endif
 #ifdef CONFIG_ZONE_DMA32
 	/*
 	 * x86_64 needs two ZONE_DMAs because it supports devices that are
@@ -152,11 +154,27 @@ enum zone_type {
  * match the requested limits. See gfp_zone() in include/linux/gfp.h
  */
 
-#if !defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_HIGHMEM)
+/*
+ * Count the active zones.  Note that the use of defined(X) outside
+ * #if and family is not necessarily defined so ensure we cannot use
+ * it later.  Use __ZONE_COUNT to work out how many shift bits we need.
+ */
+#define __ZONE_COUNT (			\
+	  defined(CONFIG_ZONE_DMA)	\
+	+ defined(CONFIG_ZONE_DMA32)	\
+	+ 1				\
+	+ defined(CONFIG_HIGHMEM)	\
+)
+#if __ZONE_COUNT < 2
+#define ZONES_SHIFT 0
+#elif __ZONE_COUNT <= 2
 #define ZONES_SHIFT 1
-#else
+#elif __ZONE_COUNT <= 4
 #define ZONES_SHIFT 2
+#else
+#error ZONES_SHIFT -- too many zones configured adjust calculation
 #endif
+#undef __ZONE_COUNT
 
 struct zone {
 	/* Fields commonly accessed by the page allocator */
@@ -523,7 +541,11 @@ static inline int is_dma32(struct zone *zone)
 
 static inline int is_dma(struct zone *zone)
 {
+#ifdef CONFIG_ZONE_DMA
 	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
+#else
+	return 0;
+#endif
 }
 
 /* These two functions are used to setup the per zone pages min values */
...
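The __ZONE_COUNT trick above relies on defined(X) evaluating to 1 or 0 inside an #if expression, so the preprocessor itself counts the configured zones; because using defined() through macro expansion is not guaranteed by the C standard (the comment's caveat), the helper is #undef'ed immediately after use. A worked walk-through for three assumed configurations:

/*
 * Illustrative only, not from the patch:
 *
 *   only ZONE_NORMAL:     0 + 0 + 1 + 0 = 1  ->  ZONES_SHIFT 0
 *   NORMAL + HIGHMEM:     0 + 0 + 1 + 1 = 2  ->  ZONES_SHIFT 1
 *   all four zones:       1 + 1 + 1 + 1 = 4  ->  ZONES_SHIFT 2
 *
 * ZONES_SHIFT is the number of page->flags bits needed to encode a
 * zone index: one zone needs no bits, two zones need one bit, and
 * three or four zones need two bits.
 */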
@@ -19,7 +19,9 @@
 struct cache_sizes {
 	size_t			cs_size;
 	struct kmem_cache	*cs_cachep;
+#ifdef CONFIG_ZONE_DMA
 	struct kmem_cache	*cs_dmacachep;
+#endif
 };
 extern struct cache_sizes malloc_sizes[];
@@ -39,9 +41,12 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 			__you_cannot_kmalloc_that_much();
 	}
 found:
-	return kmem_cache_alloc((flags & GFP_DMA) ?
-		malloc_sizes[i].cs_dmacachep :
-		malloc_sizes[i].cs_cachep, flags);
+#ifdef CONFIG_ZONE_DMA
+	if (flags & GFP_DMA)
+		return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
+						flags);
+#endif
+	return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
 	}
 	return __kmalloc(size, flags);
 }
@@ -62,9 +67,12 @@ static inline void *kzalloc(size_t size, gfp_t flags)
 			__you_cannot_kzalloc_that_much();
 	}
 found:
-	return kmem_cache_zalloc((flags & GFP_DMA) ?
-		malloc_sizes[i].cs_dmacachep :
-		malloc_sizes[i].cs_cachep, flags);
+#ifdef CONFIG_ZONE_DMA
+	if (flags & GFP_DMA)
+		return kmem_cache_zalloc(malloc_sizes[i].cs_dmacachep,
+						flags);
+#endif
+	return kmem_cache_zalloc(malloc_sizes[i].cs_cachep, flags);
 	}
 	return __kzalloc(size, flags);
 }
@@ -88,9 +96,13 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 			__you_cannot_kmalloc_that_much();
 	}
 found:
-	return kmem_cache_alloc_node((flags & GFP_DMA) ?
-		malloc_sizes[i].cs_dmacachep :
-		malloc_sizes[i].cs_cachep, flags, node);
+#ifdef CONFIG_ZONE_DMA
+	if (flags & GFP_DMA)
+		return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
+						flags, node);
+#endif
+	return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
+						flags, node);
 	}
 	return __kmalloc_node(size, flags, node);
 }
...
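With CONFIG_ZONE_DMA disabled, the cs_dmacachep member does not exist and the GFP_DMA tests above vanish at compile time, so every constant-size request is served from the regular per-size caches. A hypothetical call site, for illustration only:

static void example(void)	/* hypothetical, not from the patch */
{
	void *a = kmalloc(64, GFP_KERNEL);	/* always cs_cachep */
	void *b = kmalloc(64, GFP_DMA);		/* cs_dmacachep with CONFIG_ZONE_DMA=y,
						 * cs_cachep with CONFIG_ZONE_DMA=n */
	kfree(a);
	kfree(b);
}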
@@ -19,6 +19,12 @@
  * generated will simply be the increment of a global address.
  */
 
+#ifdef CONFIG_ZONE_DMA
+#define DMA_ZONE(xx) xx##_DMA,
+#else
+#define DMA_ZONE(xx)
+#endif
+
 #ifdef CONFIG_ZONE_DMA32
 #define DMA32_ZONE(xx) xx##_DMA32,
 #else
@@ -31,7 +37,7 @@
 #define HIGHMEM_ZONE(xx)
 #endif
 
-#define FOR_ALL_ZONES(xx) xx##_DMA, DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
 
 enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		FOR_ALL_ZONES(PGALLOC),
@@ -96,7 +102,8 @@ static inline void vm_events_fold_cpu(int cpu)
 #endif /* CONFIG_VM_EVENT_COUNTERS */
 
 #define __count_zone_vm_events(item, zone, delta) \
-		__count_vm_events(item##_DMA + zone_idx(zone), delta)
+		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
+		zone_idx(zone), delta)
 
 /*
  * Zone based page accounting with per cpu differentials.
@@ -143,14 +150,16 @@ static inline unsigned long node_page_state(int node,
 	struct zone *zones = NODE_DATA(node)->node_zones;
 
 	return
+#ifdef CONFIG_ZONE_DMA
+		zone_page_state(&zones[ZONE_DMA], item) +
+#endif
 #ifdef CONFIG_ZONE_DMA32
 		zone_page_state(&zones[ZONE_DMA32], item) +
 #endif
-		zone_page_state(&zones[ZONE_NORMAL], item) +
 #ifdef CONFIG_HIGHMEM
 		zone_page_state(&zones[ZONE_HIGHMEM], item) +
 #endif
-		zone_page_state(&zones[ZONE_DMA], item);
+		zone_page_state(&zones[ZONE_NORMAL], item);
 }
 
 extern void zone_statistics(struct zonelist *, struct zone *);
...
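The __count_zone_vm_events() change above is worth a worked example. The old macro anchored the event index at item##_DMA, which does not exist once ZONE_DMA is compiled out; the new form anchors at item##_NORMAL, present in every configuration, and subtracts ZONE_NORMAL so the offset is configuration independent:

/*
 * Worked example, illustrative: CONFIG_ZONE_DMA=n, CONFIG_ZONE_DMA32=y,
 * CONFIG_HIGHMEM=n gives
 *
 *   enum zone_type:          ZONE_DMA32 = 0, ZONE_NORMAL = 1
 *   FOR_ALL_ZONES(PGALLOC):  PGALLOC_DMA32, PGALLOC_NORMAL
 *
 * Counting an allocation from the DMA32 zone (zone_idx == 0):
 *
 *   PGALLOC_NORMAL - ZONE_NORMAL + 0  ==  PGALLOC_NORMAL - 1
 *                                     ==  PGALLOC_DMA32
 *
 * which selects the right counter; the old PGALLOC_DMA anchor would
 * not even compile in this configuration.
 */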
@@ -161,3 +161,9 @@ config RESOURCES_64BIT
 	default 64BIT
 	help
 	  This option allows memory and IO resources to be 64 bit.
+
+config ZONE_DMA_FLAG
+	int
+	default "0" if !ZONE_DMA
+	default "1"
@@ -73,7 +73,9 @@ static void __free_pages_ok(struct page *page, unsigned int order);
  * don't need any ZONE_NORMAL reservation
  */
 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
+#ifdef CONFIG_ZONE_DMA
 	 256,
+#endif
 #ifdef CONFIG_ZONE_DMA32
 	 256,
 #endif
@@ -85,7 +87,9 @@ int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
 EXPORT_SYMBOL(totalram_pages);
 
 static char * const zone_names[MAX_NR_ZONES] = {
+#ifdef CONFIG_ZONE_DMA
 	 "DMA",
+#endif
 #ifdef CONFIG_ZONE_DMA32
 	 "DMA32",
 #endif
...
@@ -793,8 +793,10 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
 	 * has cs_{dma,}cachep==NULL. Thus no special case
 	 * for large kmalloc calls required.
 	 */
+#ifdef CONFIG_ZONE_DMA
 	if (unlikely(gfpflags & GFP_DMA))
 		return csizep->cs_dmacachep;
+#endif
 	return csizep->cs_cachep;
 }
@@ -1493,13 +1495,15 @@ void __init kmem_cache_init(void)
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
 					NULL, NULL);
 		}
-		sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
+#ifdef CONFIG_ZONE_DMA
+		sizes->cs_dmacachep = kmem_cache_create(
+					names->name_dma,
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
 						SLAB_PANIC,
 					NULL, NULL);
+#endif
 		sizes++;
 		names++;
 	}
@@ -2321,7 +2325,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	cachep->slab_size = slab_size;
 	cachep->flags = flags;
 	cachep->gfpflags = 0;
-	if (flags & SLAB_CACHE_DMA)
+	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
 		cachep->gfpflags |= GFP_DMA;
 	cachep->buffer_size = size;
 	cachep->reciprocal_buffer_size = reciprocal_value(size);
@@ -2643,10 +2647,12 @@ static void cache_init_objs(struct kmem_cache *cachep,
 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
 {
-	if (flags & GFP_DMA)
-		BUG_ON(!(cachep->gfpflags & GFP_DMA));
-	else
-		BUG_ON(cachep->gfpflags & GFP_DMA);
+	if (CONFIG_ZONE_DMA_FLAG) {
+		if (flags & GFP_DMA)
+			BUG_ON(!(cachep->gfpflags & GFP_DMA));
+		else
+			BUG_ON(cachep->gfpflags & GFP_DMA);
+	}
 }
 
 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
...
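A hedged sketch of the net effect of the kmem_flagcheck() change above: with CONFIG_ZONE_DMA_FLAG == 0 the constant condition lets the compiler reduce the function to a no-op, matching an #ifdef in cost while keeping the BUG_ON checks visible to the type checker:

/* What kmem_flagcheck() effectively compiles to when CONFIG_ZONE_DMA_FLAG == 0. */
static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
{
	/* no DMA caches exist, so there is nothing to cross-check */
}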
@@ -408,6 +408,12 @@ const struct seq_operations fragmentation_op = {
 	.show	= frag_show,
 };
 
+#ifdef CONFIG_ZONE_DMA
+#define TEXT_FOR_DMA(xx) xx "_dma",
+#else
+#define TEXT_FOR_DMA(xx)
+#endif
+
 #ifdef CONFIG_ZONE_DMA32
 #define TEXT_FOR_DMA32(xx) xx "_dma32",
 #else
@@ -420,7 +426,7 @@ const struct seq_operations fragmentation_op = {
 #define TEXT_FOR_HIGHMEM(xx)
 #endif
 
-#define TEXTS_FOR_ZONES(xx) xx "_dma", TEXT_FOR_DMA32(xx) xx "_normal", \
+#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
 					TEXT_FOR_HIGHMEM(xx)
 
 static const char * const vmstat_text[] = {
...
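To see what the reworked TEXTS_FOR_ZONES() produces, an illustrative expansion assuming CONFIG_ZONE_DMA=n, CONFIG_ZONE_DMA32=y, CONFIG_HIGHMEM=n; adjacent string literals concatenate, so each configured zone contributes exactly one /proc/vmstat name, in zone order:

/*
 * Illustrative expansion, not from the patch:
 *
 *   TEXTS_FOR_ZONES("pgalloc")
 *     -> TEXT_FOR_DMA("pgalloc") TEXT_FOR_DMA32("pgalloc")
 *            "pgalloc" "_normal", TEXT_FOR_HIGHMEM("pgalloc")
 *     -> (empty)  "pgalloc" "_dma32",  "pgalloc_normal",  (empty)
 *     -> "pgalloc_dma32", "pgalloc_normal",
 *
 * matching the PGALLOC_DMA32 and PGALLOC_NORMAL counters that
 * FOR_ALL_ZONES(PGALLOC) generates for the same configuration.
 */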