Commit 9223b419 authored by Christoph Lameter, committed by Linus Torvalds

pageflags: get rid of FLAGS_RESERVED

NR_PAGEFLAGS specifies the number of page flags we are using.  From that we
can calculate the number of bits left over that can be used for the zone, node
(and maybe the section) ids.  FLAGS_RESERVED is no longer needed once we use
NR_PAGEFLAGS.

Use the new bounds.h mechanism to make NR_PAGEFLAGS available to the
preprocessor.  NR_PAGEFLAGS is used to calculate field boundaries in the
page->flags field, and those field widths have to be available to the
preprocessor.
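A rough standalone sketch of that arithmetic (the BITS_PER_LONG, flag count and
zone/node widths are made-up example values, not taken from any real
configuration); it mirrors the #if checks in the diff below:

#include <stdio.h>

#define BITS_PER_LONG   64   /* assume a 64-bit build */
#define NR_PAGEFLAGS    22   /* hypothetical number of page flags */

#define SECTIONS_WIDTH  0    /* e.g. no section id kept in page->flags */
#define ZONES_WIDTH     2    /* 4 zones -> 2 bits */
#define NODES_SHIFT     6    /* e.g. CONFIG_NODES_SHIFT=6 */

/* Bits left over for the zone/node/section fields. */
#define FIELDS_SPACE    (BITS_PER_LONG - NR_PAGEFLAGS)

/* Keep the node id in page->flags only if it still fits. */
#if SECTIONS_WIDTH + ZONES_WIDTH + NODES_SHIFT <= FIELDS_SPACE
#define NODES_WIDTH     NODES_SHIFT
#else
#define NODES_WIDTH     0
#endif

int main(void)
{
	printf("flags use %d bits, fields may use up to %d bits\n",
	       NR_PAGEFLAGS, FIELDS_SPACE);
	printf("zone+node+section fields actually use %d bits\n",
	       SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH);
	return 0;
}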
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: David Miller <davem@davemloft.net>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e2683181
@@ -1699,9 +1699,21 @@ void __init paging_init(void)
 	 * functions like clear_dcache_dirty_cpu use the cpu mask
 	 * in 13-bit signed-immediate instruction fields.
 	 */
-	BUILD_BUG_ON(FLAGS_RESERVED != 32);
+	/*
+	 * Page flags must not reach into upper 32 bits that are used
+	 * for the cpu number
+	 */
+	BUILD_BUG_ON(NR_PAGEFLAGS > 32);
+	/*
+	 * The bit fields placed in the high range must not reach below
+	 * the 32 bit boundary. Otherwise we cannot place the cpu field
+	 * at the 32 bit boundary.
+	 */
 	BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
-		     ilog2(roundup_pow_of_two(NR_CPUS)) > FLAGS_RESERVED);
+		     ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
 	BUILD_BUG_ON(NR_CPUS > 4096);
 
 	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
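What the two new BUILD_BUG_ON()s guarantee can be restated as a standalone
compile-time check; the widths below are made-up examples, not sparc64's real
configuration. All page flags must stay in the low 32 bits, and the
zone/node/section fields plus the cpu number kept in the high bits must not
reach below bit 32:

#include <assert.h>

#define NR_PAGEFLAGS    22   /* hypothetical */
#define SECTIONS_WIDTH  0
#define NODES_WIDTH     10
#define ZONES_WIDTH     2
#define CPU_BITS        12   /* ilog2(roundup_pow_of_two(4096)) */

static_assert(NR_PAGEFLAGS <= 32,
              "page flags would spill into the upper half of page->flags");
static_assert(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH + CPU_BITS <= 32,
              "fields plus cpu number would reach below the 32-bit boundary");

int main(void) { return 0; }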
@@ -407,7 +407,7 @@ static inline void set_compound_order(struct page *page, unsigned long order)
 #define ZONES_WIDTH		ZONES_SHIFT
 
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= FLAGS_RESERVED
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
 #define NODES_WIDTH		NODES_SHIFT
 #else
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
@@ -455,8 +455,8 @@ static inline void set_compound_order(struct page *page, unsigned long order)
 #define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))
 
-#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
-#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
+#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
+#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
 #endif
 
 #define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
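The guard above can be exercised outside the kernel. A minimal demo with
hypothetical 32-bit numbers: 32 - 22 leaves 10 bits, so 2 zone bits plus 6 node
bits fit, while raising NODES_WIDTH to 9 would trip the #error at compile time:

#define BITS_PER_LONG   32
#define NR_PAGEFLAGS    22   /* hypothetical flag count */
#define SECTIONS_WIDTH  0
#define NODES_WIDTH     6
#define ZONES_WIDTH     2

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error zone/node/section fields do not fit next to the page flags
#endif

int main(void) { return 0; }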
@@ -820,25 +820,6 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
 #include <asm/sparsemem.h>
 #endif
 
-#if BITS_PER_LONG == 32
-/*
- * with 32 bit page->flags field, we reserve 9 bits for node/zone info.
- * there are 4 zones (3 bits) and this leaves 9-3=6 bits for nodes.
- */
-#define FLAGS_RESERVED		9
-
-#elif BITS_PER_LONG == 64
-/*
- * with 64 bit flags field, there's plenty of room.
- */
-#define FLAGS_RESERVED		32
-
-#else
-
-#error BITS_PER_LONG not defined
-
-#endif
-
 #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
 	!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
 static inline unsigned long early_pfn_to_nid(unsigned long pfn)
@@ -6,7 +6,10 @@
 #define PAGE_FLAGS_H
 
 #include <linux/types.h>
+#ifndef __GENERATING_BOUNDS_H
 #include <linux/mm_types.h>
+#include <linux/bounds.h>
+#endif /* !__GENERATING_BOUNDS_H */
 
 /*
  * Various page->flags bits:
@@ -60,12 +63,11 @@
  *
  * | FIELD | ... | FLAGS |
  *  N-1           ^       0
- *                (N-FLAGS_RESERVED)
+ *                (NR_PAGEFLAGS)
  *
- * The fields area is reserved for fields mapping zone, node and SPARSEMEM
- * section. The boundry between these two areas is defined by
- * FLAGS_RESERVED which defines the width of the fields section
- * (see linux/mmzone.h). New flags must _not_ overlap with this area.
+ * The fields area is reserved for fields mapping zone, node (for NUMA) and
+ * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
+ * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
  */
 enum pageflags {
 	PG_locked,		/* Page is locked. Don't touch. */
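To make the "| FIELD | ... | FLAGS |" picture concrete, here is a rough
userspace model of pulling a field back out of the high end of the word. The
widths, offsets and helper names are invented for illustration; the kernel's
real accessors live in its mm headers:

#include <stdio.h>

#define BITS_PER_LONG   64
#define NR_PAGEFLAGS    22                  /* hypothetical */
#define NODES_WIDTH     10
#define ZONES_WIDTH     2

/* Fields are stacked downwards from the top bit of page->flags. */
#define SECTIONS_PGOFF  BITS_PER_LONG       /* no section field in this example */
#define NODES_PGOFF     (SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF     (NODES_PGOFF - ZONES_WIDTH)

#define NODES_MASK      ((1UL << NODES_WIDTH) - 1)
#define ZONES_MASK      ((1UL << ZONES_WIDTH) - 1)

static unsigned int flags_to_nid(unsigned long flags)
{
	return (flags >> NODES_PGOFF) & NODES_MASK;
}

static unsigned int flags_to_zone(unsigned long flags)
{
	return (flags >> ZONES_PGOFF) & ZONES_MASK;
}

int main(void)
{
	/* Encode node 3 and zone 2 into an otherwise empty flags word. */
	unsigned long flags = (3UL << NODES_PGOFF) | (2UL << ZONES_PGOFF);

	printf("node=%u zone=%u, the low %d bits are left to the page flags\n",
	       flags_to_nid(flags), flags_to_zone(flags), NR_PAGEFLAGS);
	return 0;
}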
@@ -101,9 +103,11 @@ enum pageflags {
 	 */
 	PG_uncached = 31,		/* Page has been mapped as uncached */
 #endif
-	NR_PAGEFLAGS
+	__NR_PAGEFLAGS
 };
 
+#ifndef __GENERATING_BOUNDS_H
+
 /*
  * Manipulation of page state flags
  */
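__NR_PAGEFLAGS is only an enumerator, which the preprocessor cannot evaluate,
so the width checks above cannot use it directly. The generated
include/linux/bounds.h turns it into a plain #define, and the
__GENERATING_BOUNDS_H guards allow this header to be included while that file
is being produced, i.e. before it exists. A sketch of roughly what the
generated header ends up containing (the banner wording and the value 22 are
illustrative, not taken from a real build):

#ifndef __LINUX_BOUNDS_H__
#define __LINUX_BOUNDS_H__
/*
 * DO NOT MODIFY.
 * This file was generated by Kbuild.
 */

#define NR_PAGEFLAGS 22	/* __NR_PAGEFLAGS */

#endif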
@@ -304,4 +308,5 @@ static inline void set_page_writeback(struct page *page)
 	test_set_page_writeback(page);
 }
 
+#endif /* !__GENERATING_BOUNDS_H */
 #endif /* PAGE_FLAGS_H */
@@ -6,6 +6,7 @@
 #define __GENERATING_BOUNDS_H
 /* Include headers that define the enum constants of interest */
+#include <linux/page-flags.h>
 
 #define DEFINE(sym, val) \
 	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -15,5 +16,6 @@
 void foo(void)
 {
 	/* The enum constants to put into include/linux/bounds.h */
+	DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
 	/* End of constants */
 }
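DEFINE() does not emit runnable code; it plants a recognizable "->NAME value"
marker in the compiler's assembly output, which the build then text-processes
into include/linux/bounds.h (the same trick long used for asm-offsets.h). A
standalone mimic with invented demo names; compiling it with "cc -S" and
looking at the resulting .s file shows a line such as "->NR_DEMO_PAGEFLAGS $2":

/* Mimic of the bounds.c idea, demo names only. */
enum pageflags_demo {
	PG_demo_locked,
	PG_demo_dirty,
	__NR_DEMO_PAGEFLAGS
};

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

void foo(void)
{
	DEFINE(NR_DEMO_PAGEFLAGS, __NR_DEMO_PAGEFLAGS);
}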