Commit fc9bb8c7 authored by Christoph Lameter, committed by Pekka Enberg

mm: Rearrange struct page

We need to be able to use cmpxchg_double on the freelist and object count
field in struct page. Rearrange the fields in struct page according to
doubleword entities so that the freelist pointer comes before the counters.
Do the rearranging with a future in mind where we use more doubleword
atomics to avoid locking of updates to flags/mapping or lru pointers.

Create another union to allow access to counters in struct page as a
single unsigned long value.

The doublewords must be properly aligned for cmpxchg_double to work.
Sadly this increases the size of page struct by one word on some architectures.
But as a result, page structs are now cacheline aligned on x86_64.
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 8cb0a506
...@@ -30,24 +30,60 @@ struct address_space; ...@@ -30,24 +30,60 @@ struct address_space;
* moment. Note that we have no way to track which tasks are using * moment. Note that we have no way to track which tasks are using
* a page, though if it is a pagecache page, rmap structures can tell us * a page, though if it is a pagecache page, rmap structures can tell us
* who is mapping it. * who is mapping it.
*
* The objects in struct page are organized in double word blocks in
 * order to allow us to use atomic double word operations on portions
* of struct page. That is currently only used by slub but the arrangement
* allows the use of atomic double word operations on the flags/mapping
* and lru list pointers also.
*/ */
struct page { struct page {
/* First double word block */
unsigned long flags; /* Atomic flags, some possibly unsigned long flags; /* Atomic flags, some possibly
* updated asynchronously */ * updated asynchronously */
atomic_t _count; /* Usage count, see below. */ struct address_space *mapping; /* If low bit clear, points to
* inode address_space, or NULL.
* If page mapped as anonymous
* memory, low bit is set, and
* it points to anon_vma object:
* see PAGE_MAPPING_ANON below.
*/
/* Second double word */
union { union {
struct {
pgoff_t index; /* Our offset within mapping. */
atomic_t _mapcount; /* Count of ptes mapped in mms, atomic_t _mapcount; /* Count of ptes mapped in mms,
* to show when page is mapped * to show when page is mapped
* & limit reverse map searches. * & limit reverse map searches.
*/ */
struct { /* SLUB */ atomic_t _count; /* Usage count, see below. */
};
struct { /* SLUB cmpxchg_double area */
void *freelist;
union {
unsigned long counters;
struct {
unsigned inuse:16; unsigned inuse:16;
unsigned objects:15; unsigned objects:15;
unsigned frozen:1; unsigned frozen:1;
/*
* Kernel may make use of this field even when slub
* uses the rest of the double word!
*/
atomic_t _count;
}; };
}; };
};
};
/* Third double word block */
struct list_head lru; /* Pageout list, eg. active_list
* protected by zone->lru_lock !
*/
/* Remainder is not double word aligned */
union { union {
struct {
unsigned long private; /* Mapping-private opaque data: unsigned long private; /* Mapping-private opaque data:
* usually used for buffer_heads * usually used for buffer_heads
* if PagePrivate set; used for * if PagePrivate set; used for
...@@ -55,27 +91,13 @@ struct page { ...@@ -55,27 +91,13 @@ struct page {
* indicates order in the buddy * indicates order in the buddy
* system if PG_buddy is set. * system if PG_buddy is set.
*/ */
struct address_space *mapping; /* If low bit clear, points to
* inode address_space, or NULL.
* If page mapped as anonymous
* memory, low bit is set, and
* it points to anon_vma object:
* see PAGE_MAPPING_ANON below.
*/
};
#if USE_SPLIT_PTLOCKS #if USE_SPLIT_PTLOCKS
spinlock_t ptl; spinlock_t ptl;
#endif #endif
struct kmem_cache *slab; /* SLUB: Pointer to slab */ struct kmem_cache *slab; /* SLUB: Pointer to slab */
struct page *first_page; /* Compound tail pages */ struct page *first_page; /* Compound tail pages */
}; };
union {
pgoff_t index; /* Our offset within mapping. */
void *freelist; /* SLUB: freelist req. slab lock */
};
struct list_head lru; /* Pageout list, eg. active_list
* protected by zone->lru_lock !
*/
/* /*
* On machines where all RAM is mapped into kernel address space, * On machines where all RAM is mapped into kernel address space,
* we can simply calculate the virtual address. On machines with * we can simply calculate the virtual address. On machines with
...@@ -101,7 +123,16 @@ struct page { ...@@ -101,7 +123,16 @@ struct page {
*/ */
void *shadow; void *shadow;
#endif #endif
}; }
/*
* If another subsystem starts using the double word pairing for atomic
* operations on struct page then it must change the #if to ensure
* proper alignment of the page struct.
*/
#if defined(CONFIG_SLUB) && defined(CONFIG_CMPXCHG_LOCAL)
__attribute__((__aligned__(2*sizeof(unsigned long))))
#endif
;
typedef unsigned long __nocast vm_flags_t; typedef unsigned long __nocast vm_flags_t;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment