Commit 1cfb7a16 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] ensure that the per-zone locks fall in separate cachelines

Use the new max cache alignment to optimise the layout of struct zone.

struct zone goes from ~270 bytes (UP) to 768 bytes (SMP, x86).  This is
not a trick which should be generally used.
parent f9da78fb
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/cache.h>
#include <asm/atomic.h> #include <asm/atomic.h>
/* /*
...@@ -27,6 +28,21 @@ typedef struct free_area_struct { ...@@ -27,6 +28,21 @@ typedef struct free_area_struct {
struct pglist_data; struct pglist_data;
/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines. There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
int x;	/* dummy member: the struct exists only for its alignment/size */
} ____cacheline_maxaligned_in_smp;	/* round up to the largest cacheline size (from <linux/cache.h>) */
/* Expands to a padding field inside struct zone, pushing the next field into a new cacheline. */
#define ZONE_PADDING(name) struct zone_padding name;
#else
/* UP build: no cross-CPU false sharing is possible, so emit no padding at all. */
#define ZONE_PADDING(name)
#endif
/* /*
* On machines where it is needed (eg PCs) we divide physical memory * On machines where it is needed (eg PCs) we divide physical memory
* into multiple physical zones. On a PC we have 3 zones: * into multiple physical zones. On a PC we have 3 zones:
...@@ -35,6 +51,7 @@ struct pglist_data; ...@@ -35,6 +51,7 @@ struct pglist_data;
* ZONE_NORMAL 16-896 MB direct mapped by the kernel * ZONE_NORMAL 16-896 MB direct mapped by the kernel
* ZONE_HIGHMEM > 896 MB only page cache and user processes * ZONE_HIGHMEM > 896 MB only page cache and user processes
*/ */
struct zone { struct zone {
/* /*
* Commonly accessed fields: * Commonly accessed fields:
...@@ -44,6 +61,8 @@ struct zone { ...@@ -44,6 +61,8 @@ struct zone {
unsigned long pages_min, pages_low, pages_high; unsigned long pages_min, pages_low, pages_high;
int need_balance; int need_balance;
ZONE_PADDING(_pad1_)
spinlock_t lru_lock; spinlock_t lru_lock;
struct list_head active_list; struct list_head active_list;
struct list_head inactive_list; struct list_head inactive_list;
...@@ -51,6 +70,8 @@ struct zone { ...@@ -51,6 +70,8 @@ struct zone {
unsigned long nr_active; unsigned long nr_active;
unsigned long nr_inactive; unsigned long nr_inactive;
ZONE_PADDING(_pad2_)
/* /*
* free areas of different sizes * free areas of different sizes
*/ */
...@@ -97,7 +118,7 @@ struct zone { ...@@ -97,7 +118,7 @@ struct zone {
*/ */
char *name; char *name;
unsigned long size; unsigned long size;
}; } ____cacheline_maxaligned_in_smp;
#define ZONE_DMA 0 #define ZONE_DMA 0
#define ZONE_NORMAL 1 #define ZONE_NORMAL 1
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment