Commit e6ad640b authored by Shakeel Butt, committed by Andrew Morton

mm: deduplicate cacheline padding code

There are three users (mmzone.h, memcontrol.h, page_counter.h) using
similar code for forcing cacheline padding between fields of different
structures.  Dedup that code.

Link: https://lkml.kernel.org/r/20220826230642.566725-1-shakeelb@google.com
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Suggested-by: Feng Tang <feng.tang@intel.com>
Reviewed-by: Feng Tang <feng.tang@intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 974f4367
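
For readers unfamiliar with the pattern being consolidated, here is a minimal, stand-alone user-space sketch of what such a padding helper does. It is not part of the commit: it approximates ____cacheline_internodealigned_in_smp with a plain 64-byte alignment attribute, and the struct and field names (struct example, hot_counter, read_mostly_limit) are made up for illustration.

#include <stdio.h>
#include <stddef.h>

/* Stand-in for the kernel helper: a zero-size member whose only job is
 * to carry cacheline alignment (64-byte cachelines assumed here). */
struct cacheline_padding {
	char x[0];
} __attribute__((aligned(64)));

#define CACHELINE_PADDING(name)	struct cacheline_padding name

/* Hypothetical structure with one write-hot and one read-mostly field. */
struct example {
	long hot_counter;		/* written on every operation */
	CACHELINE_PADDING(_pad1_);	/* pushes what follows onto a new cacheline */
	long read_mostly_limit;		/* read often, written rarely */
};

int main(void)
{
	printf("hot_counter at offset %zu\n",
	       offsetof(struct example, hot_counter));
	printf("read_mostly_limit at offset %zu\n",
	       offsetof(struct example, read_mostly_limit));
	printf("sizeof(struct example) = %zu\n", sizeof(struct example));
	return 0;
}

The aligned, zero-size member takes no space itself, but it rounds the offset of the next field up to the next cacheline boundary (64 here), so the write-hot and read-mostly fields can no longer share, and ping-pong, a single cacheline between CPUs.
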
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -85,4 +85,17 @@
 #define cache_line_size()	L1_CACHE_BYTES
 #endif
 
+/*
+ * Helper to add padding within a struct to ensure data fall into separate
+ * cachelines.
+ */
+#if defined(CONFIG_SMP)
+struct cacheline_padding {
+	char x[0];
+} ____cacheline_internodealigned_in_smp;
+#define CACHELINE_PADDING(name)	struct cacheline_padding name
+#else
+#define CACHELINE_PADDING(name)
+#endif
+
 #endif /* __LINUX_CACHE_H */
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -185,15 +185,6 @@ struct mem_cgroup_thresholds {
 	struct mem_cgroup_threshold_ary *spare;
 };
 
-#if defined(CONFIG_SMP)
-struct memcg_padding {
-	char x[0];
-} ____cacheline_internodealigned_in_smp;
-#define MEMCG_PADDING(name)	struct memcg_padding name
-#else
-#define MEMCG_PADDING(name)
-#endif
-
 /*
  * Remember four most recent foreign writebacks with dirty pages in this
  * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
@@ -304,7 +295,7 @@ struct mem_cgroup {
 	spinlock_t		move_lock;
 	unsigned long		move_lock_flags;
 
-	MEMCG_PADDING(_pad1_);
+	CACHELINE_PADDING(_pad1_);
 
 	/* memory.stat */
 	struct memcg_vmstats	vmstats;
@@ -326,7 +317,7 @@ struct mem_cgroup {
 	struct list_head objcg_list;
 #endif
 
-	MEMCG_PADDING(_pad2_);
+	CACHELINE_PADDING(_pad2_);
 
 	/*
 	 * set > 0 if pages under this cgroup are moving to other cgroup.
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -121,20 +121,6 @@ static inline bool free_area_empty(struct free_area *area, int migratetype)
 
 struct pglist_data;
 
-/*
- * Add a wild amount of padding here to ensure data fall into separate
- * cachelines.  There are very few zone structures in the machine, so space
- * consumption is not a concern here.
- */
-#if defined(CONFIG_SMP)
-struct zone_padding {
-	char x[0];
-} ____cacheline_internodealigned_in_smp;
-#define ZONE_PADDING(name)	struct zone_padding name;
-#else
-#define ZONE_PADDING(name)
-#endif
-
 #ifdef CONFIG_NUMA
 enum numa_stat_item {
 	NUMA_HIT,		/* allocated in intended node */
@@ -837,7 +823,7 @@ struct zone {
 	int initialized;
 
 	/* Write-intensive fields used from the page allocator */
-	ZONE_PADDING(_pad1_)
+	CACHELINE_PADDING(_pad1_);
 
 	/* free areas of different sizes */
 	struct free_area	free_area[MAX_ORDER];
@@ -849,7 +835,7 @@ struct zone {
 	spinlock_t		lock;
 
 	/* Write-intensive fields used by compaction and vmstats. */
-	ZONE_PADDING(_pad2_)
+	CACHELINE_PADDING(_pad2_);
 
 	/*
 	 * When free pages are below this point, additional steps are taken
@@ -886,7 +872,7 @@ struct zone {
 	bool			contiguous;
 
-	ZONE_PADDING(_pad3_)
+	CACHELINE_PADDING(_pad3_);
 
 	/* Zone statistics */
 	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
 	atomic_long_t		vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
@@ -1196,7 +1182,7 @@ typedef struct pglist_data {
 #endif /* CONFIG_NUMA */
 
 	/* Write-intensive fields used by page reclaim */
-	ZONE_PADDING(_pad1_)
+	CACHELINE_PADDING(_pad1_);
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 	/*
@@ -1241,7 +1227,7 @@ typedef struct pglist_data {
 	struct lru_gen_mm_walk mm_walk;
 #endif
 
-	ZONE_PADDING(_pad2_)
+	CACHELINE_PADDING(_pad2_);
 
 	/* Per-node vmstats */
 	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -7,22 +7,13 @@
 #include <linux/kernel.h>
 #include <asm/page.h>
 
-#if defined(CONFIG_SMP)
-struct pc_padding {
-	char x[0];
-} ____cacheline_internodealigned_in_smp;
-#define PC_PADDING(name)	struct pc_padding name
-#else
-#define PC_PADDING(name)
-#endif
-
 struct page_counter {
 	/*
 	 * Make sure 'usage' does not share cacheline with any other field. The
 	 * memcg->memory.usage is a hot member of struct mem_cgroup.
 	 */
 	atomic_long_t usage;
-	PC_PADDING(_pad1_);
+	CACHELINE_PADDING(_pad1_);
 
 	/* effective memory.min and memory.min usage tracking */
 	unsigned long emin;
@@ -38,7 +29,7 @@ struct page_counter {
 	unsigned long failcnt;
 
 	/* Keep all the read most fields in a separete cacheline. */
-	PC_PADDING(_pad2_);
+	CACHELINE_PADDING(_pad2_);
 
 	unsigned long min;
 	unsigned long low;