Commit 7265df88 authored by Alexander Viro, committed by Linus Torvalds

[PATCH] Clean up __cacheline_aligned

arm-26, ppc, sparc, sparc64 and sh have per-arch definitions of
__cacheline_aligned that are identical to the default.  And yes, removal is
safe: all users of __cacheline_aligned actually pull in linux/cache.h.
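
For reference, the generic definition these architectures were duplicating
lives in include/linux/cache.h. A rough sketch of the 2.5-era fallback
(keyed off SMP_CACHE_BYTES, which equals L1_CACHE_BYTES on the
architectures touched below):

#ifndef __cacheline_aligned
#ifdef MODULE
/* Module data cannot be placed in the kernel's .data.cacheline_aligned
 * section, so only the alignment attribute applies. */
#define __cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#else
/* Built-in objects are aligned and also grouped into a dedicated section
 * so that cacheline-aligned data gets packed together at link time. */
#define __cacheline_aligned \
	__attribute__((__aligned__(SMP_CACHE_BYTES), \
			__section__(".data.cacheline_aligned")))
#endif
#endif /* __cacheline_aligned */

Because the generic version is guarded by #ifndef, deleting the per-arch
copies leaves every user with the same expansion as before.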
parent 6c34bb17
@@ -8,12 +8,4 @@
 #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
-#ifdef MODULE
-#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
-#else
-#define __cacheline_aligned \
-	__attribute__((__aligned__(L1_CACHE_BYTES), \
-			__section__(".data.cacheline_aligned")))
-#endif
 #endif
@@ -30,14 +30,6 @@
 #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
 #define L1_CACHE_PAGES 8
-#ifdef MODULE
-#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
-#else
-#define __cacheline_aligned \
-	__attribute__((__aligned__(L1_CACHE_BYTES), \
-			__section__(".data.cacheline_aligned")))
-#endif
 #ifndef __ASSEMBLY__
 extern void clean_dcache_range(unsigned long start, unsigned long stop);
 extern void flush_dcache_range(unsigned long start, unsigned long stop);
...
@@ -21,14 +21,6 @@
 #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
-#ifdef MODULE
-#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
-#else
-#define __cacheline_aligned \
-	__attribute__((__aligned__(L1_CACHE_BYTES), \
-			__section__(".data.cacheline_aligned")))
-#endif
 #define L1_CACHE_SHIFT_MAX 5 /* largest L1 which this arch supports */
 struct cache_info {
...
@@ -17,14 +17,6 @@
 #define SMP_CACHE_BYTES 32
-#ifdef MODULE
-#define __cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
-#else
-#define __cacheline_aligned \
-	__attribute__((__aligned__(SMP_CACHE_BYTES), \
-			__section__(".data.cacheline_aligned")))
-#endif
 /* Direct access to the instruction cache is provided through and
  * alternate address space. The IDC bit must be off in the ICCR on
  * HyperSparcs for these accesses to work. The code below does not do
...
@@ -14,12 +14,4 @@
 #define SMP_CACHE_BYTES_SHIFT 6
 #define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT) /* L2 cache line size. */
-#ifdef MODULE
-#define __cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
-#else
-#define __cacheline_aligned \
-	__attribute__((__aligned__(SMP_CACHE_BYTES), \
-			__section__(".data.cacheline_aligned")))
-#endif
 #endif