Commit 4af57b78 authored by Tim Abbott, committed by Michal Marek

Rename .data.cacheline_aligned to .data..cacheline_aligned.

Signed-off-by: Tim Abbott <tabbott@ksplice.com>
Cc: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
Signed-off-by: Michal Marek <mmarek@suse.cz>
parent bc75cc6b
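
Context note, not part of the commit itself: the rationale below is inferred from the kernel-wide section-name cleanup this patch belongs to. With gcc's -ffunction-sections/-fdata-sections, each symbol is emitted into its own section named .text.<identifier> or .data.<identifier>, so kernel-special sections containing a single dot can collide with compiler-generated names. A C identifier can never contain a dot, so the double-dot name .data..cacheline_aligned cannot be produced by the compiler. A minimal userspace sketch of the collision risk (file and variable names are hypothetical):

/* hypothetical example, not from the kernel tree
 *
 * Build with:    gcc -fdata-sections -c collision.c
 * Inspect with:  readelf -S collision.o
 *
 * With -fdata-sections, gcc places each variable in its own section
 * named .data.<identifier>.  A global that happens to be called
 * "cacheline_aligned" therefore lands in a section named
 * ".data.cacheline_aligned" -- exactly the name the kernel used for
 * its special section before this rename.  No identifier can yield
 * ".data..cacheline_aligned", so the double-dot name is collision-free.
 */
int cacheline_aligned = 1;
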
@@ -231,7 +231,7 @@ SECTIONS
 		PAGE_ALIGNED_DATA(PAGE_SIZE)
 	}
-	.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
+	.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
 		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
 	}
@@ -34,7 +34,7 @@ EXPORT_SYMBOL(init_task);
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data.cacheline_aligned
+ * so they are allowed to end up in the .data..cacheline_aligned
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
@@ -189,7 +189,7 @@
 #define CACHELINE_ALIGNED_DATA(align)				\
 	. = ALIGN(align);					\
-	*(.data.cacheline_aligned)
+	*(.data..cacheline_aligned)
 
 #define INIT_TASK_DATA(align)					\
 	. = ALIGN(align);					\
@@ -31,7 +31,7 @@
 #ifndef __cacheline_aligned
 #define __cacheline_aligned					\
   __attribute__((__aligned__(SMP_CACHE_BYTES),			\
-		 __section__(".data.cacheline_aligned")))
+		 __section__(".data..cacheline_aligned")))
 #endif /* __cacheline_aligned */
 
 #ifndef __cacheline_aligned_in_smp
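
For reference, a minimal sketch (the struct and variable names are made up, not code from this commit) of how __cacheline_aligned from <linux/cache.h> is typically used; after this patch such a variable is emitted into .data..cacheline_aligned, which the CACHELINE_ALIGNED_DATA() macro above collects into the kernel image:

#include <linux/cache.h>

/* hypothetical example: keep a frequently written structure on its own
 * cache line so that updates from one CPU do not bounce the line away
 * from other CPUs (the "cacheline ping-pong" mentioned in the TSS
 * comment above) */
struct hot_counters {
	unsigned long	hits;
	unsigned long	misses;
};

static struct hot_counters hot_counters __cacheline_aligned;

On a built kernel, the rename can be checked with something like readelf -S -W vmlinux | grep cacheline, which should now show .data..cacheline_aligned rather than the old single-dot name.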