Commit 5129df03 authored by Linus Torvalds

Merge branch 'for-2.6.40' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu

* 'for-2.6.40' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  percpu: Unify input section names
  percpu: Avoid extra NOP in percpu_cmpxchg16b_double
  percpu: Cast away printk format warning
  percpu: Always align percpu output section to PAGE_SIZE

Fix up fairly trivial conflict in arch/x86/include/asm/percpu.h as per Tejun

Parents: 4d429480 6988f20f
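
Most of the merge is the same mechanical conversion repeated in each architecture's vmlinux.lds.S: the old PERCPU() macro took a cacheline size plus an explicit output-section alignment, while the new PERCPU_SECTION() takes only the cacheline size and always aligns the section to PAGE_SIZE. A before/after sketch of one such call site (the cacheline argument varies per architecture):

	/* before: the caller passed the output-section alignment explicitly */
	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)

	/* after: PAGE_SIZE alignment is implied by the macro itself */
	PERCPU_SECTION(L1_CACHE_BYTES)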
@@ -39,7 +39,7 @@ SECTIONS
 	__init_begin = ALIGN(PAGE_SIZE);
 	INIT_TEXT_SECTION(PAGE_SIZE)
 	INIT_DATA_SECTION(16)
-	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
+	PERCPU_SECTION(L1_CACHE_BYTES)
 	/* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page
 	   needed for the THREAD_SIZE aligned init_task gets freed after init */
 	. = ALIGN(THREAD_SIZE);
...
@@ -82,7 +82,7 @@ SECTIONS
 #endif
 	}
-	PERCPU(32, PAGE_SIZE)
+	PERCPU_SECTION(32)
 #ifndef CONFIG_XIP_KERNEL
 	. = ALIGN(PAGE_SIZE);
...
@@ -136,7 +136,7 @@ SECTIONS
 	. = ALIGN(16);
 	INIT_DATA_SECTION(16)
-	PERCPU(32, PAGE_SIZE)
+	PERCPU_SECTION(32)
 	.exit.data :
 	{
...
@@ -102,7 +102,7 @@ SECTIONS
 #endif
 	__vmlinux_end = .;	/* Last address of the physical file. */
 #ifdef CONFIG_ETRAX_ARCH_V32
-	PERCPU(32, PAGE_SIZE)
+	PERCPU_SECTION(32)
 	.init.ramfs : {
 		INIT_RAM_FS
...
@@ -37,7 +37,7 @@ SECTIONS
 	_einittext = .;
 	INIT_DATA_SECTION(8)
-	PERCPU(L1_CACHE_BYTES, 4096)
+	PERCPU_SECTION(L1_CACHE_BYTES)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
...
@@ -54,7 +54,7 @@ SECTIONS
 	__init_begin = .;
 	INIT_TEXT_SECTION(PAGE_SIZE)
 	INIT_DATA_SECTION(16)
-	PERCPU(32, PAGE_SIZE)
+	PERCPU_SECTION(32)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
...
@@ -118,7 +118,7 @@ SECTIONS
 		EXIT_DATA
 	}
-	PERCPU(1 << CONFIG_MIPS_L1_CACHE_SHIFT, PAGE_SIZE)
+	PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
...
@@ -70,7 +70,7 @@ SECTIONS
 	.exit.text : { EXIT_TEXT; }
 	.exit.data : { EXIT_DATA; }
-	PERCPU(32, PAGE_SIZE)
+	PERCPU_SECTION(32)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
...
@@ -149,7 +149,7 @@ SECTIONS
 		EXIT_DATA
 	}
-	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
+	PERCPU_SECTION(L1_CACHE_BYTES)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
...
@@ -160,7 +160,7 @@ SECTIONS
 		INIT_RAM_FS
 	}
-	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
+	PERCPU_SECTION(L1_CACHE_BYTES)
 	. = ALIGN(8);
 	.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
...
@@ -77,7 +77,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	INIT_DATA_SECTION(0x100)
-	PERCPU(0x100, PAGE_SIZE)
+	PERCPU_SECTION(0x100)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;		/* freed after init ends here */
...
@@ -66,7 +66,7 @@ SECTIONS
 		__machvec_end = .;
 	}
-	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
+	PERCPU_SECTION(L1_CACHE_BYTES)
 	/*
 	 * .exit.text is discarded at runtime, not link time, to deal with
...
@@ -108,7 +108,7 @@ SECTIONS
 		__sun4v_2insn_patch_end = .;
 	}
-	PERCPU(SMP_CACHE_BYTES, PAGE_SIZE)
+	PERCPU_SECTION(SMP_CACHE_BYTES)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
...
@@ -60,7 +60,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	VMLINUX_SYMBOL(_sinitdata) = .;
 	INIT_DATA_SECTION(16) :data =0
-	PERCPU(L2_CACHE_BYTES, PAGE_SIZE)
+	PERCPU_SECTION(L2_CACHE_BYTES)
 	. = ALIGN(PAGE_SIZE);
 	VMLINUX_SYMBOL(_einitdata) = .;
...
@@ -42,7 +42,7 @@
 	INIT_SETUP(0)
 	}
-	PERCPU(32, 32)
+	PERCPU_SECTION(32)
 	.initcall.init : {
 		INIT_CALLS
...
@@ -509,6 +509,11 @@ do {								\
  * it in software. The address used in the cmpxchg16 instruction must be
  * aligned to a 16 byte boundary.
  */
+#ifdef CONFIG_SMP
+#define CMPXCHG16B_EMU_CALL "call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP3
+#else
+#define CMPXCHG16B_EMU_CALL "call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP2
+#endif
 #define percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)		\
 ({								\
 	char __ret;						\
@@ -517,7 +522,7 @@ do {								\
 	typeof(o2) __o2 = o2;					\
 	typeof(o2) __n2 = n2;					\
 	typeof(o2) __dummy;					\
-	alternative_io("call this_cpu_cmpxchg16b_emu\n\t" ASM_NOP4,	\
+	alternative_io(CMPXCHG16B_EMU_CALL,			\
 		       "cmpxchg16b " __percpu_prefix "(%%rsi)\n\tsetz %0\n\t", \
 		       X86_FEATURE_CX16,			\
 		       ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)),	\
...
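
A note on the two NOP sizes (my own reading of the change, not text from the commit): alternative_io() patches the cmpxchg16b replacement over the original call sequence, so the call has to be padded out to at least the replacement's length, and on SMP the __percpu_prefix adds a %gs segment-override byte to that replacement. A rough byte-accounting sketch, assuming the usual x86-64 encodings:

	/*
	 * Replacement: cmpxchg16b [%gs:](%rsi) + setz %0
	 *   SMP: 5 + 3 = 8 bytes  ->  call (5) + ASM_NOP3 (3) = 8
	 *   UP:  4 + 3 = 7 bytes  ->  call (5) + ASM_NOP2 (2) = 7
	 *
	 * The old unconditional ASM_NOP4 padded the call path to 9 bytes,
	 * i.e. one or two NOP bytes more than the replacement ever needs.
	 */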
@@ -326,7 +326,7 @@ SECTIONS
 	}
 #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
-	PERCPU(INTERNODE_CACHE_BYTES, PAGE_SIZE)
+	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
 #endif
 	. = ALIGN(PAGE_SIZE);
...
@@ -155,7 +155,7 @@ SECTIONS
 		INIT_RAM_FS
 	}
-	PERCPU(XCHAL_ICACHE_LINESIZE, PAGE_SIZE)
+	PERCPU_SECTION(XCHAL_ICACHE_LINESIZE)
 	/* We need this dummy segment here */
...
@@ -15,7 +15,7 @@
  *	HEAD_TEXT_SECTION
  *	INIT_TEXT_SECTION(PAGE_SIZE)
  *	INIT_DATA_SECTION(...)
- *	PERCPU(CACHELINE_SIZE, PAGE_SIZE)
+ *	PERCPU_SECTION(CACHELINE_SIZE)
  *	__init_end = .;
  *
  *	_stext = .;
@@ -681,6 +681,28 @@
 	*(.discard.*)						\
 	}

+/**
+ * PERCPU_INPUT - the percpu input sections
+ * @cacheline: cacheline size
+ *
+ * The core percpu section names and core symbols which do not rely
+ * directly upon load addresses.
+ *
+ * @cacheline is used to align subsections to avoid false cacheline
+ * sharing between subsections for different purposes.
+ */
+#define PERCPU_INPUT(cacheline)					\
+	VMLINUX_SYMBOL(__per_cpu_start) = .;			\
+	*(.data..percpu..first)					\
+	. = ALIGN(PAGE_SIZE);					\
+	*(.data..percpu..page_aligned)				\
+	. = ALIGN(cacheline);					\
+	*(.data..percpu..readmostly)				\
+	. = ALIGN(cacheline);					\
+	*(.data..percpu)					\
+	*(.data..percpu..shared_aligned)			\
+	VMLINUX_SYMBOL(__per_cpu_end) = .;
+
 /**
  * PERCPU_VADDR - define output section for percpu area
  * @cacheline: cacheline size
@@ -703,52 +725,33 @@
  *
  * Note that this macros defines __per_cpu_load as an absolute symbol.
  * If there is no need to put the percpu section at a predetermined
- * address, use PERCPU().
+ * address, use PERCPU_SECTION.
  */
 #define PERCPU_VADDR(cacheline, vaddr, phdr)			\
 	VMLINUX_SYMBOL(__per_cpu_load) = .;			\
 	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)	\
 				- LOAD_OFFSET) {		\
-		VMLINUX_SYMBOL(__per_cpu_start) = .;		\
-		*(.data..percpu..first)				\
-		. = ALIGN(PAGE_SIZE);				\
-		*(.data..percpu..page_aligned)			\
-		. = ALIGN(cacheline);				\
-		*(.data..percpu..readmostly)			\
-		. = ALIGN(cacheline);				\
-		*(.data..percpu)				\
-		*(.data..percpu..shared_aligned)		\
-		VMLINUX_SYMBOL(__per_cpu_end) = .;		\
+		PERCPU_INPUT(cacheline)				\
 	} phdr							\
 	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);

 /**
- * PERCPU - define output section for percpu area, simple version
+ * PERCPU_SECTION - define output section for percpu area, simple version
  * @cacheline: cacheline size
- * @align: required alignment
  *
- * Align to @align and outputs output section for percpu area. This macro
- * doesn't manipulate @vaddr or @phdr and __per_cpu_load and
+ * Align to PAGE_SIZE and outputs output section for percpu area. This
+ * macro doesn't manipulate @vaddr or @phdr and __per_cpu_load and
  * __per_cpu_start will be identical.
  *
- * This macro is equivalent to ALIGN(@align); PERCPU_VADDR(@cacheline,,)
+ * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
  * except that __per_cpu_load is defined as a relative symbol against
  * .data..percpu which is required for relocatable x86_32 configuration.
  */
-#define PERCPU(cacheline, align)				\
-	. = ALIGN(align);					\
+#define PERCPU_SECTION(cacheline)				\
+	. = ALIGN(PAGE_SIZE);					\
 	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__per_cpu_load) = .;		\
-		VMLINUX_SYMBOL(__per_cpu_start) = .;		\
-		*(.data..percpu..first)				\
-		. = ALIGN(PAGE_SIZE);				\
-		*(.data..percpu..page_aligned)			\
-		. = ALIGN(cacheline);				\
-		*(.data..percpu..readmostly)			\
-		. = ALIGN(cacheline);				\
-		*(.data..percpu)				\
-		*(.data..percpu..shared_aligned)		\
-		VMLINUX_SYMBOL(__per_cpu_end) = .;		\
+		PERCPU_INPUT(cacheline)				\
 	}
...
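
For reference, hand-expanding the new PERCPU_SECTION(L1_CACHE_BYTES) through PERCPU_INPUT (VMLINUX_SYMBOL left unexpanded) gives the same section layout the old PERCPU(L1_CACHE_BYTES, PAGE_SIZE) emitted directly:

	. = ALIGN(PAGE_SIZE);
	.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) {
		VMLINUX_SYMBOL(__per_cpu_load) = .;
		VMLINUX_SYMBOL(__per_cpu_start) = .;
		*(.data..percpu..first)
		. = ALIGN(PAGE_SIZE);
		*(.data..percpu..page_aligned)
		. = ALIGN(L1_CACHE_BYTES);
		*(.data..percpu..readmostly)
		. = ALIGN(L1_CACHE_BYTES);
		*(.data..percpu)
		*(.data..percpu..shared_aligned)
		VMLINUX_SYMBOL(__per_cpu_end) = .;
	}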
@@ -2866,9 +2866,7 @@ static int alloc_cwqs(struct workqueue_struct *wq)
 		}
 	}

-	/* just in case, make sure it's actually aligned
-	 * - this is affected by PERCPU() alignment in vmlinux.lds.S
-	 */
+	/* just in case, make sure it's actually aligned */
 	BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
 	return wq->cpu_wq.v ? 0 : -ENOMEM;
 }
...
@@ -1215,8 +1215,10 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
 #ifdef CONFIG_SMP
 	PCPU_SETUP_BUG_ON(!ai->static_size);
+	PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK);
 #endif
 	PCPU_SETUP_BUG_ON(!base_addr);
+	PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK);
 	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
 	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
 	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
@@ -1645,8 +1647,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 	/* warn if maximum distance is further than 75% of vmalloc space */
 	if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
 		pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
-			   "space 0x%lx\n",
-			   max_distance, VMALLOC_END - VMALLOC_START);
+			   "space 0x%lx\n", max_distance,
+			   (unsigned long)(VMALLOC_END - VMALLOC_START));
 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
 		/* and fail if we have fallback */
 		rc = -EINVAL;
...
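
The two added checks use the standard page-alignment idiom. A small worked example (assuming 4 KiB pages, so ~PAGE_MASK is 0xfff; the addresses are made up for illustration):

	/*
	 * (addr & ~PAGE_MASK) keeps only the offset-within-page bits, so it
	 * is non-zero exactly when addr is not page aligned:
	 *
	 *   0x12345000 & 0xfff == 0x000  ->  aligned, PCPU_SETUP_BUG_ON() passes
	 *   0x12345010 & 0xfff == 0x010  ->  misaligned, PCPU_SETUP_BUG_ON() fires
	 */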