Commit 3d7af078 authored by Rusty Russell, committed by Linus Torvalds

[PATCH] per-cpu areas

This is the Richard Henderson-approved, cleaner, brighter per-cpu patch.
parent 09c1076e
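
In outline: the patch collects variables tagged __per_cpu_data into a new .data.percpu linker section, has start_kernel() call setup_per_cpu_areas() to clone that section once per CPU, and resolves per_cpu()/this_cpu() to the requesting CPU's copy. A minimal usage sketch against just the interfaces added below — the variable 'hits' and function note_hit() are illustrative, not part of the patch:

	#include <linux/smp.h>

	/* Lands in .data.percpu; the boot-time original is discarded
	   once setup_per_cpu_areas() has copied it for every CPU. */
	static unsigned long hits __per_cpu_data;

	void note_hit(void)
	{
		/* this_cpu(hits) == per_cpu(hits, smp_processor_id());
		   on UP builds both collapse to plain 'hits'. */
		this_cpu(hits)++;
	}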
@@ -57,6 +57,10 @@ SECTIONS
 		*(.initcall7.init)
 	}
 	__initcall_end = .;
+	. = ALIGN(32);
+	__per_cpu_start = .;
+	.data.percpu : { *(.data.percpu) }
+	__per_cpu_end = .;
 	. = ALIGN(4096);
 	__init_end = .;
@@ -111,6 +111,10 @@ SECTIONS
 		*(.initcall7.init)
 	}
 	__initcall_end = .;
+	. = ALIGN(32);
+	__per_cpu_start = .;
+	.data.percpu : { *(.data.percpu) }
+	__per_cpu_end = .;
 	. = ALIGN(4096);
 	__init_end = .;
@@ -4,8 +4,10 @@
 #include <linux/config.h>
 #include <asm/cache.h>
 
+#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
+
 #ifndef L1_CACHE_ALIGN
-#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
+#define L1_CACHE_ALIGN(x) ALIGN(x, L1_CACHE_BYTES)
 #endif
 
 #ifndef SMP_CACHE_BYTES
@@ -13,4 +13,11 @@
 #define likely(x)	__builtin_expect((x),1)
 #define unlikely(x)	__builtin_expect((x),0)
 
+/* This macro obfuscates arithmetic on a variable address so that gcc
+   shouldn't recognize the original var, and make assumptions about it:
+   eg. strcpy(s, "xxx"+X) => memcpy(s, "xxx"+X, 4-X) */
+#define RELOC_HIDE(var, off)					\
+  ({ __typeof__(&(var)) __ptr;					\
+     __asm__ ("" : "=g"(__ptr) : "0"((void *)&(var) + (off)));	\
+     *__ptr; })
 #endif /* __LINUX_COMPILER_H */
@@ -11,6 +11,7 @@
 #ifdef CONFIG_SMP
 
 #include <linux/kernel.h>
+#include <linux/compiler.h>
 #include <asm/smp.h>
 
 /*
@@ -71,7 +72,17 @@ extern volatile int smp_msg_id;
 #define MSG_RESCHEDULE		0x0003	/* Reschedule request from master CPU*/
 #define MSG_CALL_FUNCTION	0x0004	/* Call function on all other CPUs */
 
-#else
+#define __per_cpu_data	__attribute__((section(".data.percpu")))
+
+#ifndef __HAVE_ARCH_PER_CPU
+extern unsigned long __per_cpu_offset[NR_CPUS];
+
+/* var is in discarded region: offset to particular copy we want */
+#define per_cpu(var, cpu) RELOC_HIDE(var, per_cpu_offset(cpu))
+#define this_cpu(var) per_cpu(var, smp_processor_id())
+#endif /* !__HAVE_ARCH_PER_CPU */
+
+#else /* !SMP */
 
 /*
  * These macros fold the SMP functionality into a single CPU system
@@ -90,6 +101,9 @@ extern volatile int smp_msg_id;
 #define cpu_online_map				1
 static inline void smp_send_reschedule(int cpu) { }
 static inline void smp_send_reschedule_all(void) { }
+#define __per_cpu_data
+#define per_cpu(var, cpu)			var
+#define this_cpu(var)				var
 
 #endif
 #endif
@@ -270,8 +270,32 @@ static void __init smp_init(void)
 #define smp_init()	do { } while (0)
 #endif
 
+static inline void setup_per_cpu_areas(void)
+{
+}
+
 #else
 
+#ifndef __HAVE_ARCH_PER_CPU
+unsigned long __per_cpu_offset[NR_CPUS];
+
+static void __init setup_per_cpu_areas(void)
+{
+	unsigned long size, i;
+	char *ptr;
+	/* Created by linker magic */
+	extern char __per_cpu_start[], __per_cpu_end[];
+
+	/* Copy section for each CPU (we discard the original) */
+	size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
+	ptr = alloc_bootmem(size * NR_CPUS);
+
+	for (i = 0; i < NR_CPUS; i++, ptr += size) {
+		__per_cpu_offset[i] = ptr - __per_cpu_start;
+		memcpy(ptr, __per_cpu_start, size);
+	}
+}
+#endif /* !__HAVE_ARCH_PER_CPU */
+
 /* Called by boot processor to activate the rest. */
 static void __init smp_init(void)
 {
@@ -314,6 +338,7 @@ asmlinkage void __init start_kernel(void)
 	lock_kernel();
 	printk(linux_banner);
 	setup_arch(&command_line);
+	setup_per_cpu_areas();
 	printk("Kernel command line: %s\n", saved_command_line);
 	parse_options(command_line);
 	trap_init();
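Address arithmetic, for reference: setup_per_cpu_areas() stores __per_cpu_offset[i] = ptr - __per_cpu_start, so a variable's offset inside the original .data.percpu section carries over unchanged into each CPU's copy. A hypothetical expansion of per_cpu() with RELOC_HIDE's empty asm stripped away — assuming per_cpu_offset(cpu) maps to the generic __per_cpu_offset[cpu]; 'x' and addr_on() are illustrative only:

	#include <linux/smp.h>

	static int x __per_cpu_data;

	/* The asm in RELOC_HIDE computes exactly this, while hiding
	   &x from gcc so it cannot "recognize the original var" and
	   optimize as in the strcpy example in compiler.h. */
	static int *addr_on(int cpu)
	{
		return (int *)((char *)&x + __per_cpu_offset[cpu]);
	}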