Commit edc35bd7 authored by Thomas Gleixner

locking: Rename __RAW_SPIN_LOCK_UNLOCKED to __ARCH_SPIN_LOCK_UNLOCKED

Further name space cleanup. No functional change
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
parent 445c8951
...@@ -9,7 +9,7 @@ typedef struct { ...@@ -9,7 +9,7 @@ typedef struct {
volatile unsigned int lock; volatile unsigned int lock;
} arch_spinlock_t; } arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 } #define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct { typedef struct {
volatile unsigned int lock; volatile unsigned int lock;
......
...@@ -9,7 +9,7 @@ typedef struct { ...@@ -9,7 +9,7 @@ typedef struct {
volatile unsigned int lock; volatile unsigned int lock;
} arch_spinlock_t; } arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 } #define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct { typedef struct {
volatile unsigned int lock; volatile unsigned int lock;
......
...@@ -17,7 +17,7 @@ typedef struct { ...@@ -17,7 +17,7 @@ typedef struct {
volatile unsigned int lock; volatile unsigned int lock;
} arch_spinlock_t; } arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 } #define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct { typedef struct {
volatile unsigned int lock; volatile unsigned int lock;
......
...@@ -9,7 +9,7 @@ typedef struct { ...@@ -9,7 +9,7 @@ typedef struct {
volatile unsigned int lock; volatile unsigned int lock;
} arch_spinlock_t; } arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 } #define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct { typedef struct {
volatile unsigned int read_counter : 31; volatile unsigned int read_counter : 31;
......
...@@ -9,7 +9,7 @@ typedef struct { ...@@ -9,7 +9,7 @@ typedef struct {
volatile int slock; volatile int slock;
} arch_spinlock_t; } arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 1 } #define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
typedef struct { typedef struct {
volatile int lock; volatile int lock;
......
...@@ -14,7 +14,7 @@ typedef struct { ...@@ -14,7 +14,7 @@ typedef struct {
unsigned int lock; unsigned int lock;
} arch_spinlock_t; } arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 } #define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct { typedef struct {
volatile unsigned int lock; volatile unsigned int lock;
......
...@@ -4,10 +4,10 @@ ...@@ -4,10 +4,10 @@
typedef struct { typedef struct {
#ifdef CONFIG_PA20 #ifdef CONFIG_PA20
volatile unsigned int slock; volatile unsigned int slock;
# define __RAW_SPIN_LOCK_UNLOCKED { 1 } # define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
#else #else
volatile unsigned int lock[4]; volatile unsigned int lock[4];
# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } # define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
#endif #endif
} arch_spinlock_t; } arch_spinlock_t;
...@@ -16,6 +16,6 @@ typedef struct { ...@@ -16,6 +16,6 @@ typedef struct {
volatile int counter; volatile int counter;
} raw_rwlock_t; } raw_rwlock_t;
#define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 } #define __RAW_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
#endif #endif
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
[0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
}; };
#endif #endif
......
...@@ -9,7 +9,7 @@ typedef struct { ...@@ -9,7 +9,7 @@ typedef struct {
volatile unsigned int slock; volatile unsigned int slock;
} arch_spinlock_t; } arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 } #define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct { typedef struct {
volatile signed int lock; volatile signed int lock;
......
...@@ -42,7 +42,7 @@ ...@@ -42,7 +42,7 @@
#include <asm/mmu.h> #include <asm/mmu.h>
struct rtas_t rtas = { struct rtas_t rtas = {
.lock = __RAW_SPIN_LOCK_UNLOCKED .lock = __ARCH_SPIN_LOCK_UNLOCKED
}; };
EXPORT_SYMBOL(rtas); EXPORT_SYMBOL(rtas);
......
...@@ -9,7 +9,7 @@ typedef struct { ...@@ -9,7 +9,7 @@ typedef struct {
volatile unsigned int owner_cpu; volatile unsigned int owner_cpu;
} __attribute__ ((aligned (4))) arch_spinlock_t; } __attribute__ ((aligned (4))) arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 } #define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct { typedef struct {
volatile unsigned int lock; volatile unsigned int lock;
......
...@@ -9,7 +9,7 @@ typedef struct { ...@@ -9,7 +9,7 @@ typedef struct {
volatile unsigned int lock; volatile unsigned int lock;
} arch_spinlock_t; } arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 1 } #define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
typedef struct { typedef struct {
volatile unsigned int lock; volatile unsigned int lock;
......
...@@ -9,7 +9,7 @@ typedef struct { ...@@ -9,7 +9,7 @@ typedef struct {
volatile unsigned char lock; volatile unsigned char lock;
} arch_spinlock_t; } arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 } #define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct { typedef struct {
volatile unsigned int lock; volatile unsigned int lock;
......
...@@ -9,7 +9,7 @@ typedef struct arch_spinlock { ...@@ -9,7 +9,7 @@ typedef struct arch_spinlock {
unsigned int slock; unsigned int slock;
} arch_spinlock_t; } arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 } #define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct { typedef struct {
unsigned int lock; unsigned int lock;
......
...@@ -188,7 +188,7 @@ void dump_stack(void) ...@@ -188,7 +188,7 @@ void dump_stack(void)
} }
EXPORT_SYMBOL(dump_stack); EXPORT_SYMBOL(dump_stack);
static arch_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1; static int die_owner = -1;
static unsigned int die_nest_count; static unsigned int die_nest_count;
......
...@@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count; ...@@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count;
* we want to have the fastest, inlined, non-debug version * we want to have the fastest, inlined, non-debug version
* of a critical section, to be able to prove TSC time-warps: * of a critical section, to be able to prove TSC time-warps:
*/ */
static __cpuinitdata arch_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED; static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static __cpuinitdata cycles_t last_tsc; static __cpuinitdata cycles_t last_tsc;
static __cpuinitdata cycles_t max_warp; static __cpuinitdata cycles_t max_warp;
......
...@@ -43,14 +43,14 @@ typedef struct { ...@@ -43,14 +43,14 @@ typedef struct {
#ifdef CONFIG_DEBUG_SPINLOCK #ifdef CONFIG_DEBUG_SPINLOCK
# define __SPIN_LOCK_UNLOCKED(lockname) \ # define __SPIN_LOCK_UNLOCKED(lockname) \
(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ (spinlock_t) { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
.magic = SPINLOCK_MAGIC, \ .magic = SPINLOCK_MAGIC, \
.owner = SPINLOCK_OWNER_INIT, \ .owner = SPINLOCK_OWNER_INIT, \
.owner_cpu = -1, \ .owner_cpu = -1, \
SPIN_DEP_MAP_INIT(lockname) } SPIN_DEP_MAP_INIT(lockname) }
#else #else
# define __SPIN_LOCK_UNLOCKED(lockname) \ # define __SPIN_LOCK_UNLOCKED(lockname) \
(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ (spinlock_t) { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
SPIN_DEP_MAP_INIT(lockname) } SPIN_DEP_MAP_INIT(lockname) }
#endif #endif
......
...@@ -18,13 +18,13 @@ typedef struct { ...@@ -18,13 +18,13 @@ typedef struct {
volatile unsigned int slock; volatile unsigned int slock;
} arch_spinlock_t; } arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 1 } #define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
#else #else
typedef struct { } arch_spinlock_t; typedef struct { } arch_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { } #define __ARCH_SPIN_LOCK_UNLOCKED { }
#endif #endif
......
...@@ -73,7 +73,7 @@ module_param(lock_stat, int, 0644); ...@@ -73,7 +73,7 @@ module_param(lock_stat, int, 0644);
* to use a raw spinlock - we really dont want the spinlock * to use a raw spinlock - we really dont want the spinlock
* code to recurse back into the lockdep code... * code to recurse back into the lockdep code...
*/ */
static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static int graph_lock(void) static int graph_lock(void)
{ {
......
...@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) ...@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
cpu_buffer->buffer = buffer; cpu_buffer->buffer = buffer;
spin_lock_init(&cpu_buffer->reader_lock); spin_lock_init(&cpu_buffer->reader_lock);
lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
cpu_buffer->lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
GFP_KERNEL, cpu_to_node(cpu)); GFP_KERNEL, cpu_to_node(cpu));
......
...@@ -501,7 +501,7 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) ...@@ -501,7 +501,7 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
* CONFIG_TRACER_MAX_TRACE. * CONFIG_TRACER_MAX_TRACE.
*/ */
static arch_spinlock_t ftrace_max_lock = static arch_spinlock_t ftrace_max_lock =
(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
#ifdef CONFIG_TRACER_MAX_TRACE #ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly tracing_max_latency; unsigned long __read_mostly tracing_max_latency;
...@@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; ...@@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN]; static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx; static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED; static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/* temporary disable recording */ /* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly; static atomic_t trace_record_cmdline_disabled __read_mostly;
...@@ -1252,7 +1252,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) ...@@ -1252,7 +1252,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{ {
static arch_spinlock_t trace_buf_lock = static arch_spinlock_t trace_buf_lock =
(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static u32 trace_buf[TRACE_BUF_SIZE]; static u32 trace_buf[TRACE_BUF_SIZE];
struct ftrace_event_call *call = &event_bprint; struct ftrace_event_call *call = &event_bprint;
...@@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr, ...@@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr,
int trace_array_vprintk(struct trace_array *tr, int trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args) unsigned long ip, const char *fmt, va_list args)
{ {
static arch_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static char trace_buf[TRACE_BUF_SIZE]; static char trace_buf[TRACE_BUF_SIZE];
struct ftrace_event_call *call = &event_print; struct ftrace_event_call *call = &event_print;
...@@ -4308,7 +4308,7 @@ trace_printk_seq(struct trace_seq *s) ...@@ -4308,7 +4308,7 @@ trace_printk_seq(struct trace_seq *s)
static void __ftrace_dump(bool disable_tracing) static void __ftrace_dump(bool disable_tracing)
{ {
static arch_spinlock_t ftrace_dump_lock = static arch_spinlock_t ftrace_dump_lock =
(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
/* use static because iter can be a bit big for the stack */ /* use static because iter can be a bit big for the stack */
static struct trace_iterator iter; static struct trace_iterator iter;
unsigned int old_userobj; unsigned int old_userobj;
......
...@@ -74,7 +74,7 @@ static struct { ...@@ -74,7 +74,7 @@ static struct {
arch_spinlock_t lock; arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp = } trace_clock_struct ____cacheline_aligned_in_smp =
{ {
.lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED, .lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
}; };
u64 notrace trace_clock_global(void) u64 notrace trace_clock_global(void)
......
...@@ -29,7 +29,7 @@ static unsigned wakeup_prio = -1; ...@@ -29,7 +29,7 @@ static unsigned wakeup_prio = -1;
static int wakeup_rt; static int wakeup_rt;
static arch_spinlock_t wakeup_lock = static arch_spinlock_t wakeup_lock =
(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static void __wakeup_reset(struct trace_array *tr); static void __wakeup_reset(struct trace_array *tr);
......
...@@ -28,7 +28,7 @@ static struct stack_trace max_stack_trace = { ...@@ -28,7 +28,7 @@ static struct stack_trace max_stack_trace = {
static unsigned long max_stack_size; static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock = static arch_spinlock_t max_stack_lock =
(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static int stack_trace_disabled __read_mostly; static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active); static DEFINE_PER_CPU(int, trace_active);
......
...@@ -23,7 +23,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name, ...@@ -23,7 +23,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
debug_check_no_locks_freed((void *)lock, sizeof(*lock)); debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map(&lock->dep_map, name, key, 0); lockdep_init_map(&lock->dep_map, name, key, 0);
#endif #endif
lock->raw_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
lock->magic = SPINLOCK_MAGIC; lock->magic = SPINLOCK_MAGIC;
lock->owner = SPINLOCK_OWNER_INIT; lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1; lock->owner_cpu = -1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment