nexedi / linux / Commits / 167c0fd7

Commit 167c0fd7, authored Feb 11, 2002 by Linus Torvalds

    Merge bk://are.twiddle.net:8080/linux-alpha-2.5
    into home.transmeta.com:/home/torvalds/v2.5/linux

Parents: 6c645061, c8075847

Showing 3 changed files with 187 additions and 175 deletions:

    arch/alpha/kernel/smp.c         +151  -155
    include/asm-alpha/bitops.h       +20    -8
    include/asm-alpha/spinlock.h     +16   -12

arch/alpha/kernel/smp.c

@@ -62,6 +62,7 @@ static struct {
 enum ipi_message_type {
        IPI_RESCHEDULE,
+       IPI_MIGRATION,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
 };

@@ -69,7 +70,7 @@ enum ipi_message_type {
 spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

 /* Set to a secondary's cpuid when it comes online.  */
-static unsigned long smp_secondary_alive;
+static int smp_secondary_alive __initdata = 0;

 /* Which cpus ids came online.  */
 unsigned long cpu_present_mask;

@@ -82,6 +83,7 @@ int smp_num_probed;		/* Internal processor count */
 int smp_num_cpus = 1;          /* Number that came online.  */
 int smp_threads_ready;         /* True once the per process idle is forked. */
 cycles_t cacheflush_time;
+unsigned long cache_decay_ticks;

 int __cpu_number_map[NR_CPUS];
 int __cpu_logical_map[NR_CPUS];

@@ -156,13 +158,6 @@ smp_callin(void)
 {
        int cpuid = hard_smp_processor_id();

-       if (current != init_tasks[cpu_number_map(cpuid)]) {
-               printk("BUG: smp_calling: cpu %d current %p init_tasks[cpu_number_map(cpuid)] %p\n",
-                      cpuid, current, init_tasks[cpu_number_map(cpuid)]);
-       }
-
-       DBGS(("CALLIN %d state 0x%lx\n", cpuid, current->state));
-
        /* Turn on machine checks.  */
        wrmces(7);

@@ -175,22 +170,21 @@ smp_callin(void)
        /* Get our local ticker going. */
        smp_setup_percpu_timer(cpuid);

+       /* All kernel threads share the same mm context.  */
+       atomic_inc(&init_mm.mm_count);
+       current->active_mm = &init_mm;
+
        /* Must have completely accurate bogos.  */
        __sti();

-       /*
-        * Wait boot CPU to stop with irq enabled before
-        * running calibrate_delay().
-        */
+       /* Wait boot CPU to stop with irq enabled before running
+          calibrate_delay. */
        wait_boot_cpu_to_stop(cpuid);
        mb();
        calibrate_delay();

        smp_store_cpu_info(cpuid);
-       /*
-        * Allow master to continue only after we written
-        * the loops_per_jiffy.
-        */
+
+       /* Allow master to continue only after we written loops_per_jiffy.  */
        wmb();
        smp_secondary_alive = 1;

@@ -198,15 +192,9 @@ smp_callin(void)
        while (!smp_threads_ready)
                barrier();

-       DBGS(("smp_callin: commencing CPU %d current %p\n",
-             cpuid, current));
+       DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
+             cpuid, current, current->active_mm));

-       /* Setup the scheduler for this processor.  */
-       init_idle();
-
-       /* ??? This should be in init_idle.  */
-       atomic_inc(&init_mm.mm_count);
-       current->active_mm = &init_mm;
        /* Do nothing.  */
        cpu_idle();
 }

@@ -222,8 +210,9 @@ static void __init
 smp_tune_scheduling (int cpuid)
 {
        struct percpu_struct *cpu;
-       unsigned long on_chip_cache;
-       unsigned long freq;
+       unsigned long on_chip_cache;    /* kB */
+       unsigned long freq;             /* Hz */
+       unsigned long bandwidth = 350;  /* MB/s */

        cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset
                                      + cpuid * hwrpb->processor_size);

@@ -244,43 +233,54 @@ smp_tune_scheduling (int cpuid)
        case EV6_CPU:
        case EV67_CPU:
                on_chip_cache = 64 + 64;
                break;

        default:
-               on_chip_cache = 8 + 8;
+               on_chip_cache = 64 + 64;
                break;
        }

        freq = hwrpb->cycle_freq ? : est_cycle_freq;

-#if 0
        /* Magic estimation stolen from x86 port.  */
-       cacheflush_time = freq / 1024L * on_chip_cache / 5000L;
-
-       printk("Using heuristic of %d cycles.\n",
-              cacheflush_time);
-#else
-       /* Magic value to force potential preemption of other CPUs.  */
-       cacheflush_time = INT_MAX;
-
-       printk("Using heuristic of %d cycles.\n",
-              cacheflush_time);
-#endif
+       cacheflush_time = (freq / 1000000) * (on_chip_cache << 10) / bandwidth;
+       cache_decay_ticks = cacheflush_time / (freq / 1000) * HZ / 1000;
+
+       printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
+              cacheflush_time / (freq / 1000000),
+              (cacheflush_time * 100 / (freq / 1000000)) % 100);
+       printk("task migration cache decay timeout: %ld msecs.\n",
+              (cache_decay_ticks + 1) * 1000 / HZ);
 }
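
For a concrete feel for the new heuristic, the program below runs the two formulas with made-up numbers (a 500 MHz CPU with 64+64 kB of on-chip cache; the figures are illustrative, not from the commit):

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical figures, not taken from the commit. */
            long freq = 500000000;                  /* 500 MHz */
            long on_chip_cache = 64 + 64;           /* kB      */
            long bandwidth = 350;                   /* MB/s    */
            long hz = 1024;                         /* Alpha HZ */

            long cacheflush_time =
                    (freq / 1000000) * (on_chip_cache << 10) / bandwidth;
            /* = 500 * 131072 / 350 = 187245 cycles */
            long cache_decay_ticks =
                    cacheflush_time / (freq / 1000) * hz / 1000;
            /* = 187245 / 500000 = 0 in integer math */

            printf("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
                   cacheflush_time / (freq / 1000000),
                   (cacheflush_time * 100 / (freq / 1000000)) % 100);
            printf("task migration cache decay timeout: %ld msecs.\n",
                   (cache_decay_ticks + 1) * 1000 / hz);
            return 0;
    }

With these inputs the first line reports "374.49 usecs"; the decay rounds down to 0 ticks, so the second line reports 0 msecs.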

+/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
+static int __init
+wait_for_txrdy (unsigned long cpumask)
+{
+       unsigned long timeout;
+
+       if (!(hwrpb->txrdy & cpumask))
+               return 0;
+
+       timeout = jiffies + 10*HZ;
+       while (time_before(jiffies, timeout)) {
+               if (!(hwrpb->txrdy & cpumask))
+                       return 0;
+               udelay(10);
+               barrier();
+       }
+
+       return -1;
+}
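
The new helper folds the two hand-rolled delay loops below into one place: test the fast path first, then poll with a bounded budget, using time_before() so the comparison survives jiffies wrap-around. A sketch of the same pattern in portable userspace C, with device_ready and the 10-second budget standing in for the hwrpb->txrdy test and 10*HZ:

    #include <stdbool.h>
    #include <time.h>

    static int wait_until_ready(bool (*device_ready)(void))
    {
            struct timespec start, now;

            if (device_ready())             /* fast path: no waiting */
                    return 0;

            clock_gettime(CLOCK_MONOTONIC, &start);
            for (;;) {
                    if (device_ready())
                            return 0;
                    clock_gettime(CLOCK_MONOTONIC, &now);
                    if (now.tv_sec - start.tv_sec >= 10)
                            return -1;      /* timed out */
            }
    }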

 /*
  * Send a message to a secondary's console.  "START" is one such
  * interesting message.  ;-)
  */
-static void
+static void __init
 send_secondary_console_msg(char *str, int cpuid)
 {
        struct percpu_struct *cpu;
        register char *cp1, *cp2;
        unsigned long cpumask;
        size_t len;
-       long timeout;

        cpu = (struct percpu_struct *) ((char*)hwrpb

@@ -288,9 +288,8 @@ send_secondary_console_msg(char *str, int cpuid)
                                        + hwrpb->processor_offset
                                        + cpuid * hwrpb->processor_size);

        cpumask = (1UL << cpuid);
-       if (hwrpb->txrdy & cpumask)
-               goto delay1;
- ready1:
+       if (wait_for_txrdy(cpumask))
+               goto timeout;

        cp2 = str;
        len = strlen(cp2);

@@ -302,34 +301,12 @@ send_secondary_console_msg(char *str, int cpuid)
        wmb();
        set_bit(cpuid, &hwrpb->rxrdy);

-       if (hwrpb->txrdy & cpumask)
-               goto delay2;
- ready2:
-       return;
-
- delay1:
-       /* Wait 10 seconds.  Note that jiffies aren't ticking yet.  */
-       for (timeout = 1000000; timeout > 0; --timeout) {
-               if (!(hwrpb->txrdy & cpumask))
-                       goto ready1;
-               udelay(10);
-               barrier();
-       }
-       goto timeout;
-
- delay2:
-       /* Wait 10 seconds.  */
-       for (timeout = 1000000; timeout > 0; --timeout) {
-               if (!(hwrpb->txrdy & cpumask))
-                       goto ready2;
-               udelay(10);
-               barrier();
-       }
-
- timeout:
+       if (wait_for_txrdy(cpumask))
+               goto timeout;
+       return;
+
+ timeout:
        printk("Processor %x not ready\n", cpuid);
-       return;
 }

 /*

@@ -392,7 +369,7 @@ static int __init
 secondary_cpu_start(int cpuid, struct task_struct *idle)
 {
        struct percpu_struct *cpu;
-       struct pcb_struct *hwpcb;
+       struct pcb_struct *hwpcb, *ipcb;
        long timeout;

        cpu = (struct percpu_struct *)

@@ -400,18 +377,19 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
                + hwrpb->processor_offset
                + cpuid * hwrpb->processor_size);
        hwpcb = (struct pcb_struct *) cpu->hwpcb;
+       ipcb = &idle->thread_info->pcb;

        /* Initialize the CPU's HWPCB to something just good enough for
           us to get started.  Immediately after starting, we'll swpctx
-          to the target idle task's ptb.  Reuse the stack in the mean
+          to the target idle task's pcb.  Reuse the stack in the mean
           time.  Precalculate the target PCBB.  */
-       hwpcb->ksp = (unsigned long) idle + sizeof(union task_union) - 16;
+       hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
        hwpcb->usp = 0;
-       hwpcb->ptbr = idle->thread.ptbr;
+       hwpcb->ptbr = ipcb->ptbr;
        hwpcb->pcc = 0;
        hwpcb->asn = 0;
-       hwpcb->unique = virt_to_phys(&idle->thread);
-       hwpcb->flags = idle->thread.pal_flags;
+       hwpcb->unique = virt_to_phys(ipcb);
+       hwpcb->flags = ipcb->flags;
        hwpcb->res1 = hwpcb->res2 = 0;

 #if 0

@@ -419,7 +397,7 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
              hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
 #endif
        DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
-             cpuid, idle->state, idle->thread.pal_flags));
+             cpuid, idle->state, ipcb->flags));

        /* Setup HWRPB fields that SRM uses to activate secondary CPU */
        hwrpb->CPU_restart = __smp_callin;

@@ -439,9 +417,9 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
        send_secondary_console_msg("START\r\n", cpuid);

-       /* Wait 10 seconds for an ACK from the console.  Note that jiffies
-          aren't ticking yet.  */
-       for (timeout = 1000000; timeout > 0; timeout--) {
+       /* Wait 10 seconds for an ACK from the console.  */
+       timeout = jiffies + 10*HZ;
+       while (time_before(jiffies, timeout)) {
                if (cpu->flags & 1)
                        goto started;
                udelay(10);

@@ -450,18 +428,17 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
        printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
        return -1;

-started:
+ started:
        DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
        return 0;
 }

-static int __init fork_by_hand(void)
+static int __init
+fork_by_hand(void)
 {
+       /* Don't care about the contents of regs since we'll never
+          reschedule the forked task.  */
        struct pt_regs regs;
-       /*
-        * don't care about the regs settings since
-        * we'll never reschedule the forked task.
-        */
        return do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);
 }

@@ -474,67 +451,57 @@ smp_boot_one_cpu(int cpuid, int cpunum)
        struct task_struct *idle;
        long timeout;

-       /* Cook up an idler for this guy.  Note that the address we give
-          to kernel_thread is irrelevant -- it's going to start where
-          HWRPB.CPU_restart says to start.  But this gets all the other
-          task-y sort of data structures set up like we wish.  */
-       /*
-        * We can't use kernel_thread since we must avoid to
-        * reschedule the child.
-        */
+       /* Cook up an idler for this guy.  Note that the address we
+          give to kernel_thread is irrelevant -- it's going to start
+          where HWRPB.CPU_restart says to start.  But this gets all
+          the other task-y sort of data structures set up like we
+          wish.  We can't use kernel_thread since we must avoid
+          rescheduling the child.  */
        if (fork_by_hand() < 0)
                panic("failed fork for CPU %d", cpuid);

        idle = init_task.prev_task;
        if (!idle)
                panic("No idle process for CPU %d", cpuid);
-       if (idle == &init_task)
-               panic("idle process is init_task for CPU %d", cpuid);

-       idle->processor = cpuid;
-       idle->cpus_runnable = 1 << cpuid; /* we schedule the first task manually */
+       init_idle(idle, cpuid);
+       unhash_process(idle);
+
        __cpu_logical_map[cpunum] = cpuid;
        __cpu_number_map[cpuid] = cpunum;

-       del_from_runqueue(idle);
-       unhash_process(idle);
-       init_tasks[cpunum] = idle;
-
        DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
              cpuid, idle->state, idle->flags));

-       /* The secondary will change this once it is happy.  Note that
-          secondary_cpu_start contains the necessary memory barrier.  */
+       /* Signal the secondary to wait a moment.  */
        smp_secondary_alive = -1;

        /* Whirrr, whirrr, whirrrrrrrrr... */
        if (secondary_cpu_start(cpuid, idle))
                return -1;

+       /* Notify the secondary CPU it can run calibrate_delay.  */
        mb();
-       /* Notify the secondary CPU it can run calibrate_delay() */
        smp_secondary_alive = 0;

-       /* We've been acked by the console; wait one second for the task
-          to start up for real.  Note that jiffies aren't ticking yet.  */
-       for (timeout = 0; timeout < 1000000; timeout++) {
+       /* We've been acked by the console; wait one second for
+          the task to start up for real.  */
+       timeout = jiffies + 1*HZ;
+       while (time_before(jiffies, timeout)) {
                if (smp_secondary_alive == 1)
                        goto alive;
                udelay(10);
                barrier();
        }

-       /*
-        * we must invalidate our stuff as we failed to boot the CPU
-        */
+       /* We must invalidate our stuff as we failed to boot the CPU.  */
        __cpu_logical_map[cpunum] = -1;
        __cpu_number_map[cpuid] = -1;

-       /* the idle task is local to us so free it as we don't use it */
+       /* The idle task is local to us so free it as we don't use it.  */
        free_task_struct(idle);

        printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
        return -1;

-alive:
+ alive:
        /* Another "Red Snapper". */
        return 0;
 }
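
The boot sequence above is a three-state handshake through smp_secondary_alive: -1 tells the secondary to wait, 0 releases it to calibrate, and 1 is the secondary reporting in. A minimal sketch of that protocol with C11 atomics (the two-function framing is illustrative; the kernel code relies on mb()/wmb() and plain stores instead):

    #include <stdatomic.h>

    static atomic_int secondary_alive = -1; /* -1: wait, 0: go, 1: alive */

    static void boot_cpu(void)
    {
            atomic_store(&secondary_alive, -1);     /* secondary must wait  */
            /* ... start the secondary via the console ... */
            atomic_store(&secondary_alive, 0);      /* let it calibrate     */
            while (atomic_load(&secondary_alive) != 1)
                    ;                               /* poll until it reports */
    }

    static void secondary_cpu(void)
    {
            while (atomic_load(&secondary_alive) != 0)
                    ;                               /* wait for the go code */
            /* ... calibrate_delay(), smp_store_cpu_info() ... */
            atomic_store(&secondary_alive, 1);      /* report in */
    }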

@@ -605,20 +572,15 @@ smp_boot_cpus(void)
        __cpu_number_map[boot_cpuid] = 0;
        __cpu_logical_map[0] = boot_cpuid;
-       current->processor = boot_cpuid;
+       current_thread_info()->cpu = boot_cpuid;

        smp_store_cpu_info(boot_cpuid);
        smp_tune_scheduling(boot_cpuid);
        smp_setup_percpu_timer(boot_cpuid);

-       init_idle();
-
-       /* ??? This should be in init_idle.  */
-       atomic_inc(&init_mm.mm_count);
-       current->active_mm = &init_mm;
-
        /* Nothing to do on a UP box, or when told not to.  */
        if (smp_num_probed == 1 || max_cpus == 0) {
                cpu_present_mask = 1UL << boot_cpuid;
                printk(KERN_INFO "SMP mode deactivated.\n");
                return;
        }

@@ -707,26 +669,35 @@ setup_profiling_timer(unsigned int multiplier)
 static void
 send_ipi_message(unsigned long to_whom, enum ipi_message_type operation)
 {
-       long i, j;
-
-       /* Reduce the number of memory barriers by doing two loops,
-          one to set the bits, one to invoke the interrupts.  */
-
-       mb();   /* Order out-of-band data and bit setting. */
-
-       for (i = 0, j = 1; i < NR_CPUS; ++i, j <<= 1) {
-               if (to_whom & j)
-                       set_bit(operation, &ipi_data[i].bits);
+       unsigned long i, set, n;
+
+       set = to_whom & -to_whom;
+       if (to_whom == set) {
+               n = __ffs(set);
+               mb();
+               set_bit(operation, &ipi_data[n].bits);
+               mb();
+               wripir(n);
        }
-
-       mb();   /* Order bit setting and interrupt. */
-
-       for (i = 0, j = 1; i < NR_CPUS; ++i, j <<= 1) {
-               if (to_whom & j)
-                       wripir(i);
+       else {
+               mb();
+               for (i = to_whom; i ; i &= ~set) {
+                       set = i & -i;
+                       n = __ffs(set);
+                       set_bit(operation, &ipi_data[n].bits);
+               }
+               mb();
+               for (i = to_whom; i ; i &= ~set) {
+                       set = i & -i;
+                       n = __ffs(set);
+                       wripir(n);
+               }
        }
 }
+
+/* Data for IPI_MIGRATION.  */
+static task_t *migration_task;

 /* Structure and data for smp_call_function.  This is designed to
    minimize static memory requirements.  Plus it looks cleaner.  */
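
The rewritten loop walks only the set bits of the mask: i & -i isolates the lowest set bit, i &= ~set clears it, and __ffs converts the bit to a CPU number, so sending to one CPU touches one ipi_data slot instead of scanning all NR_CPUS positions. A standalone demonstration, with __builtin_ctzl standing in for the kernel's __ffs:

    #include <stdio.h>

    int main(void)
    {
            unsigned long to_whom = 0x29;   /* example mask: CPUs 0, 3, 5 */
            unsigned long i, set;

            for (i = to_whom; i; i &= ~set) {
                    set = i & -i;           /* isolate lowest set bit */
                    printf("CPU %d\n", __builtin_ctzl(set));
            }
            return 0;
    }

This prints CPUs 0, 3 and 5, one loop iteration per recipient.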

@@ -743,13 +714,13 @@ static struct smp_call_struct *smp_call_function_data;
 /* Atomicly drop data into a shared pointer.  The pointer is free if
    it is initially locked.  If retry, spin until free.  */

-static inline int
+static int
 pointer_lock (void *lock, void *data, int retry)
 {
        void *old, *tmp;

        mb();
-again:
+ again:
        /* Compare and swap with zero.  */
        asm volatile (
        "1:     ldq_l   %0,%1\n"

@@ -792,13 +763,25 @@ handle_ipi(struct pt_regs *regs)
                which = ops & -ops;
                ops &= ~which;
-               which = ffz(~which);
+               which = __ffs(which);

-               if (which == IPI_RESCHEDULE) {
+               switch (which) {
+               case IPI_RESCHEDULE:
                        /* Reschedule callback.  Everything to be done
                           is done by the interrupt return path.  */
-               }
-               else if (which == IPI_CALL_FUNC) {
+                       break;
+
+               case IPI_MIGRATION: {
+                       task_t *t = migration_task;
+                       mb();
+                       migration_task = 0;
+                       sched_task_migrated(t);
+                       break;
+               }
+
+               case IPI_CALL_FUNC: {
                        struct smp_call_struct *data;
                        void (*func)(void *info);
                        void *info;

@@ -821,13 +804,16 @@ handle_ipi(struct pt_regs *regs)
                        /* Notify the sending CPU that the task is done.  */
                        mb();
                        if (wait) atomic_dec(&data->unfinished_count);
+                       break;
                }
-               else if (which == IPI_CPU_STOP) {
+
+               case IPI_CPU_STOP:
                        halt();
-               }
-               else {
+
+               default:
                        printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
                               this_cpu, which);
+                       break;
                }
        } while (ops);
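
Note the switch also swaps ffz(~which) for __ffs(which): the first zero bit of a complemented word is the first set bit of the original, so the two are interchangeable here. A quick check with portable stand-ins for the kernel helpers:

    #include <assert.h>

    static unsigned long my_ffs(unsigned long x) { return __builtin_ctzl(x); }
    static unsigned long my_ffz(unsigned long x) { return __builtin_ctzl(~x); }

    int main(void)
    {
            unsigned long which = 0x28;             /* lowest set bit is 3 */
            assert(my_ffz(~which) == my_ffs(which)); /* both yield 3 */
            return 0;
    }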

@@ -851,10 +837,23 @@ smp_send_reschedule(int cpu)
        send_ipi_message(1UL << cpu, IPI_RESCHEDULE);
 }

+void
+smp_migrate_task(int cpu, task_t *t)
+{
+#if DEBUG_IPI_MSG
+       if (cpu == hard_smp_processor_id())
+               printk(KERN_WARNING
+                      "smp_migrate_task: Sending IPI to self.\n");
+#endif
+       /* Acquire the migration_task mutex.  */
+       pointer_lock(&migration_task, t, 1);
+       send_ipi_message(1UL << cpu, IPI_MIGRATION);
+}
+
 void
 smp_send_stop(void)
 {
-       unsigned long to_whom = cpu_present_mask ^ (1UL << smp_processor_id());
+       unsigned long to_whom = cpu_present_mask & ~(1UL << smp_processor_id());
 #if DEBUG_IPI_MSG
        if (hard_smp_processor_id() != boot_cpu_id)
                printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");

@@ -881,16 +880,13 @@ smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
        struct smp_call_struct data;
        long timeout;
        int num_cpus_to_call;
-       long i, j;

        data.func = func;
        data.info = info;
        data.wait = wait;

        to_whom &= ~(1L << smp_processor_id());
-       for (i = 0, j = 1, num_cpus_to_call = 0; i < NR_CPUS; ++i, j <<= 1)
-               if (to_whom & j)
-                       num_cpus_to_call++;
+       num_cpus_to_call = hweight64(to_whom);

        atomic_set(&data.unstarted_count, num_cpus_to_call);
        atomic_set(&data.unfinished_count, num_cpus_to_call);

@@ -1094,7 +1090,7 @@ flush_icache_page(struct vm_area_struct *vma, struct page *page)
 #ifdef CONFIG_DEBUG_SPINLOCK
 void
-spin_unlock(spinlock_t * lock)
+_raw_spin_unlock(spinlock_t * lock)
 {
        mb();
        lock->lock = 0;

@@ -1185,7 +1181,7 @@ debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
 #endif /* CONFIG_DEBUG_SPINLOCK */

 #ifdef CONFIG_DEBUG_RWLOCK
-void write_lock(rwlock_t * lock)
+void _raw_write_lock(rwlock_t * lock)
 {
        long regx, regy;
        int stuck_lock, stuck_reader;

@@ -1230,7 +1226,7 @@ void write_lock(rwlock_t * lock)
        }
 }

-void read_lock(rwlock_t * lock)
+void _raw_read_lock(rwlock_t * lock)
 {
        long regx;
        int stuck_lock;

include/asm-alpha/bitops.h

@@ -3,6 +3,7 @@
 #include <linux/config.h>
 #include <linux/kernel.h>
+#include <asm/compiler.h>

 /*
  * Copyright 1994, Linus Torvalds.

@@ -60,14 +61,14 @@ clear_bit(unsigned long nr, volatile void * addr)
        __asm__ __volatile__(
        "1:     ldl_l %0,%3\n"
-       "       and %0,%2,%0\n"
+       "       bic %0,%2,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (*m)
-       :"Ir" (~(1UL << (nr & 31))), "m" (*m));
+       :"Ir" (1UL << (nr & 31)), "m" (*m));
 }
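
The new constraint passes the uncomplemented mask and lets bic (and-not) do the inversion in the ALU; presumably this lets small constant masks use the instruction's literal-operand form, which ~(1UL << n) never could (the rationale is inferred, not stated in the commit). At the C level the whole LL/SC sequence is an atomic and-not; a sketch using a GCC builtin in place of the retry loop:

    /* Atomically clear bit nr of the 32-bit word containing it, as the
       kernel sequence above does; __atomic_fetch_and hides the retry. */
    static inline void clear_bit_c(unsigned long nr, volatile void *addr)
    {
            volatile unsigned int *m = (volatile unsigned int *)addr + (nr >> 5);

            __atomic_fetch_and(m, ~(1U << (nr & 31)), __ATOMIC_RELAXED);
    }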

@@ -246,12 +247,15 @@ test_bit(int nr, volatile void * addr)
  */
 static inline unsigned long ffz_b(unsigned long x)
 {
-       unsigned long sum = 0;
+       unsigned long sum, x1, x2, x4;

        x = ~x & -~x;           /* set first 0 bit, clear others */
-       if (x & 0xF0) sum += 4;
-       if (x & 0xCC) sum += 2;
-       if (x & 0xAA) sum += 1;
+       x1 = x & 0xAA;
+       x2 = x & 0xCC;
+       x4 = x & 0xF0;
+
+       sum = x2 ? 2 : 0;
+       sum += (x4 != 0) * 4;
+       sum += (x1 != 0);

        return sum;
 }
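
The rewrite computes the three mask tests independently and combines them without chained conditional increments, which likely schedules better on Alpha's pipeline (the motivation is inferred, not stated in the commit). Extracted and exercised standalone:

    #include <assert.h>

    /* Position of the first zero bit in a byte, as rewritten above. */
    static unsigned long ffz_b(unsigned long x)
    {
            unsigned long sum, x1, x2, x4;

            x = ~x & -~x;           /* set first 0 bit, clear others */
            x1 = x & 0xAA;
            x2 = x & 0xCC;
            x4 = x & 0xF0;

            sum = x2 ? 2 : 0;
            sum += (x4 != 0) * 4;
            sum += (x1 != 0);
            return sum;
    }

    int main(void)
    {
            assert(ffz_b(0xFF & ~0x08) == 3);       /* bit 3 clear -> 3 */
            assert(ffz_b(0x00) == 0);               /* bit 0 clear -> 0 */
            assert(ffz_b(0x7F) == 7);               /* only bit 7 clear */
            return 0;
    }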

@@ -268,7 +272,7 @@ static inline unsigned long ffz(unsigned long word)
        __asm__("cmpbge %1,%2,%0" : "=r"(bits) : "r"(word), "r"(~0UL));
        qofs = ffz_b(bits);
-       __asm__("extbl %1,%2,%0" : "=r"(bits) : "r"(word), "r"(qofs));
+       bits = __kernel_extbl(word, qofs);
        bofs = ffz_b(bits);

        return qofs*8 + bofs;

@@ -290,7 +294,7 @@ static inline unsigned long __ffs(unsigned long word)
        __asm__("cmpbge $31,%1,%0" : "=r"(bits) : "r"(word));
        qofs = ffz_b(bits);
-       __asm__("extbl %1,%2,%0" : "=r"(bits) : "r"(word), "r"(qofs));
+       bits = __kernel_extbl(word, qofs);
        bofs = ffz_b(~bits);

        return qofs*8 + bofs;
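
Both ffz and __ffs lean on cmpbge, which compares all eight bytes of two words in parallel and returns a byte-mask, so ffz_b on the mask picks the interesting byte; the follow-up extbl byte extraction now goes through the __kernel_extbl helper from <asm/compiler.h> instead of inline asm. A portable simulation of cmpbge to show the trick:

    #include <assert.h>

    /* Bit i of the result is set when byte i of a is >= byte i of b
       (unsigned), as the Alpha cmpbge instruction computes. */
    static unsigned long cmpbge(unsigned long a, unsigned long b)
    {
            unsigned long mask = 0;
            int i;

            for (i = 0; i < 8; i++) {
                    unsigned char ab = a >> (i * 8), bb = b >> (i * 8);
                    if (ab >= bb)
                            mask |= 1UL << i;
            }
            return mask;
    }

    int main(void)
    {
            unsigned long a = ~0UL ^ (0x80UL << 16);  /* byte 2 = 0x7F */

            /* cmpbge(a, ~0UL) marks the all-ones bytes; only byte 2
               contains a zero bit, so only bit 2 is clear, and
               ffz_b(0xFB) = 2 selects that byte. */
            assert(cmpbge(a, ~0UL) == 0xFB);
            return 0;
    }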

@@ -349,6 +353,14 @@ static inline unsigned long hweight64(unsigned long w)
 #define hweight16(x) hweight64((x) & 0xfffful)
 #define hweight8(x)  hweight64((x) & 0xfful)
 #else
+static inline unsigned long hweight64(unsigned long w)
+{
+       unsigned long result;
+       for (result = 0; w ; w >>= 1)
+               result += (w & 1);
+       return result;
+}
+
 #define hweight32(x) generic_hweight32(x)
 #define hweight16(x) generic_hweight16(x)
 #define hweight8(x)  generic_hweight8(x)
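
The #else branch gains a portable hweight64 fallback that counts one bit per iteration; smp.c above now uses hweight64 to count the CPUs in an IPI target mask. Extracted:

    #include <assert.h>

    static unsigned long hweight64(unsigned long w)
    {
            unsigned long result;
            for (result = 0; w; w >>= 1)
                    result += (w & 1);
            return result;
    }

    int main(void)
    {
            assert(hweight64(0x29) == 3);   /* CPUs 0, 3 and 5 -> 3 callees */
            assert(hweight64(0) == 0);
            return 0;
    }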

include/asm-alpha/spinlock.h

@@ -38,12 +38,12 @@ typedef struct {
 #define spin_unlock_wait(x)    ({ do { barrier(); } while ((x)->lock); })

 #if CONFIG_DEBUG_SPINLOCK
-extern void spin_unlock(spinlock_t * lock);
+extern void _raw_spin_unlock(spinlock_t * lock);
 extern void debug_spin_lock(spinlock_t * lock, const char *, int);
 extern int debug_spin_trylock(spinlock_t * lock, const char *, int);
-#define spin_lock(LOCK) debug_spin_lock(LOCK, __BASE_FILE__, __LINE__)
-#define spin_trylock(LOCK) debug_spin_trylock(LOCK, __BASE_FILE__, __LINE__)
+#define _raw_spin_lock(LOCK) debug_spin_lock(LOCK, __BASE_FILE__, __LINE__)
+#define _raw_spin_trylock(LOCK) debug_spin_trylock(LOCK, __BASE_FILE__, __LINE__)
 #define spin_lock_own(LOCK, LOCATION) \
 do { \

@@ -54,13 +54,13 @@ do { \
                (LOCK)->lock ? "taken" : "freed", (LOCK)->on_cpu); \
 } while (0)
 #else
-static inline void spin_unlock(spinlock_t * lock)
+static inline void _raw_spin_unlock(spinlock_t * lock)
 {
        mb();
        lock->lock = 0;
 }

-static inline void spin_lock(spinlock_t * lock)
+static inline void _raw_spin_lock(spinlock_t * lock)
 {
        long tmp;

@@ -83,7 +83,11 @@ static inline void spin_lock(spinlock_t * lock)
        : "m"(lock->lock) : "memory");
 }

-#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
+static inline int _raw_spin_trylock(spinlock_t * lock)
+{
+       return !test_and_set_bit(0, &lock->lock);
+}
+
 #define spin_lock_own(LOCK, LOCATION)  ((void)0)
 #endif /* CONFIG_DEBUG_SPINLOCK */
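
Turning spin_trylock from a macro into the _raw_spin_trylock inline both type-checks the argument and makes the operand explicit: the old macro handed the spinlock_t pointer itself to test_and_set_bit, while the inline passes &lock->lock. A self-contained usage sketch (the test_and_set_bit stand-in here is non-atomic, purely for illustration):

    #include <assert.h>

    typedef struct { volatile unsigned int lock; } spinlock_t;

    static int test_and_set_bit(int nr, volatile unsigned int *p)
    {
            unsigned int old = *p & (1U << nr);
            *p |= 1U << nr;
            return old != 0;
    }

    static inline int _raw_spin_trylock(spinlock_t *lock)
    {
            return !test_and_set_bit(0, &lock->lock);
    }

    int main(void)
    {
            spinlock_t l = { 0 };
            assert(_raw_spin_trylock(&l) == 1);     /* acquired     */
            assert(_raw_spin_trylock(&l) == 0);     /* already held */
            return 0;
    }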

@@ -98,10 +102,10 @@ typedef struct {
 #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)

 #if CONFIG_DEBUG_RWLOCK
-extern void write_lock(rwlock_t * lock);
-extern void read_lock(rwlock_t * lock);
+extern void _raw_write_lock(rwlock_t * lock);
+extern void _raw_read_lock(rwlock_t * lock);
 #else
-static inline void write_lock(rwlock_t * lock)
+static inline void _raw_write_lock(rwlock_t * lock)
 {
        long regx;

@@ -121,7 +125,7 @@ static inline void write_lock(rwlock_t * lock)
        : "0" (*(volatile int *)lock) : "memory");
 }

-static inline void read_lock(rwlock_t * lock)
+static inline void _raw_read_lock(rwlock_t * lock)
 {
        long regx;

@@ -142,13 +146,13 @@ static inline void read_lock(rwlock_t * lock)
 }
 #endif /* CONFIG_DEBUG_RWLOCK */

-static inline void write_unlock(rwlock_t * lock)
+static inline void _raw_write_unlock(rwlock_t * lock)
 {
        mb();
        *(volatile int *)lock = 0;
 }

-static inline void read_unlock(rwlock_t * lock)
+static inline void _raw_read_unlock(rwlock_t * lock)
 {
        long regx;
        __asm__ __volatile__(
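
The _raw_ prefix on all of these entry points matches the layering the 2.5 tree was moving toward, where generic code wraps the architecture's raw primitives with extra bookkeeping. A plausible sketch of such a wrapper, assuming preempt_disable/preempt_enable counterparts that are not part of this commit:

    #define spin_lock(lock)                 \
    do {                                    \
            preempt_disable();              \
            _raw_spin_lock(lock);           \
    } while (0)

    #define spin_unlock(lock)               \
    do {                                    \
            _raw_spin_unlock(lock);         \
            preempt_enable();               \
    } while (0)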