Commit d7bb545d authored by Linus Torvalds

Merge branch 'semaphore' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc

* 'semaphore' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc:
  Remove DEBUG_SEMAPHORE from Kconfig
  Improve semaphore documentation
  Simplify semaphore implementation
  Add down_timeout and change ACPI to use it
  Introduce down_killable()
  Generic semaphore implementation
  Add semaphore.h to kernel_lock.c
  Fix quota.h includes
parents 75e98b34 2342e51b
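For orientation, the calls consolidated by this merge are the ones declared in the new <linux/semaphore.h>, now backed by a single generic implementation instead of the per-architecture files removed below. The sketch that follows is the editor's illustration of how a caller exercises that API after this series; my_sem, example_init() and example_use() are hypothetical names, not code from this commit.

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>

static struct semaphore my_sem;			/* hypothetical example semaphore */

static void example_init(void)
{
	sema_init(&my_sem, 1);			/* one token: behaves like a sleeping mutex */
}

static int example_use(void)
{
	down(&my_sem);				/* sleep uninterruptibly until the token is ours */
	up(&my_sem);				/* release the token, waking one waiter if any */

	if (down_interruptible(&my_sem))	/* -EINTR if any signal arrives while waiting */
		return -EINTR;
	up(&my_sem);

	if (down_killable(&my_sem))		/* new in this series: only fatal signals interrupt the wait */
		return -EINTR;
	up(&my_sem);

	if (down_timeout(&my_sem, HZ))		/* new in this series: give up after roughly one second */
		return -ETIME;
	up(&my_sem);

	if (down_trylock(&my_sem))		/* non-zero means the semaphore was busy; never sleeps */
		return -EBUSY;
	up(&my_sem);

	return 0;
}

The diff hunks below simply stop building each architecture's own semaphore object and drop the corresponding exports and includes.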
@@ -7,7 +7,7 @@ EXTRA_AFLAGS := $(KBUILD_CFLAGS)
 EXTRA_CFLAGS := -Werror -Wno-sign-compare
 obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
-	irq_alpha.o signal.o setup.o ptrace.o time.o semaphore.o \
+	irq_alpha.o signal.o setup.o ptrace.o time.o \
 	alpha_ksyms.o systbls.o err_common.o io.o
 obj-$(CONFIG_VGA_HOSE) += console.o
......
@@ -77,15 +77,6 @@ EXPORT_SYMBOL(__do_clear_user);
 EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(__strnlen_user);
-/* Semaphore helper functions. */
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__up_wakeup);
-EXPORT_SYMBOL(down);
-EXPORT_SYMBOL(down_interruptible);
-EXPORT_SYMBOL(down_trylock);
-EXPORT_SYMBOL(up);
 /*
  * SMP-specific symbols.
  */
......
/*
* Alpha semaphore implementation.
*
* (C) Copyright 1996 Linus Torvalds
* (C) Copyright 1999, 2000 Richard Henderson
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/init.h>
/*
* This is basically the PPC semaphore scheme ported to use
* the Alpha ll/sc sequences, so see the PPC code for
* credits.
*/
/*
* Atomically update sem->count.
* This does the equivalent of the following:
*
* old_count = sem->count;
* tmp = MAX(old_count, 0) + incr;
* sem->count = tmp;
* return old_count;
*/
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
long old_count, tmp = 0;
__asm__ __volatile__(
"1: ldl_l %0,%2\n"
" cmovgt %0,%0,%1\n"
" addl %1,%3,%1\n"
" stl_c %1,%2\n"
" beq %1,2f\n"
" mb\n"
".subsection 2\n"
"2: br 1b\n"
".previous"
: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
: "Ir" (incr), "1" (tmp), "m" (sem->count));
return old_count;
}
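Purely as an editor's illustration of the update rule in the comment above, the same "max(old_count, 0) + incr" step can be written as a portable compare-and-swap loop; this sketch assumes C11 atomics and is not the kernel's code:

#include <stdatomic.h>

static int sem_update_count_sketch(atomic_int *count, int incr)
{
	int old = atomic_load(count);
	int new;

	do {
		/* tmp = MAX(old_count, 0) + incr, exactly as the ll/sc sequence above */
		new = (old > 0 ? old : 0) + incr;
	} while (!atomic_compare_exchange_weak(count, &old, new));

	return old;	/* the value observed before the update */
}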
/*
* Perform the "down" function. Return zero for semaphore acquired,
* return negative for signalled out of the function.
*
* If called from down, the return is ignored and the wait loop is
* not interruptible. This means that a task waiting on a semaphore
* using "down()" cannot be killed until someone does an "up()" on
* the semaphore.
*
* If called from down_interruptible, the return value gets checked
* upon return. If the return value is negative then the task continues
* with the negative value in the return register (it can be tested by
* the caller).
*
* Either form may be used in conjunction with "up()".
*/
void __sched
__down_failed(struct semaphore *sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
#ifdef CONFIG_DEBUG_SEMAPHORE
printk("%s(%d): down failed(%p)\n",
tsk->comm, task_pid_nr(tsk), sem);
#endif
tsk->state = TASK_UNINTERRUPTIBLE;
wmb();
add_wait_queue_exclusive(&sem->wait, &wait);
/*
* Try to get the semaphore. If the count is > 0, then we've
* got the semaphore; we decrement count and exit the loop.
* If the count is 0 or negative, we set it to -1, indicating
* that we are asleep, and then sleep.
*/
while (__sem_update_count(sem, -1) <= 0) {
schedule();
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
}
remove_wait_queue(&sem->wait, &wait);
tsk->state = TASK_RUNNING;
/*
* If there are any more sleepers, wake one of them up so
* that it can either get the semaphore, or set count to -1
* indicating that there are still processes sleeping.
*/
wake_up(&sem->wait);
#ifdef CONFIG_DEBUG_SEMAPHORE
printk("%s(%d): down acquired(%p)\n",
tsk->comm, task_pid_nr(tsk), sem);
#endif
}
int __sched
__down_failed_interruptible(struct semaphore *sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
long ret = 0;
#ifdef CONFIG_DEBUG_SEMAPHORE
printk("%s(%d): down failed(%p)\n",
tsk->comm, task_pid_nr(tsk), sem);
#endif
tsk->state = TASK_INTERRUPTIBLE;
wmb();
add_wait_queue_exclusive(&sem->wait, &wait);
while (__sem_update_count(sem, -1) <= 0) {
if (signal_pending(current)) {
/*
* A signal is pending - give up trying.
* Set sem->count to 0 if it is negative,
* since we are no longer sleeping.
*/
__sem_update_count(sem, 0);
ret = -EINTR;
break;
}
schedule();
set_task_state(tsk, TASK_INTERRUPTIBLE);
}
remove_wait_queue(&sem->wait, &wait);
tsk->state = TASK_RUNNING;
wake_up(&sem->wait);
#ifdef CONFIG_DEBUG_SEMAPHORE
printk("%s(%d): down %s(%p)\n",
current->comm, task_pid_nr(current),
(ret < 0 ? "interrupted" : "acquired"), sem);
#endif
return ret;
}
void
__up_wakeup(struct semaphore *sem)
{
/*
* Note that we incremented count in up() before we came here,
* but that was ineffective since the result was <= 0, and
* any negative value of count is equivalent to 0.
* This ends up setting count to 1, unless count is now > 0
* (i.e. because some other cpu has called up() in the meantime),
* in which case we just increment count.
*/
__sem_update_count(sem, 1);
wake_up(&sem->wait);
}
void __sched
down(struct semaphore *sem)
{
#ifdef WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
#ifdef CONFIG_DEBUG_SEMAPHORE
printk("%s(%d): down(%p) <count=%d> from %p\n",
current->comm, task_pid_nr(current), sem,
atomic_read(&sem->count), __builtin_return_address(0));
#endif
__down(sem);
}
int __sched
down_interruptible(struct semaphore *sem)
{
#ifdef WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
#ifdef CONFIG_DEBUG_SEMAPHORE
printk("%s(%d): down(%p) <count=%d> from %p\n",
current->comm, task_pid_nr(current), sem,
atomic_read(&sem->count), __builtin_return_address(0));
#endif
return __down_interruptible(sem);
}
int
down_trylock(struct semaphore *sem)
{
int ret;
#ifdef WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
ret = __down_trylock(sem);
#ifdef CONFIG_DEBUG_SEMAPHORE
printk("%s(%d): down_trylock %s from %p\n",
current->comm, task_pid_nr(current),
ret ? "failed" : "acquired",
__builtin_return_address(0));
#endif
return ret;
}
void
up(struct semaphore *sem)
{
#ifdef WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
#ifdef CONFIG_DEBUG_SEMAPHORE
printk("%s(%d): up(%p) <count=%d> from %p\n",
current->comm, task_pid_nr(current), sem,
atomic_read(&sem->count), __builtin_return_address(0));
#endif
__up(sem);
}
@@ -7,7 +7,7 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
 # Object file lists.
 obj-y := compat.o entry-armv.o entry-common.o irq.o \
-	process.o ptrace.o semaphore.o setup.o signal.o \
+	process.o ptrace.o setup.o signal.o \
 	sys_arm.o stacktrace.o time.o traps.o
 obj-$(CONFIG_ISA_DMA_API) += dma.o
......
/*
* ARM semaphore implementation, taken from
*
* i386 semaphore implementation.
*
* (C) Copyright 1999 Linus Torvalds
*
* Modified for ARM by Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/semaphore.h>
/*
* Semaphores are implemented using a two-way counter:
* The "count" variable is decremented for each process
* that tries to acquire the semaphore, while the "sleeping"
* variable is a count of such acquires.
*
* Notably, the inline "up()" and "down()" functions can
* efficiently test if they need to do any extra work (up
* needs to do something only if count was negative before
* the increment operation.
*
* "sleeping" and the contention routine ordering is
* protected by the semaphore spinlock.
*
* Note that these functions are only called when there is
* contention on the lock, and as such all this is the
* "non-critical" part of the whole semaphore business. The
* critical part is the inline stuff in <asm/semaphore.h>
* where we want to avoid any extra jumps and calls.
*/
/*
* Logic:
* - only on a boundary condition do we need to care. When we go
* from a negative count to a non-negative, we wake people up.
* - when we go from a non-negative count to a negative do we
* (a) synchronize with the "sleeper" count and (b) make sure
* that we're on the wakeup list before we synchronize so that
* we cannot lose wakeup events.
*/
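A worked example (the editor's, not part of the original file) may make the bookkeeping in the __down routine below concrete. Take a semaphore initialised to 1. Task A calls down(): count drops from 1 to 0 and A owns it. Task B calls down(): count drops to -1, so B enters __down, bumps sleepers to 1, adds sleepers - 1 = 0 to count (still -1), leaves sleepers at 1 and sleeps. Task C follows: count drops to -2, sleepers becomes 2, C adds sleepers - 1 = 1 back (count returns to -1), resets sleepers to 1 and sleeps. However many tasks queue up, the pair settles at count = -1, sleepers = 1. When A calls up(), count rises to 0 and __up wakes one waiter, say B; B re-reads sleepers = 1, adds 0 to count, sees it non-negative, zeroes sleepers and owns the semaphore, and the wake_up() at the end of __down lets C fold its own decrement back into count and sleep again until the next up().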
void __up(struct semaphore *sem)
{
wake_up(&sem->wait);
}
static DEFINE_SPINLOCK(semaphore_lock);
void __sched __down(struct semaphore * sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
tsk->state = TASK_UNINTERRUPTIBLE;
add_wait_queue_exclusive(&sem->wait, &wait);
spin_lock_irq(&semaphore_lock);
sem->sleepers++;
for (;;) {
int sleepers = sem->sleepers;
/*
* Add "everybody else" into it. They aren't
* playing, because we own the spinlock.
*/
if (!atomic_add_negative(sleepers - 1, &sem->count)) {
sem->sleepers = 0;
break;
}
sem->sleepers = 1; /* us - see -1 above */
spin_unlock_irq(&semaphore_lock);
schedule();
tsk->state = TASK_UNINTERRUPTIBLE;
spin_lock_irq(&semaphore_lock);
}
spin_unlock_irq(&semaphore_lock);
remove_wait_queue(&sem->wait, &wait);
tsk->state = TASK_RUNNING;
wake_up(&sem->wait);
}
int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
tsk->state = TASK_INTERRUPTIBLE;
add_wait_queue_exclusive(&sem->wait, &wait);
spin_lock_irq(&semaphore_lock);
sem->sleepers ++;
for (;;) {
int sleepers = sem->sleepers;
/*
* With signals pending, this turns into
* the trylock failure case - we won't be
* sleeping, and we* can't get the lock as
* it has contention. Just correct the count
* and exit.
*/
if (signal_pending(current)) {
retval = -EINTR;
sem->sleepers = 0;
atomic_add(sleepers, &sem->count);
break;
}
/*
* Add "everybody else" into it. They aren't
* playing, because we own the spinlock. The
* "-1" is because we're still hoping to get
* the lock.
*/
if (!atomic_add_negative(sleepers - 1, &sem->count)) {
sem->sleepers = 0;
break;
}
sem->sleepers = 1; /* us - see -1 above */
spin_unlock_irq(&semaphore_lock);
schedule();
tsk->state = TASK_INTERRUPTIBLE;
spin_lock_irq(&semaphore_lock);
}
spin_unlock_irq(&semaphore_lock);
tsk->state = TASK_RUNNING;
remove_wait_queue(&sem->wait, &wait);
wake_up(&sem->wait);
return retval;
}
/*
* Trylock failed - make sure we correct for
* having decremented the count.
*
* We could have done the trylock with a
* single "cmpxchg" without failure cases,
* but then it wouldn't work on a 386.
*/
int __down_trylock(struct semaphore * sem)
{
int sleepers;
unsigned long flags;
spin_lock_irqsave(&semaphore_lock, flags);
sleepers = sem->sleepers + 1;
sem->sleepers = 0;
/*
* Add "everybody else" and us into it. They aren't
* playing, because we own the spinlock.
*/
if (!atomic_add_negative(sleepers, &sem->count))
wake_up(&sem->wait);
spin_unlock_irqrestore(&semaphore_lock, flags);
return 1;
}
/*
* The semaphore operations have a special calling sequence that
* allow us to do a simpler in-line version of them. These routines
* need to convert that sequence back into the C sequence when
* there is contention on the semaphore.
*
* ip contains the semaphore pointer on entry. Save the C-clobbered
* registers (r0 to r3 and lr), but not ip, as we use it as a return
* value in some cases..
* To remain AAPCS compliant (64-bit stack align) we save r4 as well.
*/
asm(" .section .sched.text,\"ax\",%progbits \n\
.align 5 \n\
.globl __down_failed \n\
__down_failed: \n\
stmfd sp!, {r0 - r4, lr} \n\
mov r0, ip \n\
bl __down \n\
ldmfd sp!, {r0 - r4, pc} \n\
\n\
.align 5 \n\
.globl __down_interruptible_failed \n\
__down_interruptible_failed: \n\
stmfd sp!, {r0 - r4, lr} \n\
mov r0, ip \n\
bl __down_interruptible \n\
mov ip, r0 \n\
ldmfd sp!, {r0 - r4, pc} \n\
\n\
.align 5 \n\
.globl __down_trylock_failed \n\
__down_trylock_failed: \n\
stmfd sp!, {r0 - r4, lr} \n\
mov r0, ip \n\
bl __down_trylock \n\
mov ip, r0 \n\
ldmfd sp!, {r0 - r4, pc} \n\
\n\
.align 5 \n\
.globl __up_wakeup \n\
__up_wakeup: \n\
stmfd sp!, {r0 - r4, lr} \n\
mov r0, ip \n\
bl __up \n\
ldmfd sp!, {r0 - r4, pc} \n\
");
EXPORT_SYMBOL(__down_failed);
EXPORT_SYMBOL(__down_interruptible_failed);
EXPORT_SYMBOL(__down_trylock_failed);
EXPORT_SYMBOL(__up_wakeup);
@@ -6,7 +6,7 @@ extra-y := head.o vmlinux.lds
 obj-$(CONFIG_SUBARCH_AVR32B) += entry-avr32b.o
 obj-y += syscall_table.o syscall-stubs.o irq.o
-obj-y += setup.o traps.o semaphore.o ocd.o ptrace.o
+obj-y += setup.o traps.o ocd.o ptrace.o
 obj-y += signal.o sys_avr32.o process.o time.o
 obj-y += init_task.o switch_to.o cpu.o
 obj-$(CONFIG_MODULES) += module.o avr32_ksyms.o
......
/*
 * AVR32 semaphore implementation.
*
* Copyright (C) 2004-2006 Atmel Corporation
*
* Based on linux/arch/i386/kernel/semaphore.c
* Copyright (C) 1999 Linus Torvalds
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <asm/semaphore.h>
#include <asm/atomic.h>
/*
* Semaphores are implemented using a two-way counter:
* The "count" variable is decremented for each process
* that tries to acquire the semaphore, while the "sleeping"
* variable is a count of such acquires.
*
* Notably, the inline "up()" and "down()" functions can
* efficiently test if they need to do any extra work (up
* needs to do something only if count was negative before
* the increment operation.
*
* "sleeping" and the contention routine ordering is protected
* by the spinlock in the semaphore's waitqueue head.
*
* Note that these functions are only called when there is
* contention on the lock, and as such all this is the
* "non-critical" part of the whole semaphore business. The
* critical part is the inline stuff in <asm/semaphore.h>
* where we want to avoid any extra jumps and calls.
*/
/*
* Logic:
* - only on a boundary condition do we need to care. When we go
* from a negative count to a non-negative, we wake people up.
* - when we go from a non-negative count to a negative do we
* (a) synchronize with the "sleeper" count and (b) make sure
* that we're on the wakeup list before we synchronize so that
* we cannot lose wakeup events.
*/
void __up(struct semaphore *sem)
{
wake_up(&sem->wait);
}
EXPORT_SYMBOL(__up);
void __sched __down(struct semaphore *sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
unsigned long flags;
tsk->state = TASK_UNINTERRUPTIBLE;
spin_lock_irqsave(&sem->wait.lock, flags);
add_wait_queue_exclusive_locked(&sem->wait, &wait);
sem->sleepers++;
for (;;) {
int sleepers = sem->sleepers;
/*
* Add "everybody else" into it. They aren't
* playing, because we own the spinlock in
* the wait_queue_head.
*/
if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
sem->sleepers = 0;
break;
}
sem->sleepers = 1; /* us - see -1 above */
spin_unlock_irqrestore(&sem->wait.lock, flags);
schedule();
spin_lock_irqsave(&sem->wait.lock, flags);
tsk->state = TASK_UNINTERRUPTIBLE;
}
remove_wait_queue_locked(&sem->wait, &wait);
wake_up_locked(&sem->wait);
spin_unlock_irqrestore(&sem->wait.lock, flags);
tsk->state = TASK_RUNNING;
}
EXPORT_SYMBOL(__down);
int __sched __down_interruptible(struct semaphore *sem)
{
int retval = 0;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
unsigned long flags;
tsk->state = TASK_INTERRUPTIBLE;
spin_lock_irqsave(&sem->wait.lock, flags);
add_wait_queue_exclusive_locked(&sem->wait, &wait);
sem->sleepers++;
for (;;) {
int sleepers = sem->sleepers;
/*
* With signals pending, this turns into the trylock
* failure case - we won't be sleeping, and we can't
* get the lock as it has contention. Just correct the
* count and exit.
*/
if (signal_pending(current)) {
retval = -EINTR;
sem->sleepers = 0;
atomic_add(sleepers, &sem->count);
break;
}
/*
* Add "everybody else" into it. They aren't
* playing, because we own the spinlock in
* the wait_queue_head.
*/
if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
sem->sleepers = 0;
break;
}
sem->sleepers = 1; /* us - see -1 above */
spin_unlock_irqrestore(&sem->wait.lock, flags);
schedule();
spin_lock_irqsave(&sem->wait.lock, flags);
tsk->state = TASK_INTERRUPTIBLE;
}
remove_wait_queue_locked(&sem->wait, &wait);
wake_up_locked(&sem->wait);
spin_unlock_irqrestore(&sem->wait.lock, flags);
tsk->state = TASK_RUNNING;
return retval;
}
EXPORT_SYMBOL(__down_interruptible);
@@ -31,10 +31,6 @@ config ZONE_DMA
 	bool
 	default y
-config SEMAPHORE_SLEEPERS
-	bool
-	default y
 config GENERIC_FIND_NEXT_BIT
 	bool
 	default y
......
@@ -42,11 +42,6 @@ EXPORT_SYMBOL(ip_fast_csum);
 EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__down_interruptible);
 EXPORT_SYMBOL(is_in_rom);
 EXPORT_SYMBOL(bfin_return_from_exception);
......
@@ -5,8 +5,7 @@
 extra-y := vmlinux.lds
-obj-y := process.o traps.o irq.o ptrace.o setup.o \
-	time.o sys_cris.o semaphore.o
+obj-y := process.o traps.o irq.o ptrace.o setup.o time.o sys_cris.o
 obj-$(CONFIG_MODULES) += crisksyms.o
 obj-$(CONFIG_MODULES) += module.o
......
@@ -9,7 +9,6 @@
 #include <linux/string.h>
 #include <linux/tty.h>
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -49,12 +48,6 @@ EXPORT_SYMBOL(__negdi2);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
-/* Semaphore functions */
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
 /* Userspace access functions */
 EXPORT_SYMBOL(__copy_user_zeroing);
 EXPORT_SYMBOL(__copy_user);
......
/*
* Generic semaphore code. Buyer beware. Do your own
* specific changes in <asm/semaphore-helper.h>
*/
#include <linux/sched.h>
#include <asm/semaphore-helper.h>
/*
* Semaphores are implemented using a two-way counter:
* The "count" variable is decremented for each process
* that tries to sleep, while the "waking" variable is
* incremented when the "up()" code goes to wake up waiting
* processes.
*
* Notably, the inline "up()" and "down()" functions can
* efficiently test if they need to do any extra work (up
* needs to do something only if count was negative before
* the increment operation.
*
* waking_non_zero() (from asm/semaphore.h) must execute
* atomically.
*
* When __up() is called, the count was negative before
* incrementing it, and we need to wake up somebody.
*
* This routine adds one to the count of processes that need to
* wake up and exit. ALL waiting processes actually wake up but
* only the one that gets to the "waking" field first will gate
* through and acquire the semaphore. The others will go back
* to sleep.
*
* Note that these functions are only called when there is
* contention on the lock, and as such all this is the
* "non-critical" part of the whole semaphore business. The
* critical part is the inline stuff in <asm/semaphore.h>
* where we want to avoid any extra jumps and calls.
*/
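As an editor's illustration only, a typical waking_non_zero() from an asm/semaphore-helper.h looks roughly like the sketch below: take the wake lock, consume one "waking" token if up() has granted any, and report whether the caller got through. Details vary per architecture (some use atomics instead of a spinlock), and this is not code from this commit.

static inline int waking_non_zero(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->waking > 0) {		/* up() granted a token: consume it and gate through */
		sem->waking--;
		ret = 1;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}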
void __up(struct semaphore *sem)
{
wake_one_more(sem);
wake_up(&sem->wait);
}
/*
* Perform the "down" function. Return zero for semaphore acquired,
* return negative for signalled out of the function.
*
* If called from __down, the return is ignored and the wait loop is
* not interruptible. This means that a task waiting on a semaphore
* using "down()" cannot be killed until someone does an "up()" on
* the semaphore.
*
* If called from __down_interruptible, the return value gets checked
* upon return. If the return value is negative then the task continues
* with the negative value in the return register (it can be tested by
* the caller).
*
* Either form may be used in conjunction with "up()".
*
*/
#define DOWN_VAR \
struct task_struct *tsk = current; \
wait_queue_t wait; \
init_waitqueue_entry(&wait, tsk);
#define DOWN_HEAD(task_state) \
\
\
tsk->state = (task_state); \
add_wait_queue(&sem->wait, &wait); \
\
/* \
* Ok, we're set up. sem->count is known to be less than zero \
* so we must wait. \
* \
* We can let go the lock for purposes of waiting. \
* We re-acquire it after awaking so as to protect \
* all semaphore operations. \
* \
* If "up()" is called before we call waking_non_zero() then \
* we will catch it right away. If it is called later then \
* we will have to go through a wakeup cycle to catch it. \
* \
* Multiple waiters contend for the semaphore lock to see \
* who gets to gate through and who has to wait some more. \
*/ \
for (;;) {
#define DOWN_TAIL(task_state) \
tsk->state = (task_state); \
} \
tsk->state = TASK_RUNNING; \
remove_wait_queue(&sem->wait, &wait);
void __sched __down(struct semaphore * sem)
{
DOWN_VAR
DOWN_HEAD(TASK_UNINTERRUPTIBLE)
if (waking_non_zero(sem))
break;
schedule();
DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}
int __sched __down_interruptible(struct semaphore * sem)
{
int ret = 0;
DOWN_VAR
DOWN_HEAD(TASK_INTERRUPTIBLE)
ret = waking_non_zero_interruptible(sem, tsk);
if (ret)
{
if (ret == 1)
/* ret != 0 only if we get interrupted -arca */
ret = 0;
break;
}
schedule();
DOWN_TAIL(TASK_INTERRUPTIBLE)
return ret;
}
int __down_trylock(struct semaphore * sem)
{
return waking_non_zero_trylock(sem);
}
@@ -9,7 +9,7 @@ extra-y:= head.o init_task.o vmlinux.lds
 obj-y := $(heads-y) entry.o entry-table.o break.o switch_to.o kernel_thread.o \
 	kernel_execve.o process.o traps.o ptrace.o signal.o dma.o \
-	sys_frv.o time.o semaphore.o setup.o frv_ksyms.o \
+	sys_frv.o time.o setup.o frv_ksyms.o \
 	debug-stub.o irq.o sleep.o uaccess.o
 obj-$(CONFIG_GDBSTUB) += gdb-stub.o gdb-io.o
......
@@ -12,7 +12,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/hardirq.h>
 #include <asm/cacheflush.h>
......
/* semaphore.c: FR-V semaphores
*
* Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
* - Derived from lib/rwsem-spinlock.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/sched.h>
#include <linux/module.h>
#include <asm/semaphore.h>
struct sem_waiter {
struct list_head list;
struct task_struct *task;
};
#ifdef CONFIG_DEBUG_SEMAPHORE
void semtrace(struct semaphore *sem, const char *str)
{
if (sem->debug)
printk("[%d] %s({%d,%d})\n",
current->pid,
str,
sem->counter,
list_empty(&sem->wait_list) ? 0 : 1);
}
#else
#define semtrace(SEM,STR) do { } while(0)
#endif
/*
* wait for a token to be granted from a semaphore
* - entered with lock held and interrupts disabled
*/
void __down(struct semaphore *sem, unsigned long flags)
{
struct task_struct *tsk = current;
struct sem_waiter waiter;
semtrace(sem, "Entering __down");
/* set up my own style of waitqueue */
waiter.task = tsk;
get_task_struct(tsk);
list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
spin_unlock_irqrestore(&sem->wait_lock, flags);
/* wait to be given the semaphore */
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
for (;;) {
if (list_empty(&waiter.list))
break;
schedule();
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
}
tsk->state = TASK_RUNNING;
semtrace(sem, "Leaving __down");
}
EXPORT_SYMBOL(__down);
/*
* interruptibly wait for a token to be granted from a semaphore
* - entered with lock held and interrupts disabled
*/
int __down_interruptible(struct semaphore *sem, unsigned long flags)
{
struct task_struct *tsk = current;
struct sem_waiter waiter;
int ret;
semtrace(sem,"Entering __down_interruptible");
/* set up my own style of waitqueue */
waiter.task = tsk;
get_task_struct(tsk);
list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
set_task_state(tsk, TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(&sem->wait_lock, flags);
/* wait to be given the semaphore */
ret = 0;
for (;;) {
if (list_empty(&waiter.list))
break;
if (unlikely(signal_pending(current)))
goto interrupted;
schedule();
set_task_state(tsk, TASK_INTERRUPTIBLE);
}
out:
tsk->state = TASK_RUNNING;
semtrace(sem, "Leaving __down_interruptible");
return ret;
interrupted:
spin_lock_irqsave(&sem->wait_lock, flags);
if (!list_empty(&waiter.list)) {
list_del(&waiter.list);
ret = -EINTR;
}
spin_unlock_irqrestore(&sem->wait_lock, flags);
if (ret == -EINTR)
put_task_struct(current);
goto out;
}
EXPORT_SYMBOL(__down_interruptible);
/*
* release a single token back to a semaphore
* - entered with lock held and interrupts disabled
*/
void __up(struct semaphore *sem)
{
struct task_struct *tsk;
struct sem_waiter *waiter;
semtrace(sem,"Entering __up");
/* grant the token to the process at the front of the queue */
waiter = list_entry(sem->wait_list.next, struct sem_waiter, list);
/* We must be careful not to touch 'waiter' after we set ->task = NULL.
* It is allocated on the waiter's stack and may become invalid at
* any time after that point (due to a wakeup from another source).
*/
list_del_init(&waiter->list);
tsk = waiter->task;
mb();
waiter->task = NULL;
wake_up_process(tsk);
put_task_struct(tsk);
semtrace(sem,"Leaving __up");
}
EXPORT_SYMBOL(__up);
@@ -5,7 +5,7 @@
 extra-y := vmlinux.lds
 obj-y := process.o traps.o ptrace.o irq.o \
-	sys_h8300.o time.o semaphore.o signal.o \
+	sys_h8300.o time.o signal.o \
 	setup.o gpio.o init_task.o syscalls.o \
 	entry.o
......
@@ -12,7 +12,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/current.h>
 #include <asm/gpio.h>
......
/*
* Generic semaphore code. Buyer beware. Do your own
* specific changes in <asm/semaphore-helper.h>
*/
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/semaphore-helper.h>
#ifndef CONFIG_RMW_INSNS
spinlock_t semaphore_wake_lock;
#endif
/*
* Semaphores are implemented using a two-way counter:
* The "count" variable is decremented for each process
* that tries to sleep, while the "waking" variable is
* incremented when the "up()" code goes to wake up waiting
* processes.
*
* Notably, the inline "up()" and "down()" functions can
* efficiently test if they need to do any extra work (up
* needs to do something only if count was negative before
* the increment operation.
*
* waking_non_zero() (from asm/semaphore.h) must execute
* atomically.
*
* When __up() is called, the count was negative before
* incrementing it, and we need to wake up somebody.
*
* This routine adds one to the count of processes that need to
* wake up and exit. ALL waiting processes actually wake up but
* only the one that gets to the "waking" field first will gate
* through and acquire the semaphore. The others will go back
* to sleep.
*
* Note that these functions are only called when there is
* contention on the lock, and as such all this is the
* "non-critical" part of the whole semaphore business. The
* critical part is the inline stuff in <asm/semaphore.h>
* where we want to avoid any extra jumps and calls.
*/
void __up(struct semaphore *sem)
{
wake_one_more(sem);
wake_up(&sem->wait);
}
/*
* Perform the "down" function. Return zero for semaphore acquired,
* return negative for signalled out of the function.
*
* If called from __down, the return is ignored and the wait loop is
* not interruptible. This means that a task waiting on a semaphore
* using "down()" cannot be killed until someone does an "up()" on
* the semaphore.
*
* If called from __down_interruptible, the return value gets checked
* upon return. If the return value is negative then the task continues
* with the negative value in the return register (it can be tested by
* the caller).
*
* Either form may be used in conjunction with "up()".
*
*/
#define DOWN_HEAD(task_state) \
\
\
current->state = (task_state); \
add_wait_queue(&sem->wait, &wait); \
\
/* \
* Ok, we're set up. sem->count is known to be less than zero \
* so we must wait. \
* \
* We can let go the lock for purposes of waiting. \
* We re-acquire it after awaking so as to protect \
* all semaphore operations. \
* \
* If "up()" is called before we call waking_non_zero() then \
* we will catch it right away. If it is called later then \
* we will have to go through a wakeup cycle to catch it. \
* \
* Multiple waiters contend for the semaphore lock to see \
* who gets to gate through and who has to wait some more. \
*/ \
for (;;) {
#define DOWN_TAIL(task_state) \
current->state = (task_state); \
} \
current->state = TASK_RUNNING; \
remove_wait_queue(&sem->wait, &wait);
void __sched __down(struct semaphore * sem)
{
DECLARE_WAITQUEUE(wait, current);
DOWN_HEAD(TASK_UNINTERRUPTIBLE)
if (waking_non_zero(sem))
break;
schedule();
DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}
int __sched __down_interruptible(struct semaphore * sem)
{
DECLARE_WAITQUEUE(wait, current);
int ret = 0;
DOWN_HEAD(TASK_INTERRUPTIBLE)
ret = waking_non_zero_interruptible(sem, current);
if (ret)
{
if (ret == 1)
/* ret != 0 only if we get interrupted -arca */
ret = 0;
break;
}
schedule();
DOWN_TAIL(TASK_INTERRUPTIBLE)
return ret;
}
int __down_trylock(struct semaphore * sem)
{
return waking_non_zero_trylock(sem);
}
@@ -6,7 +6,7 @@ extra-y := head.o init_task.o vmlinux.lds
 obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
 	irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
-	salinfo.o semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
+	salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
 	unwind.o mca.o mca_asm.o topology.o
 obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
......
@@ -19,12 +19,6 @@ EXPORT_SYMBOL_GPL(empty_zero_page);
 EXPORT_SYMBOL(ip_fast_csum);		/* hand-coded assembly */
 EXPORT_SYMBOL(csum_ipv6_magic);
-#include <asm/semaphore.h>
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__up);
 #include <asm/page.h>
 EXPORT_SYMBOL(clear_page);
......
/*
* IA-64 semaphore implementation (derived from x86 version).
*
* Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
/*
* Semaphores are implemented using a two-way counter: The "count"
* variable is decremented for each process that tries to acquire the
* semaphore, while the "sleepers" variable is a count of such
* acquires.
*
* Notably, the inline "up()" and "down()" functions can efficiently
* test if they need to do any extra work (up needs to do something
* only if count was negative before the increment operation.
*
* "sleeping" and the contention routine ordering is protected
* by the spinlock in the semaphore's waitqueue head.
*
* Note that these functions are only called when there is contention
* on the lock, and as such all this is the "non-critical" part of the
* whole semaphore business. The critical part is the inline stuff in
* <asm/semaphore.h> where we want to avoid any extra jumps and calls.
*/
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/errno.h>
#include <asm/semaphore.h>
/*
* Logic:
* - Only on a boundary condition do we need to care. When we go
* from a negative count to a non-negative, we wake people up.
* - When we go from a non-negative count to a negative do we
* (a) synchronize with the "sleepers" count and (b) make sure
* that we're on the wakeup list before we synchronize so that
* we cannot lose wakeup events.
*/
void
__up (struct semaphore *sem)
{
wake_up(&sem->wait);
}
void __sched __down (struct semaphore *sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
unsigned long flags;
tsk->state = TASK_UNINTERRUPTIBLE;
spin_lock_irqsave(&sem->wait.lock, flags);
add_wait_queue_exclusive_locked(&sem->wait, &wait);
sem->sleepers++;
for (;;) {
int sleepers = sem->sleepers;
/*
* Add "everybody else" into it. They aren't
* playing, because we own the spinlock in
* the wait_queue_head.
*/
if (!atomic_add_negative(sleepers - 1, &sem->count)) {
sem->sleepers = 0;
break;
}
sem->sleepers = 1; /* us - see -1 above */
spin_unlock_irqrestore(&sem->wait.lock, flags);
schedule();
spin_lock_irqsave(&sem->wait.lock, flags);
tsk->state = TASK_UNINTERRUPTIBLE;
}
remove_wait_queue_locked(&sem->wait, &wait);
wake_up_locked(&sem->wait);
spin_unlock_irqrestore(&sem->wait.lock, flags);
tsk->state = TASK_RUNNING;
}
int __sched __down_interruptible (struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
unsigned long flags;
tsk->state = TASK_INTERRUPTIBLE;
spin_lock_irqsave(&sem->wait.lock, flags);
add_wait_queue_exclusive_locked(&sem->wait, &wait);
sem->sleepers ++;
for (;;) {
int sleepers = sem->sleepers;
/*
* With signals pending, this turns into
* the trylock failure case - we won't be
* sleeping, and we* can't get the lock as
* it has contention. Just correct the count
* and exit.
*/
if (signal_pending(current)) {
retval = -EINTR;
sem->sleepers = 0;
atomic_add(sleepers, &sem->count);
break;
}
/*
* Add "everybody else" into it. They aren't
* playing, because we own the spinlock in
* wait_queue_head. The "-1" is because we're
* still hoping to get the semaphore.
*/
if (!atomic_add_negative(sleepers - 1, &sem->count)) {
sem->sleepers = 0;
break;
}
sem->sleepers = 1; /* us - see -1 above */
spin_unlock_irqrestore(&sem->wait.lock, flags);
schedule();
spin_lock_irqsave(&sem->wait.lock, flags);
tsk->state = TASK_INTERRUPTIBLE;
}
remove_wait_queue_locked(&sem->wait, &wait);
wake_up_locked(&sem->wait);
spin_unlock_irqrestore(&sem->wait.lock, flags);
tsk->state = TASK_RUNNING;
return retval;
}
/*
* Trylock failed - make sure we correct for having decremented the
* count.
*/
int
__down_trylock (struct semaphore *sem)
{
unsigned long flags;
int sleepers;
spin_lock_irqsave(&sem->wait.lock, flags);
sleepers = sem->sleepers + 1;
sem->sleepers = 0;
/*
* Add "everybody else" and us into it. They aren't
* playing, because we own the spinlock in the
* wait_queue_head.
*/
if (!atomic_add_negative(sleepers, &sem->count)) {
wake_up_locked(&sem->wait);
}
spin_unlock_irqrestore(&sem->wait.lock, flags);
return 1;
}
@@ -5,7 +5,7 @@
 extra-y := head.o init_task.o vmlinux.lds
 obj-y := process.o entry.o traps.o align.o irq.o setup.o time.o \
-	m32r_ksyms.o sys_m32r.o semaphore.o signal.o ptrace.o
+	m32r_ksyms.o sys_m32r.o signal.o ptrace.o
 obj-$(CONFIG_SMP) += smp.o smpboot.o
 obj-$(CONFIG_MODULES) += module.o
......
@@ -7,7 +7,6 @@
 #include <linux/interrupt.h>
 #include <linux/string.h>
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -22,10 +21,6 @@ EXPORT_SYMBOL(dump_fpu);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down_trylock);
 /* Networking helper routines. */
 /* Delay loops */
......
/*
* linux/arch/m32r/semaphore.c
* orig : i386 2.6.4
*
* M32R semaphore implementation.
*
* Copyright (c) 2002 - 2004 Hitoshi Yamamoto
*/
/*
* i386 semaphore implementation.
*
* (C) Copyright 1999 Linus Torvalds
*
* Portions Copyright 1999 Red Hat, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
*/
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/init.h>
#include <asm/semaphore.h>
/*
* Semaphores are implemented using a two-way counter:
* The "count" variable is decremented for each process
* that tries to acquire the semaphore, while the "sleeping"
* variable is a count of such acquires.
*
* Notably, the inline "up()" and "down()" functions can
* efficiently test if they need to do any extra work (up
* needs to do something only if count was negative before
* the increment operation.
*
* "sleeping" and the contention routine ordering is protected
* by the spinlock in the semaphore's waitqueue head.
*
* Note that these functions are only called when there is
* contention on the lock, and as such all this is the
* "non-critical" part of the whole semaphore business. The
* critical part is the inline stuff in <asm/semaphore.h>
* where we want to avoid any extra jumps and calls.
*/
/*
* Logic:
* - only on a boundary condition do we need to care. When we go
* from a negative count to a non-negative, we wake people up.
* - when we go from a non-negative count to a negative do we
* (a) synchronize with the "sleeper" count and (b) make sure
* that we're on the wakeup list before we synchronize so that
* we cannot lose wakeup events.
*/
asmlinkage void __up(struct semaphore *sem)
{
wake_up(&sem->wait);
}
asmlinkage void __sched __down(struct semaphore * sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
unsigned long flags;
tsk->state = TASK_UNINTERRUPTIBLE;
spin_lock_irqsave(&sem->wait.lock, flags);
add_wait_queue_exclusive_locked(&sem->wait, &wait);
sem->sleepers++;
for (;;) {
int sleepers = sem->sleepers;
/*
* Add "everybody else" into it. They aren't
* playing, because we own the spinlock in
* the wait_queue_head.
*/
if (!atomic_add_negative(sleepers - 1, &sem->count)) {
sem->sleepers = 0;
break;
}
sem->sleepers = 1; /* us - see -1 above */
spin_unlock_irqrestore(&sem->wait.lock, flags);
schedule();
spin_lock_irqsave(&sem->wait.lock, flags);
tsk->state = TASK_UNINTERRUPTIBLE;
}
remove_wait_queue_locked(&sem->wait, &wait);
wake_up_locked(&sem->wait);
spin_unlock_irqrestore(&sem->wait.lock, flags);
tsk->state = TASK_RUNNING;
}
asmlinkage int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
unsigned long flags;
tsk->state = TASK_INTERRUPTIBLE;
spin_lock_irqsave(&sem->wait.lock, flags);
add_wait_queue_exclusive_locked(&sem->wait, &wait);
sem->sleepers++;
for (;;) {
int sleepers = sem->sleepers;
/*
* With signals pending, this turns into
* the trylock failure case - we won't be
* sleeping, and we* can't get the lock as
* it has contention. Just correct the count
* and exit.
*/
if (signal_pending(current)) {
retval = -EINTR;
sem->sleepers = 0;
atomic_add(sleepers, &sem->count);
break;
}
/*
* Add "everybody else" into it. They aren't
* playing, because we own the spinlock in
* wait_queue_head. The "-1" is because we're
* still hoping to get the semaphore.
*/
if (!atomic_add_negative(sleepers - 1, &sem->count)) {
sem->sleepers = 0;
break;
}
sem->sleepers = 1; /* us - see -1 above */
spin_unlock_irqrestore(&sem->wait.lock, flags);
schedule();
spin_lock_irqsave(&sem->wait.lock, flags);
tsk->state = TASK_INTERRUPTIBLE;
}
remove_wait_queue_locked(&sem->wait, &wait);
wake_up_locked(&sem->wait);
spin_unlock_irqrestore(&sem->wait.lock, flags);
tsk->state = TASK_RUNNING;
return retval;
}
/*
* Trylock failed - make sure we correct for
* having decremented the count.
*
* We could have done the trylock with a
* single "cmpxchg" without failure cases,
* but then it wouldn't work on a 386.
*/
asmlinkage int __down_trylock(struct semaphore * sem)
{
int sleepers;
unsigned long flags;
spin_lock_irqsave(&sem->wait.lock, flags);
sleepers = sem->sleepers + 1;
sem->sleepers = 0;
/*
* Add "everybody else" and us into it. They aren't
* playing, because we own the spinlock in the
* wait_queue_head.
*/
if (!atomic_add_negative(sleepers, &sem->count)) {
wake_up_locked(&sem->wait);
}
spin_unlock_irqrestore(&sem->wait.lock, flags);
return 1;
}
@@ -10,7 +10,7 @@ endif
 extra-y += vmlinux.lds
 obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \
-	sys_m68k.o time.o semaphore.o setup.o m68k_ksyms.o devres.o
+	sys_m68k.o time.o setup.o m68k_ksyms.o devres.o
 devres-y = ../../../kernel/irq/devres.o
......
 #include <linux/module.h>
-#include <asm/semaphore.h>
 asmlinkage long long __ashldi3 (long long, int);
 asmlinkage long long __ashrdi3 (long long, int);
@@ -15,8 +14,3 @@ EXPORT_SYMBOL(__ashrdi3);
 EXPORT_SYMBOL(__lshrdi3);
 EXPORT_SYMBOL(__muldi3);
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);
/*
* Generic semaphore code. Buyer beware. Do your own
* specific changes in <asm/semaphore-helper.h>
*/
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/semaphore-helper.h>
#ifndef CONFIG_RMW_INSNS
spinlock_t semaphore_wake_lock;
#endif
/*
* Semaphores are implemented using a two-way counter:
* The "count" variable is decremented for each process
* that tries to sleep, while the "waking" variable is
* incremented when the "up()" code goes to wake up waiting
* processes.
*
* Notably, the inline "up()" and "down()" functions can
* efficiently test if they need to do any extra work (up
* needs to do something only if count was negative before
* the increment operation.
*
* waking_non_zero() (from asm/semaphore.h) must execute
* atomically.
*
* When __up() is called, the count was negative before
* incrementing it, and we need to wake up somebody.
*
* This routine adds one to the count of processes that need to
* wake up and exit. ALL waiting processes actually wake up but
* only the one that gets to the "waking" field first will gate
* through and acquire the semaphore. The others will go back
* to sleep.
*
* Note that these functions are only called when there is
* contention on the lock, and as such all this is the
* "non-critical" part of the whole semaphore business. The
* critical part is the inline stuff in <asm/semaphore.h>
* where we want to avoid any extra jumps and calls.
*/
void __up(struct semaphore *sem)
{
wake_one_more(sem);
wake_up(&sem->wait);
}
/*
* Perform the "down" function. Return zero for semaphore acquired,
* return negative for signalled out of the function.
*
* If called from __down, the return is ignored and the wait loop is
* not interruptible. This means that a task waiting on a semaphore
* using "down()" cannot be killed until someone does an "up()" on
* the semaphore.
*
* If called from __down_interruptible, the return value gets checked
* upon return. If the return value is negative then the task continues
* with the negative value in the return register (it can be tested by
* the caller).
*
* Either form may be used in conjunction with "up()".
*
*/
#define DOWN_HEAD(task_state) \
\
\
current->state = (task_state); \
add_wait_queue(&sem->wait, &wait); \
\
/* \
* Ok, we're set up. sem->count is known to be less than zero \
* so we must wait. \
* \
* We can let go the lock for purposes of waiting. \
* We re-acquire it after awaking so as to protect \
* all semaphore operations. \
* \
* If "up()" is called before we call waking_non_zero() then \
* we will catch it right away. If it is called later then \
* we will have to go through a wakeup cycle to catch it. \
* \
* Multiple waiters contend for the semaphore lock to see \
* who gets to gate through and who has to wait some more. \
*/ \
for (;;) {
#define DOWN_TAIL(task_state) \
current->state = (task_state); \
} \
current->state = TASK_RUNNING; \
remove_wait_queue(&sem->wait, &wait);
void __sched __down(struct semaphore * sem)
{
DECLARE_WAITQUEUE(wait, current);
DOWN_HEAD(TASK_UNINTERRUPTIBLE)
if (waking_non_zero(sem))
break;
schedule();
DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}
int __sched __down_interruptible(struct semaphore * sem)
{
DECLARE_WAITQUEUE(wait, current);
int ret = 0;
DOWN_HEAD(TASK_INTERRUPTIBLE)
ret = waking_non_zero_interruptible(sem, current);
if (ret)
{
if (ret == 1)
/* ret != 0 only if we get interrupted -arca */
ret = 0;
break;
}
schedule();
DOWN_TAIL(TASK_INTERRUPTIBLE)
return ret;
}
int __down_trylock(struct semaphore * sem)
{
return waking_non_zero_trylock(sem);
}
@@ -5,4 +5,4 @@
 EXTRA_AFLAGS := -traditional
 lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
-	checksum.o string.o semaphore.o uaccess.o
+	checksum.o string.o uaccess.o
/*
* linux/arch/m68k/lib/semaphore.S
*
* Copyright (C) 1996 Linus Torvalds
*
* m68k version by Andreas Schwab
*/
#include <linux/linkage.h>
#include <asm/semaphore.h>
/*
* The semaphore operations have a special calling sequence that
* allow us to do a simpler in-line version of them. These routines
* need to convert that sequence back into the C sequence when
* there is contention on the semaphore.
*/
ENTRY(__down_failed)
moveml %a0/%d0/%d1,-(%sp)
movel %a1,-(%sp)
jbsr __down
movel (%sp)+,%a1
moveml (%sp)+,%a0/%d0/%d1
rts
ENTRY(__down_failed_interruptible)
movel %a0,-(%sp)
movel %d1,-(%sp)
movel %a1,-(%sp)
jbsr __down_interruptible
movel (%sp)+,%a1
movel (%sp)+,%d1
movel (%sp)+,%a0
rts
ENTRY(__down_failed_trylock)
movel %a0,-(%sp)
movel %d1,-(%sp)
movel %a1,-(%sp)
jbsr __down_trylock
movel (%sp)+,%a1
movel (%sp)+,%d1
movel (%sp)+,%a0
rts
ENTRY(__up_wakeup)
moveml %a0/%d0/%d1,-(%sp)
movel %a1,-(%sp)
jbsr __up
movel (%sp)+,%a1
moveml (%sp)+,%a0/%d0/%d1
rts
@@ -5,7 +5,7 @@
 extra-y := vmlinux.lds
 obj-y += dma.o entry.o init_task.o irq.o m68k_ksyms.o process.o ptrace.o \
-	semaphore.o setup.o signal.o syscalltable.o sys_m68k.o time.o traps.o
+	setup.o signal.o syscalltable.o sys_m68k.o time.o traps.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_COMEMPCI) += comempci.o
@@ -13,7 +13,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/current.h>
@@ -39,11 +38,6 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);
 /*
  * libgcc functions - functions that are used internally by the
  * compiler... (prototypes are not correct though, but that
......
/*
* Generic semaphore code. Buyer beware. Do your own
* specific changes in <asm/semaphore-helper.h>
*/
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/init.h>
#include <asm/semaphore-helper.h>
#ifndef CONFIG_RMW_INSNS
spinlock_t semaphore_wake_lock;
#endif
/*
* Semaphores are implemented using a two-way counter:
* The "count" variable is decremented for each process
* that tries to sleep, while the "waking" variable is
* incremented when the "up()" code goes to wake up waiting
* processes.
*
* Notably, the inline "up()" and "down()" functions can
* efficiently test if they need to do any extra work (up
* needs to do something only if count was negative before
* the increment operation.
*
* waking_non_zero() (from asm/semaphore.h) must execute
* atomically.
*
* When __up() is called, the count was negative before
* incrementing it, and we need to wake up somebody.
*
* This routine adds one to the count of processes that need to
* wake up and exit. ALL waiting processes actually wake up but
* only the one that gets to the "waking" field first will gate
* through and acquire the semaphore. The others will go back
* to sleep.
*
* Note that these functions are only called when there is
* contention on the lock, and as such all this is the
* "non-critical" part of the whole semaphore business. The
* critical part is the inline stuff in <asm/semaphore.h>
* where we want to avoid any extra jumps and calls.
*/
void __up(struct semaphore *sem)
{
wake_one_more(sem);
wake_up(&sem->wait);
}
/*
* Perform the "down" function. Return zero for semaphore acquired,
* return negative for signalled out of the function.
*
* If called from __down, the return is ignored and the wait loop is
* not interruptible. This means that a task waiting on a semaphore
* using "down()" cannot be killed until someone does an "up()" on
* the semaphore.
*
* If called from __down_interruptible, the return value gets checked
* upon return. If the return value is negative then the task continues
* with the negative value in the return register (it can be tested by
* the caller).
*
* Either form may be used in conjunction with "up()".
*
*/
#define DOWN_HEAD(task_state) \
\
\
current->state = (task_state); \
add_wait_queue(&sem->wait, &wait); \
\
/* \
* Ok, we're set up. sem->count is known to be less than zero \
* so we must wait. \
* \
* We can let go the lock for purposes of waiting. \
* We re-acquire it after awaking so as to protect \
* all semaphore operations. \
* \
* If "up()" is called before we call waking_non_zero() then \
* we will catch it right away. If it is called later then \
* we will have to go through a wakeup cycle to catch it. \
* \
* Multiple waiters contend for the semaphore lock to see \
* who gets to gate through and who has to wait some more. \
*/ \
for (;;) {
#define DOWN_TAIL(task_state) \
current->state = (task_state); \
} \
current->state = TASK_RUNNING; \
remove_wait_queue(&sem->wait, &wait);
void __sched __down(struct semaphore * sem)
{
DECLARE_WAITQUEUE(wait, current);
DOWN_HEAD(TASK_UNINTERRUPTIBLE)
if (waking_non_zero(sem))
break;
schedule();
DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}
int __sched __down_interruptible(struct semaphore * sem)
{
DECLARE_WAITQUEUE(wait, current);
int ret = 0;
DOWN_HEAD(TASK_INTERRUPTIBLE)
ret = waking_non_zero_interruptible(sem, current);
if (ret)
{
if (ret == 1)
/* ret != 0 only if we get interrupted -arca */
ret = 0;
break;
}
schedule();
DOWN_TAIL(TASK_INTERRUPTIBLE)
return ret;
}
int __down_trylock(struct semaphore * sem)
{
return waking_non_zero_trylock(sem);
}
@@ -4,4 +4,4 @@
 lib-y := ashldi3.o ashrdi3.o lshrdi3.o \
 	muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \
-	checksum.o semaphore.o memcpy.o memset.o delay.o
+	checksum.o memcpy.o memset.o delay.o
/*
* linux/arch/m68k/lib/semaphore.S
*
* Copyright (C) 1996 Linus Torvalds
*
* m68k version by Andreas Schwab
*
* MAR/1999 -- modified to support ColdFire (gerg@snapgear.com)
*/
#include <linux/linkage.h>
#include <asm/semaphore.h>
/*
* "down_failed" is called with the eventual return address
* in %a0, and the address of the semaphore in %a1. We need
* to increment the number of waiters on the semaphore,
* call "__down()", and then eventually return to try again.
*/
ENTRY(__down_failed)
#ifdef CONFIG_COLDFIRE
subl #12,%sp
moveml %a0/%d0/%d1,(%sp)
#else
moveml %a0/%d0/%d1,-(%sp)
#endif
movel %a1,-(%sp)
jbsr __down
movel (%sp)+,%a1
movel (%sp)+,%d0
movel (%sp)+,%d1
rts
ENTRY(__down_failed_interruptible)
movel %a0,-(%sp)
movel %d1,-(%sp)
movel %a1,-(%sp)
jbsr __down_interruptible
movel (%sp)+,%a1
movel (%sp)+,%d1
rts
ENTRY(__up_wakeup)
#ifdef CONFIG_COLDFIRE
subl #12,%sp
moveml %a0/%d0/%d1,(%sp)
#else
moveml %a0/%d0/%d1,-(%sp)
#endif
movel %a1,-(%sp)
jbsr __up
movel (%sp)+,%a1
movel (%sp)+,%d0
movel (%sp)+,%d1
rts
ENTRY(__down_failed_trylock)
movel %a0,-(%sp)
movel %d1,-(%sp)
movel %a1,-(%sp)
jbsr __down_trylock
movel (%sp)+,%a1
movel (%sp)+,%d1
movel (%sp)+,%a0
rts
@@ -5,7 +5,7 @@
 extra-y := head.o init_task.o vmlinux.lds
 obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
-	ptrace.o reset.o semaphore.o setup.o signal.o syscall.o \
+	ptrace.o reset.o setup.o signal.o syscall.o \
 	time.o topology.o traps.o unaligned.o
 obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
......
/*
* MIPS-specific semaphore code.
*
* Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
* Copyright (C) 2004 Ralf Baechle <ralf@linux-mips.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
* to eliminate the SMP races in the old version between the updates
* of `count' and `waking'. Now we use negative `count' values to
* indicate that some process(es) are waiting for the semaphore.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/atomic.h>
#include <asm/cpu-features.h>
#include <asm/errno.h>
#include <asm/semaphore.h>
#include <asm/war.h>
/*
* Atomically update sem->count.
* This does the equivalent of the following:
*
* old_count = sem->count;
* tmp = MAX(old_count, 0) + incr;
* sem->count = tmp;
* return old_count;
*
* On machines without lld/scd we need a spinlock to make the manipulation of
* sem->count and sem->waking atomic. Scalability isn't an issue because
* this lock is used on UP only so it's just an empty variable.
*/
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
int old_count, tmp;
if (cpu_has_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__(
" .set mips3 \n"
"1: ll %0, %2 # __sem_update_count \n"
" sra %1, %0, 31 \n"
" not %1 \n"
" and %1, %0, %1 \n"
" addu %1, %1, %3 \n"
" sc %1, %2 \n"
" beqzl %1, 1b \n"
" .set mips0 \n"
: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
: "r" (incr), "m" (sem->count));
} else if (cpu_has_llsc) {
__asm__ __volatile__(
" .set mips3 \n"
"1: ll %0, %2 # __sem_update_count \n"
" sra %1, %0, 31 \n"
" not %1 \n"
" and %1, %0, %1 \n"
" addu %1, %1, %3 \n"
" sc %1, %2 \n"
" beqz %1, 1b \n"
" .set mips0 \n"
: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
: "r" (incr), "m" (sem->count));
} else {
static DEFINE_SPINLOCK(semaphore_lock);
unsigned long flags;
spin_lock_irqsave(&semaphore_lock, flags);
old_count = atomic_read(&sem->count);
tmp = max_t(int, old_count, 0) + incr;
atomic_set(&sem->count, tmp);
spin_unlock_irqrestore(&semaphore_lock, flags);
}
return old_count;
}
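Worked values for __sem_update_count() above (an illustrative annotation, not part of the patch):

/*
 *   count ==  2, incr == -1:  count -> 1,  returns  2   (down: acquired)
 *   count ==  0, incr == -1:  count -> -1, returns  0   (down: must sleep)
 *   count == -1, incr == +1:  count -> 1,  returns -1   (up: wake a sleeper)
 */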
void __up(struct semaphore *sem)
{
/*
* Note that we incremented count in up() before we came here,
* but that was ineffective since the result was <= 0, and
* any negative value of count is equivalent to 0.
* This ends up setting count to 1, unless count is now > 0
* (i.e. because some other cpu has called up() in the meantime),
* in which case we just increment count.
*/
__sem_update_count(sem, 1);
wake_up(&sem->wait);
}
EXPORT_SYMBOL(__up);
/*
* Note that when we come in to __down or __down_interruptible,
* we have already decremented count, but that decrement was
* ineffective since the result was < 0, and any negative value
* of count is equivalent to 0.
* Thus it is only when we decrement count from some value > 0
* that we have actually got the semaphore.
*/
void __sched __down(struct semaphore *sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
add_wait_queue_exclusive(&sem->wait, &wait);
/*
* Try to get the semaphore. If the count is > 0, then we've
* got the semaphore; we decrement count and exit the loop.
* If the count is 0 or negative, we set it to -1, indicating
* that we are asleep, and then sleep.
*/
while (__sem_update_count(sem, -1) <= 0) {
schedule();
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
}
remove_wait_queue(&sem->wait, &wait);
__set_task_state(tsk, TASK_RUNNING);
/*
* If there are any more sleepers, wake one of them up so
* that it can either get the semaphore, or set count to -1
* indicating that there are still processes sleeping.
*/
wake_up(&sem->wait);
}
EXPORT_SYMBOL(__down);
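As an illustrative sketch (not taken from this patch), the inline fast path that leads into __down() above looks roughly like this, assuming the usual atomic_dec_return()-based down():

static inline void down_sketch(struct semaphore *sem)
{
	might_sleep();
	/* the slow path runs only when the new count went negative */
	if (unlikely(atomic_dec_return(&sem->count) < 0))
		__down(sem);
}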
int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
__set_task_state(tsk, TASK_INTERRUPTIBLE);
add_wait_queue_exclusive(&sem->wait, &wait);
while (__sem_update_count(sem, -1) <= 0) {
if (signal_pending(current)) {
/*
* A signal is pending - give up trying.
* Set sem->count to 0 if it is negative,
* since we are no longer sleeping.
*/
__sem_update_count(sem, 0);
retval = -EINTR;
break;
}
schedule();
set_task_state(tsk, TASK_INTERRUPTIBLE);
}
remove_wait_queue(&sem->wait, &wait);
__set_task_state(tsk, TASK_RUNNING);
wake_up(&sem->wait);
return retval;
}
EXPORT_SYMBOL(__down_interruptible);
@@ -3,7 +3,7 @@
 #
 extra-y := head.o init_task.o vmlinux.lds
-obj-y   := process.o semaphore.o signal.o entry.o fpu.o traps.o irq.o \
+obj-y   := process.o signal.o entry.o fpu.o traps.o irq.o \
 	   ptrace.o setup.o time.o sys_mn10300.o io.o kthread.o \
 	   switch_to.o mn10300_ksyms.o kernel_execve.o
......
/* MN10300 Semaphore implementation
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sched.h>
#include <linux/module.h>
#include <asm/semaphore.h>
struct sem_waiter {
struct list_head list;
struct task_struct *task;
};
#if SEMAPHORE_DEBUG
void semtrace(struct semaphore *sem, const char *str)
{
if (sem->debug)
printk(KERN_DEBUG "[%d] %s({%d,%d})\n",
current->pid,
str,
atomic_read(&sem->count),
list_empty(&sem->wait_list) ? 0 : 1);
}
#else
#define semtrace(SEM, STR) do { } while (0)
#endif
/*
* wait for a token to be granted from a semaphore
* - entered with lock held and interrupts disabled
*/
void __down(struct semaphore *sem, unsigned long flags)
{
struct task_struct *tsk = current;
struct sem_waiter waiter;
semtrace(sem, "Entering __down");
/* set up my own style of waitqueue */
waiter.task = tsk;
get_task_struct(tsk);
list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
spin_unlock_irqrestore(&sem->wait_lock, flags);
/* wait to be given the semaphore */
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
for (;;) {
if (!waiter.task)
break;
schedule();
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
}
tsk->state = TASK_RUNNING;
semtrace(sem, "Leaving __down");
}
EXPORT_SYMBOL(__down);
/*
* interruptibly wait for a token to be granted from a semaphore
* - entered with lock held and interrupts disabled
*/
int __down_interruptible(struct semaphore *sem, unsigned long flags)
{
struct task_struct *tsk = current;
struct sem_waiter waiter;
int ret;
semtrace(sem, "Entering __down_interruptible");
/* set up my own style of waitqueue */
waiter.task = tsk;
get_task_struct(tsk);
list_add_tail(&waiter.list, &sem->wait_list);
/* we don't need to touch the semaphore struct anymore */
set_task_state(tsk, TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(&sem->wait_lock, flags);
/* wait to be given the semaphore */
ret = 0;
for (;;) {
if (!waiter.task)
break;
if (unlikely(signal_pending(current)))
goto interrupted;
schedule();
set_task_state(tsk, TASK_INTERRUPTIBLE);
}
out:
tsk->state = TASK_RUNNING;
semtrace(sem, "Leaving __down_interruptible");
return ret;
interrupted:
spin_lock_irqsave(&sem->wait_lock, flags);
list_del(&waiter.list);
spin_unlock_irqrestore(&sem->wait_lock, flags);
ret = 0;
if (!waiter.task) {
put_task_struct(current);
ret = -EINTR;
}
goto out;
}
EXPORT_SYMBOL(__down_interruptible);
/*
* release a single token back to a semaphore
* - entered with lock held and interrupts disabled
*/
void __up(struct semaphore *sem)
{
struct task_struct *tsk;
struct sem_waiter *waiter;
semtrace(sem, "Entering __up");
/* grant the token to the process at the front of the queue */
waiter = list_entry(sem->wait_list.next, struct sem_waiter, list);
/* We must be careful not to touch 'waiter' after we set ->task = NULL.
 * It is allocated on the waiter's stack and may become invalid at
* any time after that point (due to a wakeup from another source).
*/
list_del_init(&waiter->list);
tsk = waiter->task;
smp_mb();
waiter->task = NULL;
wake_up_process(tsk);
put_task_struct(tsk);
semtrace(sem, "Leaving __up");
}
EXPORT_SYMBOL(__up);
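A hedged sketch (not from the patch) of the kind of inline up() that would lead into __up() above; the names and the count handling here are assumptions, the only facts taken from this file are that __up() is entered with wait_lock held and that it hands the token directly to the first waiter:

static inline void up_sketch(struct semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);
	if (list_empty(&sem->wait_list))
		atomic_inc(&sem->count);	/* assumption: nobody waiting, just return the token */
	else
		__up(sem);			/* hand the token straight to the first waiter */
	spin_unlock_irqrestore(&sem->wait_lock, flags);
}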
@@ -9,7 +9,7 @@ AFLAGS_pacache.o := -traditional
 obj-y	:= cache.o pacache.o setup.o traps.o time.o irq.o \
 	   pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
-	   ptrace.o hardware.o inventory.o drivers.o semaphore.o \
+	   ptrace.o hardware.o inventory.o drivers.o \
 	   signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
 	   process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \
 	   topology.o
......
@@ -69,11 +69,6 @@ EXPORT_SYMBOL(memcpy_toio);
 EXPORT_SYMBOL(memcpy_fromio);
 EXPORT_SYMBOL(memset_io);
-#include <asm/semaphore.h>
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down);
 extern void $$divI(void);
 extern void $$divU(void);
 extern void $$remI(void);
......
/*
* Semaphore implementation Copyright (c) 2001 Matthew Wilcox, Hewlett-Packard
*/
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/init.h>
/*
* Semaphores are complex as we wish to avoid using two variables.
* `count' has multiple roles, depending on its value. If it is positive
* or zero, there are no waiters. The functions here will never be
* called; see <asm/semaphore.h>
*
* When count is -1 it indicates there is at least one task waiting
* for the semaphore.
*
* When count is less than that, there are '- count - 1' wakeups
* pending. ie if it has value -3, there are 2 wakeups pending.
*
* Note that these functions are only called when there is contention
* on the lock, and as such all this is the "non-critical" part of the
* whole semaphore business. The critical part is the inline stuff in
* <asm/semaphore.h> where we want to avoid any extra jumps and calls.
*/
void __up(struct semaphore *sem)
{
sem->count--;
wake_up(&sem->wait);
}
#define wakers(count) (-1 - count)
#define DOWN_HEAD \
int ret = 0; \
DECLARE_WAITQUEUE(wait, current); \
\
/* Note that someone is waiting */ \
if (sem->count == 0) \
sem->count = -1; \
\
/* protected by the sentry still -- use unlocked version */ \
wait.flags = WQ_FLAG_EXCLUSIVE; \
__add_wait_queue_tail(&sem->wait, &wait); \
lost_race: \
spin_unlock_irq(&sem->sentry); \
#define DOWN_TAIL \
spin_lock_irq(&sem->sentry); \
if (wakers(sem->count) == 0 && ret == 0) \
goto lost_race; /* Someone stole our wakeup */ \
__remove_wait_queue(&sem->wait, &wait); \
current->state = TASK_RUNNING; \
if (!waitqueue_active(&sem->wait) && (sem->count < 0)) \
sem->count = wakers(sem->count);
#define UPDATE_COUNT \
sem->count += (sem->count < 0) ? 1 : - 1;
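Worked values for the count encoding described above (an illustrative annotation, not part of the patch):

/*
 *   count ==  3:  semaphore free, no waiters (these slow paths never run)
 *   count == -1:  at least one waiter, no wakeups pending (wakers(-1) == 0)
 *   count == -3:  waiters present, 2 wakeups pending      (wakers(-3) == 2)
 *
 * UPDATE_COUNT adds 1 to a negative count, consuming one pending wakeup
 * (e.g. -3 -> -2), and subtracts 1 otherwise.
 */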
void __sched __down(struct semaphore * sem)
{
DOWN_HEAD
for(;;) {
set_task_state(current, TASK_UNINTERRUPTIBLE);
/* we can _read_ this without the sentry */
if (sem->count != -1)
break;
schedule();
}
DOWN_TAIL
UPDATE_COUNT
}
int __sched __down_interruptible(struct semaphore * sem)
{
DOWN_HEAD
for(;;) {
set_task_state(current, TASK_INTERRUPTIBLE);
/* we can _read_ this without the sentry */
if (sem->count != -1)
break;
if (signal_pending(current)) {
ret = -EINTR;
break;
}
schedule();
}
DOWN_TAIL
if (!ret) {
UPDATE_COUNT
}
return ret;
}
@@ -12,7 +12,7 @@ CFLAGS_prom_init.o	+= -fPIC
 CFLAGS_btext.o		+= -fPIC
 endif
-obj-y				:= semaphore.o cputable.o ptrace.o syscalls.o \
+obj-y				:= cputable.o ptrace.o syscalls.o \
 				   irq.o align.o signal_32.o pmc.o vdso.o \
 				   init_task.o process.o systbl.o idle.o \
 				   signal.o
......
@@ -15,7 +15,6 @@
 #include <linux/bitops.h>
 #include <asm/page.h>
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/cacheflush.h>
 #include <asm/uaccess.h>
......
/*
* PowerPC-specific semaphore code.
*
* Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
* to eliminate the SMP races in the old version between the updates
* of `count' and `waking'. Now we use negative `count' values to
* indicate that some process(es) are waiting for the semaphore.
*/
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <asm/errno.h>
/*
* Atomically update sem->count.
* This does the equivalent of the following:
*
* old_count = sem->count;
* tmp = MAX(old_count, 0) + incr;
* sem->count = tmp;
* return old_count;
*/
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
int old_count, tmp;
__asm__ __volatile__("\n"
"1: lwarx %0,0,%3\n"
" srawi %1,%0,31\n"
" andc %1,%0,%1\n"
" add %1,%1,%4\n"
PPC405_ERR77(0,%3)
" stwcx. %1,0,%3\n"
" bne 1b"
: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
: "r" (&sem->count), "r" (incr), "m" (sem->count)
: "cc");
return old_count;
}
void __up(struct semaphore *sem)
{
/*
* Note that we incremented count in up() before we came here,
* but that was ineffective since the result was <= 0, and
* any negative value of count is equivalent to 0.
* This ends up setting count to 1, unless count is now > 0
* (i.e. because some other cpu has called up() in the meantime),
* in which case we just increment count.
*/
__sem_update_count(sem, 1);
wake_up(&sem->wait);
}
EXPORT_SYMBOL(__up);
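Illustrative values (not part of the patch) showing how __sem_update_count(sem, 1) realises the rule in the comment above:

/*
 *   count == -2 (sleepers queued):                 max(-2, 0) + 1  ->  count becomes 1
 *   count ==  3 (another cpu's up() already ran):  max( 3, 0) + 1  ->  count becomes 4
 */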
/*
* Note that when we come in to __down or __down_interruptible,
* we have already decremented count, but that decrement was
* ineffective since the result was < 0, and any negative value
* of count is equivalent to 0.
* Thus it is only when we decrement count from some value > 0
* that we have actually got the semaphore.
*/
void __sched __down(struct semaphore *sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
add_wait_queue_exclusive(&sem->wait, &wait);
/*
* Try to get the semaphore. If the count is > 0, then we've
* got the semaphore; we decrement count and exit the loop.
* If the count is 0 or negative, we set it to -1, indicating
* that we are asleep, and then sleep.
*/
while (__sem_update_count(sem, -1) <= 0) {
schedule();
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
}
remove_wait_queue(&sem->wait, &wait);
__set_task_state(tsk, TASK_RUNNING);
/*
* If there are any more sleepers, wake one of them up so
* that it can either get the semaphore, or set count to -1
* indicating that there are still processes sleeping.
*/
wake_up(&sem->wait);
}
EXPORT_SYMBOL(__down);
int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
__set_task_state(tsk, TASK_INTERRUPTIBLE);
add_wait_queue_exclusive(&sem->wait, &wait);
while (__sem_update_count(sem, -1) <= 0) {
if (signal_pending(current)) {
/*
* A signal is pending - give up trying.
* Set sem->count to 0 if it is negative,
* since we are no longer sleeping.
*/
__sem_update_count(sem, 0);
retval = -EINTR;
break;
}
schedule();
set_task_state(tsk, TASK_INTERRUPTIBLE);
}
remove_wait_queue(&sem->wait, &wait);
__set_task_state(tsk, TASK_RUNNING);
wake_up(&sem->wait);
return retval;
}
EXPORT_SYMBOL(__down_interruptible);
/*
* PowerPC-specific semaphore code.
*
* Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
* to eliminate the SMP races in the old version between the updates
* of `count' and `waking'. Now we use negative `count' values to
* indicate that some process(es) are waiting for the semaphore.
*/
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <asm/errno.h>
/*
* Atomically update sem->count.
* This does the equivalent of the following:
*
* old_count = sem->count;
* tmp = MAX(old_count, 0) + incr;
* sem->count = tmp;
* return old_count;
*/
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
int old_count, tmp;
__asm__ __volatile__("\n"
"1: lwarx %0,0,%3\n"
" srawi %1,%0,31\n"
" andc %1,%0,%1\n"
" add %1,%1,%4\n"
PPC405_ERR77(0,%3)
" stwcx. %1,0,%3\n"
" bne 1b"
: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
: "r" (&sem->count), "r" (incr), "m" (sem->count)
: "cc");
return old_count;
}
void __up(struct semaphore *sem)
{
/*
* Note that we incremented count in up() before we came here,
* but that was ineffective since the result was <= 0, and
* any negative value of count is equivalent to 0.
* This ends up setting count to 1, unless count is now > 0
* (i.e. because some other cpu has called up() in the meantime),
* in which case we just increment count.
*/
__sem_update_count(sem, 1);
wake_up(&sem->wait);
}
/*
* Note that when we come in to __down or __down_interruptible,
* we have already decremented count, but that decrement was
* ineffective since the result was < 0, and any negative value
* of count is equivalent to 0.
* Thus it is only when we decrement count from some value > 0
* that we have actually got the semaphore.
*/
void __sched __down(struct semaphore *sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
tsk->state = TASK_UNINTERRUPTIBLE;
add_wait_queue_exclusive(&sem->wait, &wait);
smp_wmb();
/*
* Try to get the semaphore. If the count is > 0, then we've
* got the semaphore; we decrement count and exit the loop.
* If the count is 0 or negative, we set it to -1, indicating
* that we are asleep, and then sleep.
*/
while (__sem_update_count(sem, -1) <= 0) {
schedule();
tsk->state = TASK_UNINTERRUPTIBLE;
}
remove_wait_queue(&sem->wait, &wait);
tsk->state = TASK_RUNNING;
/*
* If there are any more sleepers, wake one of them up so
* that it can either get the semaphore, or set count to -1
* indicating that there are still processes sleeping.
*/
wake_up(&sem->wait);
}
int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
tsk->state = TASK_INTERRUPTIBLE;
add_wait_queue_exclusive(&sem->wait, &wait);
smp_wmb();
while (__sem_update_count(sem, -1) <= 0) {
if (signal_pending(current)) {
/*
* A signal is pending - give up trying.
* Set sem->count to 0 if it is negative,
* since we are no longer sleeping.
*/
__sem_update_count(sem, 0);
retval = -EINTR;
break;
}
schedule();
tsk->state = TASK_INTERRUPTIBLE;
}
tsk->state = TASK_RUNNING;
remove_wait_queue(&sem->wait, &wait);
wake_up(&sem->wait);
return retval;
}
@@ -11,7 +11,7 @@ CFLAGS_smp.o	:= -Wno-nonnull
 obj-y	:= bitmap.o traps.o time.o process.o base.o early.o \
 	   setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
-	   semaphore.o s390_ext.o debug.o irq.o ipl.o dis.o diag.o
+	   s390_ext.o debug.o irq.o ipl.o dis.o diag.o
 obj-y	+= $(if $(CONFIG_64BIT),entry64.o,entry.o)
 obj-y	+= $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
......
@@ -26,13 +26,6 @@ EXPORT_SYMBOL(_ni_bitmap);
 EXPORT_SYMBOL(_zb_findmap);
 EXPORT_SYMBOL(_sb_findmap);
-/*
- * semaphore ops
- */
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
 /*
  * binfmt_elf loader
  */
......
/*
* linux/arch/s390/kernel/semaphore.c
*
* S390 version
* Copyright (C) 1998-2000 IBM Corporation
* Author(s): Martin Schwidefsky
*
* Derived from "linux/arch/i386/kernel/semaphore.c
* Copyright (C) 1999, Linus Torvalds
*
*/
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/semaphore.h>
/*
* Atomically update sem->count. Equivalent to:
* old_val = sem->count.counter;
* new_val = ((old_val >= 0) ? old_val : 0) + incr;
* sem->count.counter = new_val;
* return old_val;
*/
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
int old_val, new_val;
asm volatile(
" l %0,0(%3)\n"
"0: ltr %1,%0\n"
" jhe 1f\n"
" lhi %1,0\n"
"1: ar %1,%4\n"
" cs %0,%1,0(%3)\n"
" jl 0b\n"
: "=&d" (old_val), "=&d" (new_val), "=m" (sem->count)
: "a" (&sem->count), "d" (incr), "m" (sem->count)
: "cc");
return old_val;
}
/*
* The inline function up() incremented count but the result
* was <= 0. This indicates that some process is waiting on
* the semaphore. The semaphore is free and we'll wake the
* first sleeping process, so we set count to 1 unless some
* other cpu has called up in the meantime in which case
* we just increment count by 1.
*/
void __up(struct semaphore *sem)
{
__sem_update_count(sem, 1);
wake_up(&sem->wait);
}
/*
* The inline function down() decremented count and the result
* was < 0. The wait loop will atomically test and update the
* semaphore counter following the rules:
* count > 0: decrement count, wake up queue and exit.
* count <= 0: set count to -1, go to sleep.
*/
void __sched __down(struct semaphore * sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
add_wait_queue_exclusive(&sem->wait, &wait);
while (__sem_update_count(sem, -1) <= 0) {
schedule();
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
}
remove_wait_queue(&sem->wait, &wait);
__set_task_state(tsk, TASK_RUNNING);
wake_up(&sem->wait);
}
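An illustrative two-task trace (not part of the patch) of the rules above, with the semaphore initialised to 1:

/*
 *   A: down()                  count 1 -> 0    fast path succeeds, A holds the semaphore
 *   B: down()                  count 0 -> -1   fast path fails, B enters __down()
 *   B: __sem_update_count(-1)  returns -1      count stays -1, B sleeps
 *   A: up()                    count -1 -> 0   fast path sees sleepers, calls __up()
 *   A: __sem_update_count(+1)  sets count to 1 and wakes B
 *   B: __sem_update_count(-1)  returns  1      count becomes 0, B holds the semaphore
 */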
/*
* Same as __down() with an additional test for signals.
* If a signal is pending the count is updated as follows:
* count > 0: wake up queue and exit.
* count <= 0: set count to 0, wake up queue and exit.
*/
int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
__set_task_state(tsk, TASK_INTERRUPTIBLE);
add_wait_queue_exclusive(&sem->wait, &wait);
while (__sem_update_count(sem, -1) <= 0) {
if (signal_pending(current)) {
__sem_update_count(sem, 0);
retval = -EINTR;
break;
}
schedule();
set_task_state(tsk, TASK_INTERRUPTIBLE);
}
remove_wait_queue(&sem->wait, &wait);
__set_task_state(tsk, TASK_RUNNING);
wake_up(&sem->wait);
return retval;
}
@@ -5,7 +5,7 @@
 extra-y	:= head_32.o init_task.o vmlinux.lds
 obj-y	:= debugtraps.o io.o io_generic.o irq.o machvec.o process_32.o \
-	   ptrace_32.o semaphore.o setup.o signal_32.o sys_sh.o sys_sh32.o \
+	   ptrace_32.o setup.o signal_32.o sys_sh.o sys_sh32.o \
 	   syscalls_32.o time_32.o topology.o traps.o traps_32.o
 obj-y	+= cpu/ timers/
......
 extra-y	:= head_64.o init_task.o vmlinux.lds
 obj-y	:= debugtraps.o io.o io_generic.o irq.o machvec.o process_64.o \
-	   ptrace_64.o semaphore.o setup.o signal_64.o sys_sh.o sys_sh64.o \
+	   ptrace_64.o setup.o signal_64.o sys_sh.o sys_sh64.o \
 	   syscalls_64.o time_64.o topology.o traps.o traps_64.o
 obj-y	+= cpu/ timers/
......
/*
* Just taken from alpha implementation.
* This can't work well, perhaps.
*/
/*
* Generic semaphore code. Buyer beware. Do your own
* specific changes in <asm/semaphore-helper.h>
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/init.h>
#include <asm/semaphore.h>
#include <asm/semaphore-helper.h>
DEFINE_SPINLOCK(semaphore_wake_lock);
/*
* Semaphores are implemented using a two-way counter:
* The "count" variable is decremented for each process
* that tries to sleep, while the "waking" variable is
* incremented when the "up()" code goes to wake up waiting
* processes.
*
* Notably, the inline "up()" and "down()" functions can
* efficiently test if they need to do any extra work (up
* needs to do something only if count was negative before
* the increment operation.
*
* waking_non_zero() (from asm/semaphore.h) must execute
* atomically.
*
* When __up() is called, the count was negative before
* incrementing it, and we need to wake up somebody.
*
* This routine adds one to the count of processes that need to
* wake up and exit. ALL waiting processes actually wake up but
* only the one that gets to the "waking" field first will gate
* through and acquire the semaphore. The others will go back
* to sleep.
*
* Note that these functions are only called when there is
* contention on the lock, and as such all this is the
* "non-critical" part of the whole semaphore business. The
* critical part is the inline stuff in <asm/semaphore.h>
* where we want to avoid any extra jumps and calls.
*/
void __up(struct semaphore *sem)
{
wake_one_more(sem);
wake_up(&sem->wait);
}
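For reference, the waking_non_zero() helper named above lives in <asm/semaphore-helper.h>; the body below is the variant that appears in the xtensa part of this patch, reproduced here only as an illustration:

static inline int waking_non_zero(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers > 0) {
		sem->sleepers--;	/* consume one pending wakeup */
		ret = 1;		/* we gate through and own the semaphore */
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}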
/*
* Perform the "down" function. Return zero for semaphore acquired,
* return negative for signalled out of the function.
*
* If called from __down, the return is ignored and the wait loop is
* not interruptible. This means that a task waiting on a semaphore
* using "down()" cannot be killed until someone does an "up()" on
* the semaphore.
*
* If called from __down_interruptible, the return value gets checked
* upon return. If the return value is negative then the task continues
* with the negative value in the return register (it can be tested by
* the caller).
*
* Either form may be used in conjunction with "up()".
*
*/
#define DOWN_VAR \
struct task_struct *tsk = current; \
wait_queue_t wait; \
init_waitqueue_entry(&wait, tsk);
#define DOWN_HEAD(task_state) \
\
\
tsk->state = (task_state); \
add_wait_queue(&sem->wait, &wait); \
\
/* \
* Ok, we're set up. sem->count is known to be less than zero \
* so we must wait. \
* \
* We can let go the lock for purposes of waiting. \
* We re-acquire it after awaking so as to protect \
* all semaphore operations. \
* \
* If "up()" is called before we call waking_non_zero() then \
* we will catch it right away. If it is called later then \
* we will have to go through a wakeup cycle to catch it. \
* \
* Multiple waiters contend for the semaphore lock to see \
* who gets to gate through and who has to wait some more. \
*/ \
for (;;) {
#define DOWN_TAIL(task_state) \
tsk->state = (task_state); \
} \
tsk->state = TASK_RUNNING; \
remove_wait_queue(&sem->wait, &wait);
void __sched __down(struct semaphore * sem)
{
DOWN_VAR
DOWN_HEAD(TASK_UNINTERRUPTIBLE)
if (waking_non_zero(sem))
break;
schedule();
DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}
int __sched __down_interruptible(struct semaphore * sem)
{
int ret = 0;
DOWN_VAR
DOWN_HEAD(TASK_INTERRUPTIBLE)
ret = waking_non_zero_interruptible(sem, tsk);
if (ret)
{
if (ret == 1)
/* ret != 0 only if we get interrupted -arca */
ret = 0;
break;
}
schedule();
DOWN_TAIL(TASK_INTERRUPTIBLE)
return ret;
}
int __down_trylock(struct semaphore * sem)
{
return waking_non_zero_trylock(sem);
}
@@ -9,7 +9,6 @@
 #include <linux/pci.h>
 #include <linux/irq.h>
 #include <asm/sections.h>
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -48,12 +47,6 @@ EXPORT_SYMBOL(__copy_user);
 EXPORT_SYMBOL(get_vm_area);
 #endif
-/* semaphore exports */
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
 EXPORT_SYMBOL(__udelay);
 EXPORT_SYMBOL(__ndelay);
 EXPORT_SYMBOL(__const_udelay);
......
@@ -16,7 +16,6 @@
 #include <linux/in6.h>
 #include <linux/interrupt.h>
 #include <linux/screen_info.h>
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -37,9 +36,6 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck);
 EXPORT_SYMBOL(screen_info);
 #endif
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__up);
 EXPORT_SYMBOL(__put_user_asm_l);
 EXPORT_SYMBOL(__get_user_asm_l);
 EXPORT_SYMBOL(copy_page);
......
@@ -12,7 +12,7 @@ obj-y := entry.o wof.o wuf.o etrap.o rtrap.o traps.o $(IRQ_OBJS) \
 	    sys_sparc.o sunos_asm.o systbls.o \
 	    time.o windows.o cpu.o devices.o sclow.o \
 	    tadpole.o tick14.o ptrace.o sys_solaris.o \
-	    unaligned.o una_asm.o muldiv.o semaphore.o \
+	    unaligned.o una_asm.o muldiv.o \
 	    prom.o of_device.o devres.o
 devres-y = ../../../kernel/irq/devres.o
......
/* $Id: semaphore.c,v 1.7 2001/04/18 21:06:05 davem Exp $ */
/* sparc32 semaphore implementation, based on i386 version */
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/semaphore.h>
/*
* Semaphores are implemented using a two-way counter:
* The "count" variable is decremented for each process
* that tries to acquire the semaphore, while the "sleeping"
* variable is a count of such acquires.
*
* Notably, the inline "up()" and "down()" functions can
* efficiently test if they need to do any extra work (up
* needs to do something only if count was negative before
* the increment operation.
*
* "sleeping" and the contention routine ordering is
* protected by the semaphore spinlock.
*
* Note that these functions are only called when there is
* contention on the lock, and as such all this is the
* "non-critical" part of the whole semaphore business. The
* critical part is the inline stuff in <asm/semaphore.h>
* where we want to avoid any extra jumps and calls.
*/
/*
* Logic:
* - only on a boundary condition do we need to care. When we go
* from a negative count to a non-negative, we wake people up.
* - when we go from a non-negative count to a negative do we
* (a) synchronize with the "sleeper" count and (b) make sure
* that we're on the wakeup list before we synchronize so that
* we cannot lose wakeup events.
*/
void __up(struct semaphore *sem)
{
wake_up(&sem->wait);
}
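An illustrative sketch (not from the patch) of the fast path that drops into __down() below, using the same atomic24 primitive the slow paths use; the pairing up() increments count and calls __up() when the result is still not positive:

static inline void down_sketch(struct semaphore *sem)
{
	might_sleep();
	if (atomic24_add_negative(-1, &sem->count))	/* new count < 0: contended */
		__down(sem);
}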
static DEFINE_SPINLOCK(semaphore_lock);
void __sched __down(struct semaphore * sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
tsk->state = TASK_UNINTERRUPTIBLE;
add_wait_queue_exclusive(&sem->wait, &wait);
spin_lock_irq(&semaphore_lock);
sem->sleepers++;
for (;;) {
int sleepers = sem->sleepers;
/*
* Add "everybody else" into it. They aren't
* playing, because we own the spinlock.
*/
if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
sem->sleepers = 0;
break;
}
sem->sleepers = 1; /* us - see -1 above */
spin_unlock_irq(&semaphore_lock);
schedule();
tsk->state = TASK_UNINTERRUPTIBLE;
spin_lock_irq(&semaphore_lock);
}
spin_unlock_irq(&semaphore_lock);
remove_wait_queue(&sem->wait, &wait);
tsk->state = TASK_RUNNING;
wake_up(&sem->wait);
}
int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
tsk->state = TASK_INTERRUPTIBLE;
add_wait_queue_exclusive(&sem->wait, &wait);
spin_lock_irq(&semaphore_lock);
sem->sleepers ++;
for (;;) {
int sleepers = sem->sleepers;
/*
* With signals pending, this turns into
* the trylock failure case - we won't be
 * sleeping, and we can't get the lock as
* it has contention. Just correct the count
* and exit.
*/
if (signal_pending(current)) {
retval = -EINTR;
sem->sleepers = 0;
atomic24_add(sleepers, &sem->count);
break;
}
/*
* Add "everybody else" into it. They aren't
* playing, because we own the spinlock. The
* "-1" is because we're still hoping to get
* the lock.
*/
if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
sem->sleepers = 0;
break;
}
sem->sleepers = 1; /* us - see -1 above */
spin_unlock_irq(&semaphore_lock);
schedule();
tsk->state = TASK_INTERRUPTIBLE;
spin_lock_irq(&semaphore_lock);
}
spin_unlock_irq(&semaphore_lock);
tsk->state = TASK_RUNNING;
remove_wait_queue(&sem->wait, &wait);
wake_up(&sem->wait);
return retval;
}
/*
* Trylock failed - make sure we correct for
* having decremented the count.
*/
int __down_trylock(struct semaphore * sem)
{
int sleepers;
unsigned long flags;
spin_lock_irqsave(&semaphore_lock, flags);
sleepers = sem->sleepers + 1;
sem->sleepers = 0;
/*
* Add "everybody else" and us into it. They aren't
* playing, because we own the spinlock.
*/
if (!atomic24_add_negative(sleepers, &sem->count))
wake_up(&sem->wait);
spin_unlock_irqrestore(&semaphore_lock, flags);
return 1;
}
@@ -107,11 +107,6 @@ EXPORT_SYMBOL(___rw_read_try);
 EXPORT_SYMBOL(___rw_read_exit);
 EXPORT_SYMBOL(___rw_write_enter);
 #endif
-/* semaphores */
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__down_interruptible);
 EXPORT_SYMBOL(sparc_valid_addr_bitmap);
 EXPORT_SYMBOL(phys_base);
......
@@ -10,7 +10,7 @@ extra-y := head.o init_task.o vmlinux.lds
 obj-y	:= process.o setup.o cpu.o idprom.o \
 	   traps.o auxio.o una_asm.o sysfs.o iommu.o \
 	   irq.o ptrace.o time.o sys_sparc.o signal.o \
-	   unaligned.o central.o pci.o starfire.o semaphore.o \
+	   unaligned.o central.o pci.o starfire.o \
 	   power.o sbus.o sparc64_ksyms.o chmc.o \
 	   visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o
......
/* semaphore.c: Sparc64 semaphore implementation.
*
* This is basically the PPC semaphore scheme ported to use
* the sparc64 atomic instructions, so see the PPC code for
* credits.
*/
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/init.h>
/*
* Atomically update sem->count.
* This does the equivalent of the following:
*
* old_count = sem->count;
* tmp = MAX(old_count, 0) + incr;
* sem->count = tmp;
* return old_count;
*/
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
int old_count, tmp;
__asm__ __volatile__("\n"
" ! __sem_update_count old_count(%0) tmp(%1) incr(%4) &sem->count(%3)\n"
"1: ldsw [%3], %0\n"
" mov %0, %1\n"
" cmp %0, 0\n"
" movl %%icc, 0, %1\n"
" add %1, %4, %1\n"
" cas [%3], %0, %1\n"
" cmp %0, %1\n"
" membar #StoreLoad | #StoreStore\n"
" bne,pn %%icc, 1b\n"
" nop\n"
: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
: "r" (&sem->count), "r" (incr), "m" (sem->count)
: "cc");
return old_count;
}
static void __up(struct semaphore *sem)
{
__sem_update_count(sem, 1);
wake_up(&sem->wait);
}
void up(struct semaphore *sem)
{
/* This atomically does:
* old_val = sem->count;
* new_val = sem->count + 1;
* sem->count = new_val;
* if (old_val < 0)
* __up(sem);
*
* The (old_val < 0) test is equivalent to
* the more straightforward (new_val <= 0),
* but it is easier to test the former because
* of how the CAS instruction works.
*/
__asm__ __volatile__("\n"
" ! up sem(%0)\n"
" membar #StoreLoad | #LoadLoad\n"
"1: lduw [%0], %%g1\n"
" add %%g1, 1, %%g7\n"
" cas [%0], %%g1, %%g7\n"
" cmp %%g1, %%g7\n"
" bne,pn %%icc, 1b\n"
" addcc %%g7, 1, %%g0\n"
" membar #StoreLoad | #StoreStore\n"
" ble,pn %%icc, 3f\n"
" nop\n"
"2:\n"
" .subsection 2\n"
"3: mov %0, %%g1\n"
" save %%sp, -160, %%sp\n"
" call %1\n"
" mov %%g1, %%o0\n"
" ba,pt %%xcc, 2b\n"
" restore\n"
" .previous\n"
: : "r" (sem), "i" (__up)
: "g1", "g2", "g3", "g7", "memory", "cc");
}
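A plain-C rendering (illustrative only, not part of the patch) of the cas loop in up() above, following the pseudo-code in its comment:

static void up_sketch(struct semaphore *sem)
{
	int old_val;

	/* atomically: old_val = count; count = old_val + 1; */
	do {
		old_val = atomic_read(&sem->count);
	} while (atomic_cmpxchg(&sem->count, old_val, old_val + 1) != old_val);

	if (old_val < 0)		/* sleepers present, go and wake one */
		__up(sem);
}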
static void __sched __down(struct semaphore * sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
tsk->state = TASK_UNINTERRUPTIBLE;
add_wait_queue_exclusive(&sem->wait, &wait);
while (__sem_update_count(sem, -1) <= 0) {
schedule();
tsk->state = TASK_UNINTERRUPTIBLE;
}
remove_wait_queue(&sem->wait, &wait);
tsk->state = TASK_RUNNING;
wake_up(&sem->wait);
}
void __sched down(struct semaphore *sem)
{
might_sleep();
/* This atomically does:
* old_val = sem->count;
* new_val = sem->count - 1;
* sem->count = new_val;
* if (old_val < 1)
* __down(sem);
*
* The (old_val < 1) test is equivalent to
* the more straightforward (new_val < 0),
* but it is easier to test the former because
* of how the CAS instruction works.
*/
__asm__ __volatile__("\n"
" ! down sem(%0)\n"
"1: lduw [%0], %%g1\n"
" sub %%g1, 1, %%g7\n"
" cas [%0], %%g1, %%g7\n"
" cmp %%g1, %%g7\n"
" bne,pn %%icc, 1b\n"
" cmp %%g7, 1\n"
" membar #StoreLoad | #StoreStore\n"
" bl,pn %%icc, 3f\n"
" nop\n"
"2:\n"
" .subsection 2\n"
"3: mov %0, %%g1\n"
" save %%sp, -160, %%sp\n"
" call %1\n"
" mov %%g1, %%o0\n"
" ba,pt %%xcc, 2b\n"
" restore\n"
" .previous\n"
: : "r" (sem), "i" (__down)
: "g1", "g2", "g3", "g7", "memory", "cc");
}
int down_trylock(struct semaphore *sem)
{
int ret;
/* This atomically does:
* old_val = sem->count;
* new_val = sem->count - 1;
* if (old_val < 1) {
* ret = 1;
* } else {
* sem->count = new_val;
* ret = 0;
* }
*
* The (old_val < 1) test is equivalent to
* the more straightforward (new_val < 0),
* but it is easier to test the former because
* of how the CAS instruction works.
*/
__asm__ __volatile__("\n"
" ! down_trylock sem(%1) ret(%0)\n"
"1: lduw [%1], %%g1\n"
" sub %%g1, 1, %%g7\n"
" cmp %%g1, 1\n"
" bl,pn %%icc, 2f\n"
" mov 1, %0\n"
" cas [%1], %%g1, %%g7\n"
" cmp %%g1, %%g7\n"
" bne,pn %%icc, 1b\n"
" mov 0, %0\n"
" membar #StoreLoad | #StoreStore\n"
"2:\n"
: "=&r" (ret)
: "r" (sem)
: "g1", "g7", "memory", "cc");
return ret;
}
static int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
tsk->state = TASK_INTERRUPTIBLE;
add_wait_queue_exclusive(&sem->wait, &wait);
while (__sem_update_count(sem, -1) <= 0) {
if (signal_pending(current)) {
__sem_update_count(sem, 0);
retval = -EINTR;
break;
}
schedule();
tsk->state = TASK_INTERRUPTIBLE;
}
tsk->state = TASK_RUNNING;
remove_wait_queue(&sem->wait, &wait);
wake_up(&sem->wait);
return retval;
}
int __sched down_interruptible(struct semaphore *sem)
{
int ret = 0;
might_sleep();
/* This atomically does:
* old_val = sem->count;
* new_val = sem->count - 1;
* sem->count = new_val;
* if (old_val < 1)
* ret = __down_interruptible(sem);
*
* The (old_val < 1) test is equivalent to
* the more straightforward (new_val < 0),
* but it is easier to test the former because
* of how the CAS instruction works.
*/
__asm__ __volatile__("\n"
" ! down_interruptible sem(%2) ret(%0)\n"
"1: lduw [%2], %%g1\n"
" sub %%g1, 1, %%g7\n"
" cas [%2], %%g1, %%g7\n"
" cmp %%g1, %%g7\n"
" bne,pn %%icc, 1b\n"
" cmp %%g7, 1\n"
" membar #StoreLoad | #StoreStore\n"
" bl,pn %%icc, 3f\n"
" nop\n"
"2:\n"
" .subsection 2\n"
"3: mov %2, %%g1\n"
" save %%sp, -160, %%sp\n"
" call %3\n"
" mov %%g1, %%o0\n"
" ba,pt %%xcc, 2b\n"
" restore\n"
" .previous\n"
: "=r" (ret)
: "0" (ret), "r" (sem), "i" (__down_interruptible)
: "g1", "g2", "g3", "g7", "memory", "cc");
return ret;
}
@@ -130,12 +130,6 @@ EXPORT_SYMBOL(_mcount);
 EXPORT_SYMBOL(sparc64_get_clock_tick);
-/* semaphores */
-EXPORT_SYMBOL(down);
-EXPORT_SYMBOL(down_trylock);
-EXPORT_SYMBOL(down_interruptible);
-EXPORT_SYMBOL(up);
 /* RW semaphores */
 EXPORT_SYMBOL(__down_read);
 EXPORT_SYMBOL(__down_read_trylock);
......
@@ -19,10 +19,6 @@ config 64BIT
 	bool
 	default n
-config SEMAPHORE_SLEEPERS
-	bool
-	default y
-
 config 3_LEVEL_PGTABLES
 	bool "Three-level pagetables (EXPERIMENTAL)"
 	default n
......
@@ -11,10 +11,6 @@ config RWSEM_GENERIC_SPINLOCK
 	bool
 	default y
-config SEMAPHORE_SLEEPERS
-	bool
-	default y
-
 config 3_LEVEL_PGTABLES
 	bool
 	default y
......
#include "linux/module.h" #include "linux/module.h"
#include "linux/in6.h"
#include "linux/rwsem.h"
#include "asm/byteorder.h"
#include "asm/delay.h"
#include "asm/semaphore.h"
#include "asm/uaccess.h"
#include "asm/checksum.h" #include "asm/checksum.h"
#include "asm/errno.h"
EXPORT_SYMBOL(__down_failed);
EXPORT_SYMBOL(__down_failed_interruptible);
EXPORT_SYMBOL(__down_failed_trylock);
EXPORT_SYMBOL(__up_wakeup);
/* Networking helper routines. */ /* Networking helper routines. */
EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial);
@@ -3,7 +3,7 @@ OBJ = built-in.o
 .S.o:
 	$(CC) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o
-OBJS = ptrace.o sigcontext.o semaphore.o checksum.o miscthings.o misc.o \
+OBJS = ptrace.o sigcontext.o checksum.o miscthings.o misc.o \
 	ptrace_user.o sysrq.o
 EXTRA_AFLAGS := -DCONFIG_PPC32 -I. -I$(srctree)/arch/ppc/kernel
@@ -20,10 +20,6 @@ ptrace_user.o: ptrace_user.c
 sigcontext.o: sigcontext.c
 	$(CC) $(USER_CFLAGS) $(EXTRA_CFLAGS) -c -o $@ $<
-semaphore.c:
-	rm -f $@
-	ln -s $(srctree)/arch/ppc/kernel/$@ $@
 checksum.S:
 	rm -f $@
 	ln -s $(srctree)/arch/ppc/lib/$@ $@
@@ -66,4 +62,4 @@ misc.o: misc.S ppc_defs.h
 	$(CC) $(EXTRA_AFLAGS) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o
 	rm -f asm
-clean-files := $(OBJS) ppc_defs.h checksum.S semaphore.c mk_defs.c
+clean-files := $(OBJS) ppc_defs.h checksum.S mk_defs.c
#include "linux/module.h" #include "linux/module.h"
#include "linux/in6.h" #include "asm/string.h"
#include "linux/rwsem.h"
#include "asm/byteorder.h"
#include "asm/semaphore.h"
#include "asm/uaccess.h"
#include "asm/checksum.h"
#include "asm/errno.h"
EXPORT_SYMBOL(__down_failed);
EXPORT_SYMBOL(__down_failed_interruptible);
EXPORT_SYMBOL(__down_failed_trylock);
EXPORT_SYMBOL(__up_wakeup);
/*XXX: we need them because they would be exported by x86_64 */ /*XXX: we need them because they would be exported by x86_64 */
EXPORT_SYMBOL(__memcpy); EXPORT_SYMBOL(__memcpy);
@@ -11,7 +11,7 @@
 extra-y := head.o init_task.o vmlinux.lds
-obj-y += intv.o entry.o process.o syscalls.o time.o semaphore.o setup.o \
+obj-y += intv.o entry.o process.o syscalls.o time.o setup.o \
 	 signal.o irq.o mach.o ptrace.o bug.o
 obj-$(CONFIG_MODULES) += module.o v850_ksyms.o
 # chip-specific code
......
/*
* arch/v850/kernel/semaphore.c -- Semaphore support
*
* Copyright (C) 1998-2000 IBM Corporation
* Copyright (C) 1999 Linus Torvalds
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
* archive for more details.
*
* This file is a copy of the s390 version, arch/s390/kernel/semaphore.c
* Author(s): Martin Schwidefsky
* which was derived from the i386 version, linux/arch/i386/kernel/semaphore.c
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/semaphore.h>
/*
* Semaphores are implemented using a two-way counter:
* The "count" variable is decremented for each process
* that tries to acquire the semaphore, while the "sleeping"
* variable is a count of such acquires.
*
* Notably, the inline "up()" and "down()" functions can
* efficiently test if they need to do any extra work (up
* needs to do something only if count was negative before
* the increment operation.
*
* "sleeping" and the contention routine ordering is
* protected by the semaphore spinlock.
*
* Note that these functions are only called when there is
* contention on the lock, and as such all this is the
* "non-critical" part of the whole semaphore business. The
* critical part is the inline stuff in <asm/semaphore.h>
* where we want to avoid any extra jumps and calls.
*/
/*
* Logic:
* - only on a boundary condition do we need to care. When we go
* from a negative count to a non-negative, we wake people up.
* - when we go from a non-negative count to a negative do we
* (a) synchronize with the "sleeper" count and (b) make sure
* that we're on the wakeup list before we synchronize so that
* we cannot lose wakeup events.
*/
void __up(struct semaphore *sem)
{
wake_up(&sem->wait);
}
static DEFINE_SPINLOCK(semaphore_lock);
void __sched __down(struct semaphore * sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
tsk->state = TASK_UNINTERRUPTIBLE;
add_wait_queue_exclusive(&sem->wait, &wait);
spin_lock_irq(&semaphore_lock);
sem->sleepers++;
for (;;) {
int sleepers = sem->sleepers;
/*
* Add "everybody else" into it. They aren't
* playing, because we own the spinlock.
*/
if (!atomic_add_negative(sleepers - 1, &sem->count)) {
sem->sleepers = 0;
break;
}
sem->sleepers = 1; /* us - see -1 above */
spin_unlock_irq(&semaphore_lock);
schedule();
tsk->state = TASK_UNINTERRUPTIBLE;
spin_lock_irq(&semaphore_lock);
}
spin_unlock_irq(&semaphore_lock);
remove_wait_queue(&sem->wait, &wait);
tsk->state = TASK_RUNNING;
wake_up(&sem->wait);
}
int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
tsk->state = TASK_INTERRUPTIBLE;
add_wait_queue_exclusive(&sem->wait, &wait);
spin_lock_irq(&semaphore_lock);
sem->sleepers ++;
for (;;) {
int sleepers = sem->sleepers;
/*
* With signals pending, this turns into
* the trylock failure case - we won't be
 * sleeping, and we can't get the lock as
* it has contention. Just correct the count
* and exit.
*/
if (signal_pending(current)) {
retval = -EINTR;
sem->sleepers = 0;
atomic_add(sleepers, &sem->count);
break;
}
/*
* Add "everybody else" into it. They aren't
* playing, because we own the spinlock. The
* "-1" is because we're still hoping to get
* the lock.
*/
if (!atomic_add_negative(sleepers - 1, &sem->count)) {
sem->sleepers = 0;
break;
}
sem->sleepers = 1; /* us - see -1 above */
spin_unlock_irq(&semaphore_lock);
schedule();
tsk->state = TASK_INTERRUPTIBLE;
spin_lock_irq(&semaphore_lock);
}
spin_unlock_irq(&semaphore_lock);
tsk->state = TASK_RUNNING;
remove_wait_queue(&sem->wait, &wait);
wake_up(&sem->wait);
return retval;
}
/*
* Trylock failed - make sure we correct for
* having decremented the count.
*/
int __down_trylock(struct semaphore * sem)
{
unsigned long flags;
int sleepers;
spin_lock_irqsave(&semaphore_lock, flags);
sleepers = sem->sleepers + 1;
sem->sleepers = 0;
/*
* Add "everybody else" and us into it. They aren't
* playing, because we own the spinlock.
*/
if (!atomic_add_negative(sleepers, &sem->count))
wake_up(&sem->wait);
spin_unlock_irqrestore(&semaphore_lock, flags);
return 1;
}
@@ -11,7 +11,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/current.h>
@@ -34,12 +33,6 @@ EXPORT_SYMBOL (memset);
 EXPORT_SYMBOL (memcpy);
 EXPORT_SYMBOL (memmove);
-/* semaphores */
-EXPORT_SYMBOL (__down);
-EXPORT_SYMBOL (__down_interruptible);
-EXPORT_SYMBOL (__down_trylock);
-EXPORT_SYMBOL (__up);
 /*
  * libgcc functions - functions that are used internally by the
  * compiler... (prototypes are not correct though, but that
......
@@ -53,9 +53,6 @@ config STACKTRACE_SUPPORT
 config HAVE_LATENCYTOP_SUPPORT
 	def_bool y
-config SEMAPHORE_SLEEPERS
-	def_bool y
-
 config FAST_CMPXCHG_LOCAL
 	bool
 	default y
......
 #include <linux/module.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/desc.h>
 #include <asm/pgtable.h>
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);
 /* Networking helper routines. */
 EXPORT_SYMBOL(csum_partial_copy_generic);
......
@@ -4,7 +4,6 @@
 #include <linux/module.h>
 #include <linux/smp.h>
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -12,11 +11,6 @@
 EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
 EXPORT_SYMBOL(__get_user_4);
......
@@ -30,89 +30,6 @@
 * value or just clobbered..
 */
	.section .sched.text, "ax"
ENTRY(__down_failed)
CFI_STARTPROC
FRAME
pushl %edx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET edx,0
pushl %ecx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ecx,0
call __down
popl %ecx
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE ecx
popl %edx
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE edx
ENDFRAME
ret
CFI_ENDPROC
ENDPROC(__down_failed)
ENTRY(__down_failed_interruptible)
CFI_STARTPROC
FRAME
pushl %edx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET edx,0
pushl %ecx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ecx,0
call __down_interruptible
popl %ecx
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE ecx
popl %edx
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE edx
ENDFRAME
ret
CFI_ENDPROC
ENDPROC(__down_failed_interruptible)
ENTRY(__down_failed_trylock)
CFI_STARTPROC
FRAME
pushl %edx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET edx,0
pushl %ecx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ecx,0
call __down_trylock
popl %ecx
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE ecx
popl %edx
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE edx
ENDFRAME
ret
CFI_ENDPROC
ENDPROC(__down_failed_trylock)
ENTRY(__up_wakeup)
CFI_STARTPROC
FRAME
pushl %edx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET edx,0
pushl %ecx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ecx,0
call __up
popl %ecx
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE ecx
popl %edx
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE edx
ENDFRAME
ret
CFI_ENDPROC
ENDPROC(__up_wakeup)
/*
 * rw spinlock fallbacks
......
@@ -41,11 +41,6 @@
 	thunk rwsem_downgrade_thunk,rwsem_downgrade_wake
 #endif
-	thunk __down_failed,__down
-	thunk_retrax __down_failed_interruptible,__down_interruptible
-	thunk_retrax __down_failed_trylock,__down_trylock
-	thunk __up_wakeup,__up
 #ifdef CONFIG_TRACE_IRQFLAGS
 	thunk trace_hardirqs_on_thunk,trace_hardirqs_on
 	thunk trace_hardirqs_off_thunk,trace_hardirqs_off
......
@@ -5,7 +5,7 @@
 extra-y := head.o vmlinux.lds
-obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \
+obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o \
 	 setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \
 	 pci-dma.o init_task.o io.o
......
/*
* arch/xtensa/kernel/semaphore.c
*
* Generic semaphore code. Buyer beware. Do your own specific changes
* in <asm/semaphore-helper.h>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
* Chris Zankel <chris@zankel.net>
* Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
* Kevin Chea
*/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/init.h>
#include <asm/semaphore.h>
#include <asm/errno.h>
/*
* These two _must_ execute atomically wrt each other.
*/
static __inline__ void wake_one_more(struct semaphore * sem)
{
atomic_inc((atomic_t *)&sem->sleepers);
}
static __inline__ int waking_non_zero(struct semaphore *sem)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&semaphore_wake_lock, flags);
if (sem->sleepers > 0) {
sem->sleepers--;
ret = 1;
}
spin_unlock_irqrestore(&semaphore_wake_lock, flags);
return ret;
}
/*
* waking_non_zero_interruptible:
* 1 got the lock
* 0 go to sleep
* -EINTR interrupted
*
* We must undo the sem->count down_interruptible() increment while we are
* protected by the spinlock in order to make atomic this atomic_inc() with the
* atomic_read() in wake_one_more(), otherwise we can race. -arca
*/
static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
struct task_struct *tsk)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&semaphore_wake_lock, flags);
if (sem->sleepers > 0) {
sem->sleepers--;
ret = 1;
} else if (signal_pending(tsk)) {
atomic_inc(&sem->count);
ret = -EINTR;
}
spin_unlock_irqrestore(&semaphore_wake_lock, flags);
return ret;
}
/*
* waking_non_zero_trylock:
* 1 failed to lock
* 0 got the lock
*
* We must undo the sem->count down_trylock() increment while we are
* protected by the spinlock in order to make atomic this atomic_inc() with the
* atomic_read() in wake_one_more(), otherwise we can race. -arca
*/
static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
{
unsigned long flags;
int ret = 1;
spin_lock_irqsave(&semaphore_wake_lock, flags);
if (sem->sleepers <= 0)
atomic_inc(&sem->count);
else {
sem->sleepers--;
ret = 0;
}
spin_unlock_irqrestore(&semaphore_wake_lock, flags);
return ret;
}
DEFINE_SPINLOCK(semaphore_wake_lock);
/*
* Semaphores are implemented using a two-way counter:
* The "count" variable is decremented for each process
* that tries to sleep, while the "waking" variable is
* incremented when the "up()" code goes to wake up waiting
* processes.
*
* Notably, the inline "up()" and "down()" functions can
* efficiently test if they need to do any extra work (up
* needs to do something only if count was negative before
* the increment operation.
*
* waking_non_zero() (from asm/semaphore.h) must execute
* atomically.
*
* When __up() is called, the count was negative before
* incrementing it, and we need to wake up somebody.
*
* This routine adds one to the count of processes that need to
* wake up and exit. ALL waiting processes actually wake up but
* only the one that gets to the "waking" field first will gate
* through and acquire the semaphore. The others will go back
* to sleep.
*
* Note that these functions are only called when there is
* contention on the lock, and as such all this is the
* "non-critical" part of the whole semaphore business. The
* critical part is the inline stuff in <asm/semaphore.h>
* where we want to avoid any extra jumps and calls.
*/
void __up(struct semaphore *sem)
{
wake_one_more(sem);
wake_up(&sem->wait);
}
/*
* Perform the "down" function. Return zero for semaphore acquired,
* return negative for signalled out of the function.
*
* If called from __down, the return is ignored and the wait loop is
* not interruptible. This means that a task waiting on a semaphore
* using "down()" cannot be killed until someone does an "up()" on
* the semaphore.
*
* If called from __down_interruptible, the return value gets checked
* upon return. If the return value is negative then the task continues
* with the negative value in the return register (it can be tested by
* the caller).
*
* Either form may be used in conjunction with "up()".
*
*/
#define DOWN_VAR \
struct task_struct *tsk = current; \
wait_queue_t wait; \
init_waitqueue_entry(&wait, tsk);
#define DOWN_HEAD(task_state) \
\
\
tsk->state = (task_state); \
add_wait_queue(&sem->wait, &wait); \
\
/* \
* Ok, we're set up. sem->count is known to be less than zero \
* so we must wait. \
* \
* We can let go the lock for purposes of waiting. \
* We re-acquire it after awaking so as to protect \
* all semaphore operations. \
* \
* If "up()" is called before we call waking_non_zero() then \
* we will catch it right away. If it is called later then \
* we will have to go through a wakeup cycle to catch it. \
* \
* Multiple waiters contend for the semaphore lock to see \
* who gets to gate through and who has to wait some more. \
*/ \
for (;;) {
#define DOWN_TAIL(task_state) \
tsk->state = (task_state); \
} \
tsk->state = TASK_RUNNING; \
remove_wait_queue(&sem->wait, &wait);
void __sched __down(struct semaphore * sem)
{
DOWN_VAR
DOWN_HEAD(TASK_UNINTERRUPTIBLE)
if (waking_non_zero(sem))
break;
schedule();
DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}
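For readability, here is what __down() above expands to once DOWN_VAR, DOWN_HEAD and DOWN_TAIL are substituted (an editor-added expansion of the macros shown above, with the long wait-loop comment trimmed):

void __sched __down(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	wait_queue_t wait;

	init_waitqueue_entry(&wait, tsk);
	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue(&sem->wait, &wait);

	for (;;) {
		if (waking_non_zero(sem))		/* consumed a wakeup: we own the semaphore */
			break;
		schedule();
		tsk->state = TASK_UNINTERRUPTIBLE;	/* DOWN_TAIL: re-arm before re-checking */
	}

	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}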
int __sched __down_interruptible(struct semaphore * sem)
{
int ret = 0;
DOWN_VAR
DOWN_HEAD(TASK_INTERRUPTIBLE)
ret = waking_non_zero_interruptible(sem, tsk);
if (ret)
{
if (ret == 1)
/* ret != 0 only if we get interrupted -arca */
ret = 0;
break;
}
schedule();
DOWN_TAIL(TASK_INTERRUPTIBLE)
return ret;
}
int __down_trylock(struct semaphore * sem)
{
return waking_non_zero_trylock(sem);
}
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
#include <asm/io.h> #include <asm/io.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/semaphore.h>
#ifdef CONFIG_BLK_DEV_FD #ifdef CONFIG_BLK_DEV_FD
#include <asm/floppy.h> #include <asm/floppy.h>
#endif #endif
...@@ -71,14 +70,6 @@ EXPORT_SYMBOL(__umodsi3); ...@@ -71,14 +70,6 @@ EXPORT_SYMBOL(__umodsi3);
EXPORT_SYMBOL(__udivdi3); EXPORT_SYMBOL(__udivdi3);
EXPORT_SYMBOL(__umoddi3); EXPORT_SYMBOL(__umoddi3);
/*
* Semaphore operations
*/
EXPORT_SYMBOL(__down);
EXPORT_SYMBOL(__down_interruptible);
EXPORT_SYMBOL(__down_trylock);
EXPORT_SYMBOL(__up);
#ifdef CONFIG_NET #ifdef CONFIG_NET
/* /*
* Networking support * Networking support
......
...@@ -4,6 +4,8 @@ ...@@ -4,6 +4,8 @@
* Copyright (C) 2000 Andrew Henroid * Copyright (C) 2000 Andrew Henroid
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (c) 2008 Intel Corporation
* Author: Matthew Wilcox <willy@linux.intel.com>
* *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* *
...@@ -37,15 +39,18 @@ ...@@ -37,15 +39,18 @@
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/nmi.h> #include <linux/nmi.h>
#include <linux/acpi.h> #include <linux/acpi.h>
#include <acpi/acpi.h>
#include <asm/io.h>
#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <asm/uaccess.h>
#include <linux/efi.h> #include <linux/efi.h>
#include <linux/ioport.h> #include <linux/ioport.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#define _COMPONENT ACPI_OS_SERVICES #define _COMPONENT ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl"); ACPI_MODULE_NAME("osl");
...@@ -764,7 +769,6 @@ acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle) ...@@ -764,7 +769,6 @@ acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{ {
struct semaphore *sem = NULL; struct semaphore *sem = NULL;
sem = acpi_os_allocate(sizeof(struct semaphore)); sem = acpi_os_allocate(sizeof(struct semaphore));
if (!sem) if (!sem)
return AE_NO_MEMORY; return AE_NO_MEMORY;
...@@ -791,12 +795,12 @@ acpi_status acpi_os_delete_semaphore(acpi_handle handle) ...@@ -791,12 +795,12 @@ acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{ {
struct semaphore *sem = (struct semaphore *)handle; struct semaphore *sem = (struct semaphore *)handle;
if (!sem) if (!sem)
return AE_BAD_PARAMETER; return AE_BAD_PARAMETER;
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle)); ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
BUG_ON(!list_empty(&sem->wait_list));
kfree(sem); kfree(sem);
sem = NULL; sem = NULL;
...@@ -804,21 +808,15 @@ acpi_status acpi_os_delete_semaphore(acpi_handle handle) ...@@ -804,21 +808,15 @@ acpi_status acpi_os_delete_semaphore(acpi_handle handle)
} }
/* /*
* TODO: The kernel doesn't have a 'down_timeout' function -- had to
* improvise. The process is to sleep for one scheduler quantum
* until the semaphore becomes available. Downside is that this
* may result in starvation for timeout-based waits when there's
* lots of semaphore activity.
*
* TODO: Support for units > 1? * TODO: Support for units > 1?
*/ */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout) acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{ {
acpi_status status = AE_OK; acpi_status status = AE_OK;
struct semaphore *sem = (struct semaphore *)handle; struct semaphore *sem = (struct semaphore *)handle;
long jiffies;
int ret = 0; int ret = 0;
if (!sem || (units < 1)) if (!sem || (units < 1))
return AE_BAD_PARAMETER; return AE_BAD_PARAMETER;
...@@ -828,58 +826,14 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout) ...@@ -828,58 +826,14 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n", ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
handle, units, timeout)); handle, units, timeout));
/* if (timeout == ACPI_WAIT_FOREVER)
* This can be called during resume with interrupts off. jiffies = MAX_SCHEDULE_TIMEOUT;
* Like boot-time, we should be single threaded and will else
* always get the lock if we try -- timeout or not. jiffies = msecs_to_jiffies(timeout);
* If this doesn't succeed, then we will oops courtesy of
* might_sleep() in down(). ret = down_timeout(sem, jiffies);
*/ if (ret)
if (!down_trylock(sem)) status = AE_TIME;
return AE_OK;
switch (timeout) {
/*
* No Wait:
* --------
* A zero timeout value indicates that we shouldn't wait - just
* acquire the semaphore if available otherwise return AE_TIME
* (a.k.a. 'would block').
*/
case 0:
if (down_trylock(sem))
status = AE_TIME;
break;
/*
* Wait Indefinitely:
* ------------------
*/
case ACPI_WAIT_FOREVER:
down(sem);
break;
/*
* Wait w/ Timeout:
* ----------------
*/
default:
// TODO: A better timeout algorithm?
{
int i = 0;
static const int quantum_ms = 1000 / HZ;
ret = down_trylock(sem);
for (i = timeout; (i > 0 && ret != 0); i -= quantum_ms) {
schedule_timeout_interruptible(1);
ret = down_trylock(sem);
}
if (ret != 0)
status = AE_TIME;
}
break;
}
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
...@@ -902,7 +856,6 @@ acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units) ...@@ -902,7 +856,6 @@ acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{ {
struct semaphore *sem = (struct semaphore *)handle; struct semaphore *sem = (struct semaphore *)handle;
if (!sem || (units < 1)) if (!sem || (units < 1))
return AE_BAD_PARAMETER; return AE_BAD_PARAMETER;
......
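The osl.c hunk above is the ACPI payload of this series: the improvised trylock-and-sleep polling loop is replaced by the new down_timeout() primitive. As a hedged illustration of the new call (the my_dev structure, the cmd_sem field and the 500 ms budget are invented for the example; down_timeout(), msecs_to_jiffies(), sema_init() and up() are the kernel interfaces used), a caller with a deadline now looks roughly like this:

/* Editor's sketch, not part of this commit: a caller using the new down_timeout(). */
#include <linux/semaphore.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct my_dev {
	struct semaphore cmd_sem;	/* initialised elsewhere with sema_init(&dev->cmd_sem, 1) */
};

static int my_dev_send_cmd(struct my_dev *dev)
{
	/* Sleep until the slot is free, but give up after roughly 500 ms. */
	int ret = down_timeout(&dev->cmd_sem, msecs_to_jiffies(500));

	if (ret)			/* -ETIME if the timeout expired first */
		return ret;

	/* ... issue the command ... */

	up(&dev->cmd_sem);
	return 0;
}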
#ifndef _ALPHA_SEMAPHORE_H #include <linux/semaphore.h>
#define _ALPHA_SEMAPHORE_H
/*
* SMP- and interrupt-safe semaphores..
*
* (C) Copyright 1996 Linus Torvalds
* (C) Copyright 1996, 2000 Richard Henderson
*/
#include <asm/current.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <linux/compiler.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
struct semaphore {
atomic_t count;
wait_queue_head_t wait;
};
#define __SEMAPHORE_INITIALIZER(name, n) \
{ \
.count = ATOMIC_INIT(n), \
.wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
}
#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
static inline void sema_init(struct semaphore *sem, int val)
{
/*
* Logically,
* *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
 * except that gcc generates better code when the fields are initialized individually.
*/
atomic_set(&sem->count, val);
init_waitqueue_head(&sem->wait);
}
static inline void init_MUTEX (struct semaphore *sem)
{
sema_init(sem, 1);
}
static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
sema_init(sem, 0);
}
extern void down(struct semaphore *);
extern void __down_failed(struct semaphore *);
extern int down_interruptible(struct semaphore *);
extern int __down_failed_interruptible(struct semaphore *);
extern int down_trylock(struct semaphore *);
extern void up(struct semaphore *);
extern void __up_wakeup(struct semaphore *);
/*
* Hidden out of line code is fun, but extremely messy. Rely on newer
* compilers to do a respectable job with this. The contention cases
* are handled out of line in arch/alpha/kernel/semaphore.c.
*/
static inline void __down(struct semaphore *sem)
{
long count;
might_sleep();
count = atomic_dec_return(&sem->count);
if (unlikely(count < 0))
__down_failed(sem);
}
static inline int __down_interruptible(struct semaphore *sem)
{
long count;
might_sleep();
count = atomic_dec_return(&sem->count);
if (unlikely(count < 0))
return __down_failed_interruptible(sem);
return 0;
}
/*
* down_trylock returns 0 on success, 1 if we failed to get the lock.
*/
static inline int __down_trylock(struct semaphore *sem)
{
long ret;
/* "Equivalent" C:
do {
ret = ldl_l;
--ret;
if (ret < 0)
break;
ret = stl_c = ret;
} while (ret == 0);
*/
__asm__ __volatile__(
"1: ldl_l %0,%1\n"
" subl %0,1,%0\n"
" blt %0,2f\n"
" stl_c %0,%1\n"
" beq %0,3f\n"
" mb\n"
"2:\n"
".subsection 2\n"
"3: br 1b\n"
".previous"
: "=&r" (ret), "=m" (sem->count)
: "m" (sem->count));
return ret < 0;
}
static inline void __up(struct semaphore *sem)
{
if (unlikely(atomic_inc_return(&sem->count) <= 0))
__up_wakeup(sem);
}
#if !defined(CONFIG_DEBUG_SEMAPHORE)
extern inline void down(struct semaphore *sem)
{
__down(sem);
}
extern inline int down_interruptible(struct semaphore *sem)
{
return __down_interruptible(sem);
}
extern inline int down_trylock(struct semaphore *sem)
{
return __down_trylock(sem);
}
extern inline void up(struct semaphore *sem)
{
__up(sem);
}
#endif
#endif
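The caller-visible API that this deleted architecture header provided is unchanged by the conversion; it is now supplied by the generic <linux/semaphore.h>. A small editor-added sketch of the calling conventions (my_lock, do_work_locked() and do_work_opportunistic() are invented names for illustration):

/* Editor's sketch of the caller-side semantics; not from the patch. */
#include <linux/semaphore.h>
#include <linux/errno.h>

static DECLARE_MUTEX(my_lock);			/* binary semaphore, count = 1 */

static int do_work_locked(void)
{
	if (down_interruptible(&my_lock))	/* non-zero (-EINTR) if signalled */
		return -ERESTARTSYS;
	/* ... critical section ... */
	up(&my_lock);
	return 0;
}

static void do_work_opportunistic(void)
{
	if (down_trylock(&my_lock))		/* non-zero means we did NOT get the lock */
		return;
	/* ... critical section ... */
	up(&my_lock);
}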
#ifndef ASMARM_SEMAPHORE_HELPER_H
#define ASMARM_SEMAPHORE_HELPER_H
/*
* These two _must_ execute atomically wrt each other.
*/
static inline void wake_one_more(struct semaphore * sem)
{
unsigned long flags;
spin_lock_irqsave(&semaphore_wake_lock, flags);
if (atomic_read(&sem->count) <= 0)
sem->waking++;
spin_unlock_irqrestore(&semaphore_wake_lock, flags);
}
static inline int waking_non_zero(struct semaphore *sem)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&semaphore_wake_lock, flags);
if (sem->waking > 0) {
sem->waking--;
ret = 1;
}
spin_unlock_irqrestore(&semaphore_wake_lock, flags);
return ret;
}
/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * We must undo the sem->count decrement done by down_interruptible() while we
 * are still protected by the spinlock, so that this atomic_inc() is atomic
 * with respect to the atomic_read() in wake_one_more(); otherwise we can race. -arca
*/
static inline int waking_non_zero_interruptible(struct semaphore *sem,
struct task_struct *tsk)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&semaphore_wake_lock, flags);
if (sem->waking > 0) {
sem->waking--;
ret = 1;
} else if (signal_pending(tsk)) {
atomic_inc(&sem->count);
ret = -EINTR;
}
spin_unlock_irqrestore(&semaphore_wake_lock, flags);
return ret;
}
/*
 * waking_non_zero_trylock:
 *	1	failed to lock
 *	0	got the lock
 *
 * We must undo the sem->count decrement done by down_trylock() while we are
 * still protected by the spinlock, so that this atomic_inc() is atomic with
 * respect to the atomic_read() in wake_one_more(); otherwise we can race. -arca
*/
static inline int waking_non_zero_trylock(struct semaphore *sem)
{
unsigned long flags;
int ret = 1;
spin_lock_irqsave(&semaphore_wake_lock, flags);
if (sem->waking <= 0)
atomic_inc(&sem->count);
else {
sem->waking--;
ret = 0;
}
spin_unlock_irqrestore(&semaphore_wake_lock, flags);
return ret;
}
#endif
/* #include <linux/semaphore.h>
* linux/include/asm-arm/semaphore.h
*/
#ifndef __ASM_ARM_SEMAPHORE_H
#define __ASM_ARM_SEMAPHORE_H
#include <linux/linkage.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <asm/atomic.h>
#include <asm/locks.h>
struct semaphore {
atomic_t count;
int sleepers;
wait_queue_head_t wait;
};
#define __SEMAPHORE_INIT(name, cnt) \
{ \
.count = ATOMIC_INIT(cnt), \
.wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
}
#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
struct semaphore name = __SEMAPHORE_INIT(name,count)
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
static inline void sema_init(struct semaphore *sem, int val)
{
atomic_set(&sem->count, val);
sem->sleepers = 0;
init_waitqueue_head(&sem->wait);
}
static inline void init_MUTEX(struct semaphore *sem)
{
sema_init(sem, 1);
}
static inline void init_MUTEX_LOCKED(struct semaphore *sem)
{
sema_init(sem, 0);
}
/*
* special register calling convention
*/
asmlinkage void __down_failed(void);
asmlinkage int __down_interruptible_failed(void);
asmlinkage int __down_trylock_failed(void);
asmlinkage void __up_wakeup(void);
extern void __down(struct semaphore * sem);
extern int __down_interruptible(struct semaphore * sem);
extern int __down_trylock(struct semaphore * sem);
extern void __up(struct semaphore * sem);
/*
* This is ugly, but we want the default case to fall through.
* "__down" is the actual routine that waits...
*/
static inline void down(struct semaphore * sem)
{
might_sleep();
__down_op(sem, __down_failed);
}
/*
* This is ugly, but we want the default case to fall through.
* "__down_interruptible" is the actual routine that waits...
*/
static inline int down_interruptible (struct semaphore * sem)
{
might_sleep();
return __down_op_ret(sem, __down_interruptible_failed);
}
static inline int down_trylock(struct semaphore *sem)
{
return __down_op_ret(sem, __down_trylock_failed);
}
/*
* Note! This is subtle. We jump to wake people up only if
* the semaphore was negative (== somebody was waiting on it).
* The default case (no contention) will result in NO
* jumps for both down() and up().
*/
static inline void up(struct semaphore * sem)
{
__up_op(sem, __up_wakeup);
}
#endif
/* #include <linux/semaphore.h>
* SMP- and interrupt-safe semaphores.
*
* Copyright (C) 2006 Atmel Corporation
*
* Based on include/asm-i386/semaphore.h
* Copyright (C) 1996 Linus Torvalds
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_AVR32_SEMAPHORE_H
#define __ASM_AVR32_SEMAPHORE_H
#include <linux/linkage.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
struct semaphore {
atomic_t count;
int sleepers;
wait_queue_head_t wait;
};
#define __SEMAPHORE_INITIALIZER(name, n) \
{ \
.count = ATOMIC_INIT(n), \
.wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
}
#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
static inline void sema_init (struct semaphore *sem, int val)
{
atomic_set(&sem->count, val);
sem->sleepers = 0;
init_waitqueue_head(&sem->wait);
}
static inline void init_MUTEX (struct semaphore *sem)
{
sema_init(sem, 1);
}
static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
sema_init(sem, 0);
}
void __down(struct semaphore * sem);
int __down_interruptible(struct semaphore * sem);
void __up(struct semaphore * sem);
/*
* This is ugly, but we want the default case to fall through.
* "__down_failed" is a special asm handler that calls the C
* routine that actually waits. See arch/i386/kernel/semaphore.c
*/
static inline void down(struct semaphore * sem)
{
might_sleep();
if (unlikely(atomic_dec_return (&sem->count) < 0))
__down (sem);
}
/*
 * Interruptible attempt to acquire the semaphore.  Returns zero if we
 * obtained it, and -EINTR if we were interrupted.
*/
static inline int down_interruptible(struct semaphore * sem)
{
int ret = 0;
might_sleep();
if (unlikely(atomic_dec_return (&sem->count) < 0))
ret = __down_interruptible (sem);
return ret;
}
/*
* Non-blockingly attempt to down() a semaphore.
* Returns zero if we acquired it
*/
static inline int down_trylock(struct semaphore * sem)
{
return atomic_dec_if_positive(&sem->count) < 0;
}
/*
* Note! This is subtle. We jump to wake people up only if
* the semaphore was negative (== somebody was waiting on it).
* The default case (no contention) will result in NO
* jumps for both down() and up().
*/
static inline void up(struct semaphore * sem)
{
if (unlikely(atomic_inc_return (&sem->count) <= 0))
__up (sem);
}
#endif /*__ASM_AVR32_SEMAPHORE_H */
/* Based on M68K version, Lineo Inc. May 2001 */
#ifndef _BFIN_SEMAPHORE_HELPER_H
#define _BFIN_SEMAPHORE_HELPER_H
/*
* SMP- and interrupt-safe semaphores helper functions.
*
* (C) Copyright 1996 Linus Torvalds
*
*/
#include <asm/errno.h>
/*
* These two _must_ execute atomically wrt each other.
*/
static inline void wake_one_more(struct semaphore *sem)
{
atomic_inc(&sem->waking);
}
static inline int waking_non_zero(struct semaphore *sem)
{
int ret;
unsigned long flags = 0;
spin_lock_irqsave(&semaphore_wake_lock, flags);
ret = 0;
if (atomic_read(&sem->waking) > 0) {
atomic_dec(&sem->waking);
ret = 1;
}
spin_unlock_irqrestore(&semaphore_wake_lock, flags);
return ret;
}
/*
* waking_non_zero_interruptible:
* 1 got the lock
* 0 go to sleep
* -EINTR interrupted
*/
static inline int waking_non_zero_interruptible(struct semaphore *sem,
struct task_struct *tsk)
{
int ret = 0;
unsigned long flags = 0;
spin_lock_irqsave(&semaphore_wake_lock, flags);
if (atomic_read(&sem->waking) > 0) {
atomic_dec(&sem->waking);
ret = 1;
} else if (signal_pending(tsk)) {
atomic_inc(&sem->count);
ret = -EINTR;
}
spin_unlock_irqrestore(&semaphore_wake_lock, flags);
return ret;
}
/*
* waking_non_zero_trylock:
* 1 failed to lock
* 0 got the lock
*/
static inline int waking_non_zero_trylock(struct semaphore *sem)
{
int ret = 1;
unsigned long flags = 0;
spin_lock_irqsave(&semaphore_wake_lock, flags);
if (atomic_read(&sem->waking) > 0) {
atomic_dec(&sem->waking);
ret = 0;
} else
atomic_inc(&sem->count);
spin_unlock_irqrestore(&semaphore_wake_lock, flags);
return ret;
}
#endif /* _BFIN_SEMAPHORE_HELPER_H */
#ifndef _BFIN_SEMAPHORE_H #include <linux/semaphore.h>
#define _BFIN_SEMAPHORE_H
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <asm/atomic.h>
/*
* Interrupt-safe semaphores..
*
* (C) Copyright 1996 Linus Torvalds
*
* BFIN version by akbar hussain Lineo Inc April 2001
*
*/
struct semaphore {
atomic_t count;
int sleepers;
wait_queue_head_t wait;
};
#define __SEMAPHORE_INITIALIZER(name, n) \
{ \
.count = ATOMIC_INIT(n), \
.sleepers = 0, \
.wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
}
#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
static inline void sema_init(struct semaphore *sem, int val)
{
*sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
}
static inline void init_MUTEX(struct semaphore *sem)
{
sema_init(sem, 1);
}
static inline void init_MUTEX_LOCKED(struct semaphore *sem)
{
sema_init(sem, 0);
}
asmlinkage void __down(struct semaphore *sem);
asmlinkage int __down_interruptible(struct semaphore *sem);
asmlinkage int __down_trylock(struct semaphore *sem);
asmlinkage void __up(struct semaphore *sem);
extern spinlock_t semaphore_wake_lock;
/*
* This is ugly, but we want the default case to fall through.
* "down_failed" is a special asm handler that calls the C
* routine that actually waits.
*/
static inline void down(struct semaphore *sem)
{
might_sleep();
if (atomic_dec_return(&sem->count) < 0)
__down(sem);
}
static inline int down_interruptible(struct semaphore *sem)
{
int ret = 0;
might_sleep();
if (atomic_dec_return(&sem->count) < 0)
ret = __down_interruptible(sem);
return (ret);
}
static inline int down_trylock(struct semaphore *sem)
{
int ret = 0;
if (atomic_dec_return(&sem->count) < 0)
ret = __down_trylock(sem);
return ret;
}
/*
* Note! This is subtle. We jump to wake people up only if
* the semaphore was negative (== somebody was waiting on it).
* The default case (no contention) will result in NO
* jumps for both down() and up().
*/
static inline void up(struct semaphore *sem)
{
if (atomic_inc_return(&sem->count) <= 0)
__up(sem);
}
#endif /* __ASSEMBLY__ */
#endif /* _BFIN_SEMAPHORE_H */
/* $Id: semaphore-helper.h,v 1.3 2001/03/26 15:00:33 orjanf Exp $
*
* SMP- and interrupt-safe semaphores helper functions. Generic versions, no
* optimizations whatsoever...
*
*/
#ifndef _ASM_SEMAPHORE_HELPER_H
#define _ASM_SEMAPHORE_HELPER_H
#include <asm/atomic.h>
#include <linux/errno.h>
#define read(a) ((a)->counter)
#define inc(a) (((a)->counter)++)
#define dec(a) (((a)->counter)--)
#define count_inc(a) ((*(a))++)
/*
* These two _must_ execute atomically wrt each other.
*/
static inline void wake_one_more(struct semaphore * sem)
{
atomic_inc(&sem->waking);
}
static inline int waking_non_zero(struct semaphore *sem)
{
unsigned long flags;
int ret = 0;
local_irq_save(flags);
if (read(&sem->waking) > 0) {
dec(&sem->waking);
ret = 1;
}
local_irq_restore(flags);
return ret;
}
static inline int waking_non_zero_interruptible(struct semaphore *sem,
struct task_struct *tsk)
{
int ret = 0;
unsigned long flags;
local_irq_save(flags);
if (read(&sem->waking) > 0) {
dec(&sem->waking);
ret = 1;
} else if (signal_pending(tsk)) {
inc(&sem->count);
ret = -EINTR;
}
local_irq_restore(flags);
return ret;
}
static inline int waking_non_zero_trylock(struct semaphore *sem)
{
int ret = 1;
unsigned long flags;
local_irq_save(flags);
if (read(&sem->waking) <= 0)
inc(&sem->count);
else {
dec(&sem->waking);
ret = 0;
}
local_irq_restore(flags);
return ret;
}
#endif /* _ASM_SEMAPHORE_HELPER_H */