Commit 89808123, authored Aug 20, 2002 by James Simmons
Merge maxwell.earthlink.net:/usr/src/linus-2.5
into maxwell.earthlink.net:/usr/src/fbdev-2.5
Parents: f9d7da1c, d17e9bb6
Showing 21 changed files with 53 additions and 1000 deletions.
drivers/ieee1394/ieee1394_core.c (+0, -1)
include/asm-alpha/smplock.h (+0, -56)
include/asm-arm/smplock.h (+0, -60)
include/asm-cris/smplock.h (+0, -25)
include/asm-generic/smplock.h (+0, -50)
include/asm-i386/smplock.h (+0, -58)
include/asm-ia64/smplock.h (+0, -58)
include/asm-m68k/smplock.h (+0, -51)
include/asm-mips/smplock.h (+0, -54)
include/asm-mips64/smplock.h (+0, -56)
include/asm-parisc/smplock.h (+0, -49)
include/asm-ppc/smplock.h (+0, -68)
include/asm-ppc64/smplock.h (+0, -55)
include/asm-s390/smplock.h (+0, -62)
include/asm-s390x/smplock.h (+0, -62)
include/asm-sh/smplock.h (+0, -23)
include/asm-sparc/smplock.h (+0, -55)
include/asm-sparc64/smplock.h (+0, -60)
include/asm-x86_64/smplock.h (+0, -95)
include/linux/smp_lock.h (+53, -1)
mm/rmap.c (+0, -1)
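
Taken together, the diff deletes every per-architecture <asm/smplock.h> and moves one generic big kernel lock (BKL) implementation into <linux/smp_lock.h>. For orientation, a minimal sketch of how code of this era called the interface; my_config_update() is a hypothetical example, not part of this commit:

#include <linux/smp_lock.h>     /* lock_kernel(), unlock_kernel() */

/* Hypothetical caller, for illustration only: the BKL nests, so this
 * is safe whether or not the caller already holds the lock. */
static void my_config_update(void)
{
        lock_kernel();          /* first acquisition takes kernel_flag;
                                 * nested calls only bump lock_depth */
        /* ... touch state still serialized by the BKL ... */
        unlock_kernel();        /* releases kernel_flag only when
                                 * lock_depth drops back to -1 */
}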
drivers/ieee1394/ieee1394_core.c

@@ -22,7 +22,6 @@
 #include <asm/bitops.h>
 #include <asm/byteorder.h>
 #include <asm/semaphore.h>
-#include <asm/smplock.h>
 #include "ieee1394_types.h"
 #include "ieee1394.h"
include/asm-alpha/smplock.h (deleted, 100644 → 0)

/*
 * <asm/smplock.h>
 *
 * Default SMP lock implementation
 */
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

extern spinlock_t kernel_flag;

#define kernel_locked()         spin_is_locked(&kernel_flag)

/*
 * Release global kernel lock and global interrupt lock
 */
static __inline__ void release_kernel_lock(struct task_struct *task)
{
        if (unlikely(task->lock_depth >= 0))
                spin_unlock(&kernel_flag);
}

/*
 * Re-acquire the kernel lock
 */
static __inline__ void reacquire_kernel_lock(struct task_struct *task)
{
        if (unlikely(task->lock_depth >= 0))
                spin_lock(&kernel_flag);
}

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPU's.
 */
static __inline__ void lock_kernel(void)
{
#ifdef CONFIG_PREEMPT
        if (current->lock_depth == -1)
                spin_lock(&kernel_flag);
        ++current->lock_depth;
#else
        if (!++current->lock_depth)
                spin_lock(&kernel_flag);
#endif
}

static __inline__ void unlock_kernel(void)
{
        if (--current->lock_depth < 0)
                spin_unlock(&kernel_flag);
}
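
The !++current->lock_depth test above works because every task starts with lock_depth == -1: the first lock_kernel() increments it to 0 (so the negation is true and the spinlock is taken), while nested calls only count. A minimal user-space model of that counter logic — a sketch of the idea, not kernel code:

/* Models only the depth-counting idiom: no real spinlock, one thread.
 * lock_depth starts at -1; only the -1 -> 0 and 0 -> -1 transitions
 * touch the underlying lock. */
#include <assert.h>
#include <stdio.h>

static int lock_depth = -1;     /* models current->lock_depth */
static int lock_held;           /* models kernel_flag */

static void model_lock_kernel(void)
{
        if (!++lock_depth) {    /* -1 -> 0: first acquisition */
                assert(!lock_held);
                lock_held = 1;
        }
}

static void model_unlock_kernel(void)
{
        if (--lock_depth < 0) { /* 0 -> -1: last release */
                assert(lock_held);
                lock_held = 0;
        }
}

int main(void)
{
        model_lock_kernel();    /* takes the lock */
        model_lock_kernel();    /* nested: counter only */
        model_unlock_kernel();  /* still held */
        assert(lock_held);
        model_unlock_kernel();  /* really releases */
        assert(!lock_held);
        printf("depth-counting model OK\n");
        return 0;
}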
include/asm-arm/smplock.h (deleted, 100644 → 0)

/*
 * <asm/smplock.h>
 *
 * Default SMP lock implementation
 */
#include <linux/config.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

extern spinlock_t kernel_flag;

#ifdef CONFIG_PREEMPT
#define kernel_locked()         preempt_get_count()
#else
#define kernel_locked()         spin_is_locked(&kernel_flag)
#endif

/*
 * Release global kernel lock and global interrupt lock
 */
#define release_kernel_lock(task, cpu) \
do { \
        if (unlikely(task->lock_depth >= 0)) \
                spin_unlock(&kernel_flag); \
} while (0)

/*
 * Re-acquire the kernel lock
 */
#define reacquire_kernel_lock(task) \
do { \
        if (unlikely(task->lock_depth >= 0)) \
                spin_lock(&kernel_flag); \
} while (0)

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPU's.
 */
static inline void lock_kernel(void)
{
#ifdef CONFIG_PREEMPT
        if (current->lock_depth == -1)
                spin_lock(&kernel_flag);
        ++current->lock_depth;
#else
        if (!++current->lock_depth)
                spin_lock(&kernel_flag);
#endif
}

static inline void unlock_kernel(void)
{
        if (--current->lock_depth < 0)
                spin_unlock(&kernel_flag);
}
include/asm-cris/smplock.h (deleted, 100644 → 0)

#ifndef __ASM_CRIS_SMPLOCK_H
#define __ASM_CRIS_SMPLOCK_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/config.h>

#ifndef CONFIG_SMP

#define lock_kernel()                           do { } while(0)
#define unlock_kernel()                         do { } while(0)
#define release_kernel_lock(task, cpu, depth)   ((depth) = 1)
#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)

#else

#error "We do not support SMP on CRIS"

#endif

#endif
include/asm-generic/smplock.h (deleted, 100644 → 0)

/*
 * <asm/smplock.h>
 *
 * Default SMP lock implementation
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

extern spinlock_t kernel_flag;

#define kernel_locked()         spin_is_locked(&kernel_flag)

/*
 * Release global kernel lock and global interrupt lock
 */
#define release_kernel_lock(task) \
do { \
        if (task->lock_depth >= 0) \
                spin_unlock(&kernel_flag); \
        local_irq_enable(); \
} while (0)

/*
 * Re-acquire the kernel lock
 */
#define reacquire_kernel_lock(task) \
do { \
        if (task->lock_depth >= 0) \
                spin_lock(&kernel_flag); \
} while (0)

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPU's.
 */
extern __inline__ void lock_kernel(void)
{
        if (!++current->lock_depth)
                spin_lock(&kernel_flag);
}

extern __inline__ void unlock_kernel(void)
{
        if (--current->lock_depth < 0)
                spin_unlock(&kernel_flag);
}
include/asm-i386/smplock.h (deleted, 100644 → 0)

/*
 * <asm/smplock.h>
 *
 * i386 SMP lock implementation
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/current.h>

extern spinlock_t kernel_flag;

#define kernel_locked()         (current->lock_depth >= 0)

#define get_kernel_lock()       spin_lock(&kernel_flag)
#define put_kernel_lock()       spin_unlock(&kernel_flag)

/*
 * Release global kernel lock and global interrupt lock
 */
#define release_kernel_lock(task) \
do { \
        if (unlikely(task->lock_depth >= 0)) \
                put_kernel_lock(); \
} while (0)

/*
 * Re-acquire the kernel lock
 */
#define reacquire_kernel_lock(task) \
do { \
        if (unlikely(task->lock_depth >= 0)) \
                get_kernel_lock(); \
} while (0)

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPU's.
 */
static __inline__ void lock_kernel(void)
{
        int depth = current->lock_depth + 1;
        if (!depth)
                get_kernel_lock();
        current->lock_depth = depth;
}

static __inline__ void unlock_kernel(void)
{
        if (current->lock_depth < 0)
                BUG();
        if (--current->lock_depth < 0)
                put_kernel_lock();
}
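
Unlike the archs above, this i386 version (and the copy of it added to <linux/smp_lock.h> below) never bumps current->lock_depth before the lock is actually held. A plausible reading of why that ordering matters under CONFIG_PREEMPT, sketched under a hypothetical name; the annotations are mine, not from the patch:

/* Annotated restatement of the i386 lock_kernel() above.  With kernel
 * preemption, the scheduler uses release_kernel_lock()/reacquire_kernel_lock(),
 * which test lock_depth, so the counter must not claim the BKL before
 * kernel_flag is really taken. */
static __inline__ void lock_kernel_annotated(void)
{
        int depth = current->lock_depth + 1;    /* compute in a local:
                                                 * nothing published yet */
        if (!depth)
                get_kernel_lock();              /* may spin; preemption here
                                                 * sees depth still at -1 */
        current->lock_depth = depth;            /* publish only once the
                                                 * lock is held */
}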
include/asm-ia64/smplock.h (deleted, 100644 → 0)

/*
 * <asm/smplock.h>
 *
 * Default SMP lock implementation
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/current.h>
#include <asm/hardirq.h>

extern spinlock_t kernel_flag;

#ifdef CONFIG_SMP
# define kernel_locked()        spin_is_locked(&kernel_flag)
#else
# define kernel_locked()        (1)
#endif

/*
 * Release global kernel lock and global interrupt lock
 */
#define release_kernel_lock(task) \
do { \
        if (unlikely(task->lock_depth >= 0)) \
                spin_unlock(&kernel_flag); \
} while (0)

/*
 * Re-acquire the kernel lock
 */
#define reacquire_kernel_lock(task) \
do { \
        if (unlikely(task->lock_depth >= 0)) \
                spin_lock(&kernel_flag); \
} while (0)

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPU's.
 */
static __inline__ void lock_kernel(void)
{
        if (!++current->lock_depth)
                spin_lock(&kernel_flag);
}

static __inline__ void unlock_kernel(void)
{
        if (--current->lock_depth < 0)
                spin_unlock(&kernel_flag);
}
include/asm-m68k/smplock.h (deleted, 100644 → 0)

/*
 * <asm/smplock.h>
 *
 * Default SMP lock implementation
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

extern spinlock_t kernel_flag;

#define kernel_locked()         spin_is_locked(&kernel_flag)

/*
 * Release global kernel lock and global interrupt lock
 */
#define release_kernel_lock(task, cpu) \
do { \
        if (task->lock_depth >= 0) \
                spin_unlock(&kernel_flag); \
        release_irqlock(cpu); \
        local_irq_enable(); \
} while (0)

/*
 * Re-acquire the kernel lock
 */
#define reacquire_kernel_lock(task) \
do { \
        if (task->lock_depth >= 0) \
                spin_lock(&kernel_flag); \
} while (0)

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPU's.
 */
extern __inline__ void lock_kernel(void)
{
        if (!++current->lock_depth)
                spin_lock(&kernel_flag);
}

extern __inline__ void unlock_kernel(void)
{
        if (--current->lock_depth < 0)
                spin_unlock(&kernel_flag);
}
include/asm-mips/smplock.h (deleted, 100644 → 0)

/* $Id: smplock.h,v 1.2 1999/10/09 00:01:43 ralf Exp $
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Default SMP lock implementation
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

extern spinlock_t kernel_flag;

#define kernel_locked()         spin_is_locked(&kernel_flag)

/*
 * Release global kernel lock and global interrupt lock
 */
#define release_kernel_lock(task, cpu) \
do { \
        if (task->lock_depth >= 0) \
                spin_unlock(&kernel_flag); \
        release_irqlock(cpu); \
        local_irq_enable(); \
} while (0)

/*
 * Re-acquire the kernel lock
 */
#define reacquire_kernel_lock(task) \
do { \
        if (task->lock_depth >= 0) \
                spin_lock(&kernel_flag); \
} while (0)

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPU's.
 */
extern __inline__ void lock_kernel(void)
{
        if (!++current->lock_depth)
                spin_lock(&kernel_flag);
}

extern __inline__ void unlock_kernel(void)
{
        if (--current->lock_depth < 0)
                spin_unlock(&kernel_flag);
}
include/asm-mips64/smplock.h (deleted, 100644 → 0)

/*
 * <asm/smplock.h>
 *
 * Default SMP lock implementation
 */
#ifndef _ASM_SMPLOCK_H
#define _ASM_SMPLOCK_H

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

extern spinlock_t kernel_flag;

#define kernel_locked()         spin_is_locked(&kernel_flag)

/*
 * Release global kernel lock and global interrupt lock
 */
static __inline__ void release_kernel_lock(struct task_struct *task, int cpu)
{
        if (task->lock_depth >= 0)
                spin_unlock(&kernel_flag);
        release_irqlock(cpu);
        local_irq_enable();
}

/*
 * Re-acquire the kernel lock
 */
static __inline__ void reacquire_kernel_lock(struct task_struct *task)
{
        if (task->lock_depth >= 0)
                spin_lock(&kernel_flag);
}

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPU's.
 */
static __inline__ void lock_kernel(void)
{
        if (!++current->lock_depth)
                spin_lock(&kernel_flag);
}

static __inline__ void unlock_kernel(void)
{
        if (--current->lock_depth < 0)
                spin_unlock(&kernel_flag);
}

#endif /* _ASM_SMPLOCK_H */
include/asm-parisc/smplock.h (deleted, 100644 → 0)

/*
 * <asm/smplock.h>
 *
 * Default SMP lock implementation
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

extern spinlock_t kernel_flag;

/*
 * Release global kernel lock and global interrupt lock
 */
#define release_kernel_lock(task, cpu) \
do { \
        if (task->lock_depth >= 0) \
                spin_unlock(&kernel_flag); \
        release_irqlock(cpu); \
        local_irq_enable(); \
} while (0)

/*
 * Re-acquire the kernel lock
 */
#define reacquire_kernel_lock(task) \
do { \
        if (task->lock_depth >= 0) \
                spin_lock(&kernel_flag); \
} while (0)

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPU's.
 */
extern __inline__ void lock_kernel(void)
{
        if (!++current->lock_depth)
                spin_lock(&kernel_flag);
}

extern __inline__ void unlock_kernel(void)
{
        if (--current->lock_depth < 0)
                spin_unlock(&kernel_flag);
}
include/asm-ppc/smplock.h (deleted, 100644 → 0)

/*
 * BK Id: %F% %I% %G% %U% %#%
 */
/*
 * <asm/smplock.h>
 *
 * Default SMP lock implementation
 */
#ifdef __KERNEL__
#ifndef __ASM_SMPLOCK_H__
#define __ASM_SMPLOCK_H__

#include <linux/interrupt.h>
#include <linux/spinlock.h>

extern spinlock_t kernel_flag;

#ifdef CONFIG_SMP
#define kernel_locked()         spin_is_locked(&kernel_flag)
#elif defined(CONFIG_PREEMPT)
#define kernel_locked()         preempt_count()
#endif

/*
 * Release global kernel lock and global interrupt lock
 */
#define release_kernel_lock(task) \
do { \
        if (unlikely(task->lock_depth >= 0)) \
                spin_unlock(&kernel_flag); \
} while (0)

/*
 * Re-acquire the kernel lock
 */
#define reacquire_kernel_lock(task) \
do { \
        if (unlikely(task->lock_depth >= 0)) \
                spin_lock(&kernel_flag); \
} while (0)

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPU's.
 */
static __inline__ void lock_kernel(void)
{
#ifdef CONFIG_PREEMPT
        if (current->lock_depth == -1)
                spin_lock(&kernel_flag);
        ++current->lock_depth;
#else
        if (!++current->lock_depth)
                spin_lock(&kernel_flag);
#endif /* CONFIG_PREEMPT */
}

static __inline__ void unlock_kernel(void)
{
        if (--current->lock_depth < 0)
                spin_unlock(&kernel_flag);
}

#endif /* __ASM_SMPLOCK_H__ */
#endif /* __KERNEL__ */
include/asm-ppc64/smplock.h (deleted, 100644 → 0)

/*
 * <asm/smplock.h>
 *
 * Default SMP lock implementation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

extern spinlock_t kernel_flag;

#define kernel_locked()         spin_is_locked(&kernel_flag)

/*
 * Release global kernel lock and global interrupt lock
 */
#define release_kernel_lock(task) \
do { \
        if (unlikely(task->lock_depth >= 0)) \
                spin_unlock(&kernel_flag); \
} while (0)

/*
 * Re-acquire the kernel lock
 */
#define reacquire_kernel_lock(task) \
do { \
        if (unlikely(task->lock_depth >= 0)) \
                spin_lock(&kernel_flag); \
} while (0)

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPU's.
 */
static __inline__ void lock_kernel(void)
{
        if (!++current->lock_depth)
                spin_lock(&kernel_flag);
}

static __inline__ void unlock_kernel(void)
{
        if (current->lock_depth < 0)
                BUG();
        if (--current->lock_depth < 0)
                spin_unlock(&kernel_flag);
}
include/asm-s390/smplock.h (deleted, 100644 → 0)

/*
 * include/asm-s390/smplock.h
 *
 * S390 version
 *
 * Derived from "include/asm-i386/smplock.h"
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

extern spinlock_t kernel_flag;

#define kernel_locked()         spin_is_locked(&kernel_flag)

/*
 * Release global kernel lock and global interrupt lock
 */
#define release_kernel_lock(task, cpu) \
do { \
        if (task->lock_depth >= 0) \
                spin_unlock(&kernel_flag); \
        release_irqlock(cpu); \
        local_irq_enable(); \
} while (0)

/*
 * Re-acquire the kernel lock
 */
#define reacquire_kernel_lock(task) \
do { \
        if (task->lock_depth >= 0) \
                spin_lock(&kernel_flag); \
} while (0)

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPU's.
 */
extern __inline__ void lock_kernel(void)
{
        if (!++current->lock_depth)
                spin_lock(&kernel_flag);
}

extern __inline__ void unlock_kernel(void)
{
        if (--current->lock_depth < 0)
                spin_unlock(&kernel_flag);
}
include/asm-s390x/smplock.h (deleted, 100644 → 0)

/*
 * include/asm-s390/smplock.h
 *
 * S390 version
 *
 * Derived from "include/asm-i386/smplock.h"
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

extern spinlock_t kernel_flag;

#define kernel_locked()         spin_is_locked(&kernel_flag)

/*
 * Release global kernel lock and global interrupt lock
 */
#define release_kernel_lock(task, cpu) \
do { \
        if (task->lock_depth >= 0) \
                spin_unlock(&kernel_flag); \
        release_irqlock(cpu); \
        local_irq_enable(); \
} while (0)

/*
 * Re-acquire the kernel lock
 */
#define reacquire_kernel_lock(task) \
do { \
        if (task->lock_depth >= 0) \
                spin_lock(&kernel_flag); \
} while (0)

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPU's.
 */
extern __inline__ void lock_kernel(void)
{
        if (!++current->lock_depth)
                spin_lock(&kernel_flag);
}

extern __inline__ void unlock_kernel(void)
{
        if (--current->lock_depth < 0)
                spin_unlock(&kernel_flag);
}
include/asm-sh/smplock.h (deleted, 100644 → 0)

#ifndef __ASM_SH_SMPLOCK_H
#define __ASM_SH_SMPLOCK_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/config.h>

#ifndef CONFIG_SMP

#define lock_kernel()                           do { } while(0)
#define unlock_kernel()                         do { } while(0)
#define release_kernel_lock(task, cpu, depth)   ((depth) = 1)
#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)

#else

#error "We do not support SMP on SH"

#endif /* CONFIG_SMP */

#endif /* __ASM_SH_SMPLOCK_H */
include/asm-sparc/smplock.h (deleted, 100644 → 0)

/*
 * <asm/smplock.h>
 *
 * Default SMP lock implementation
 */
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

extern spinlock_t kernel_flag;

#define kernel_locked() \
        (spin_is_locked(&kernel_flag) && \
         (current->lock_depth >= 0))

/*
 * Release global kernel lock and global interrupt lock
 */
#define release_kernel_lock(task, cpu) \
do { \
        if (unlikely(task->lock_depth >= 0)) { \
                spin_unlock(&kernel_flag); \
                release_irqlock(cpu); \
                local_irq_enable(); \
        } \
} while (0)

/*
 * Re-acquire the kernel lock
 */
#define reacquire_kernel_lock(task) \
do { \
        if (unlikely(task->lock_depth >= 0)) \
                spin_lock(&kernel_flag); \
} while (0)

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPU's.
 */
#define lock_kernel() \
do { \
        if (!++current->lock_depth) \
                spin_lock(&kernel_flag); \
} while(0)

#define unlock_kernel() \
do { \
        if (--current->lock_depth < 0) \
                spin_unlock(&kernel_flag); \
} while(0)
include/asm-sparc64/smplock.h (deleted, 100644 → 0)

/*
 * <asm/smplock.h>
 *
 * Default SMP lock implementation
 */
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

extern spinlock_t kernel_flag;

#ifdef CONFIG_SMP
#define kernel_locked() \
        (spin_is_locked(&kernel_flag) && \
         (current->lock_depth >= 0))
#else
#ifdef CONFIG_PREEMPT
#define kernel_locked()         preempt_get_count()
#else
#define kernel_locked()         1
#endif
#endif

/*
 * Release global kernel lock and global interrupt lock
 */
#define release_kernel_lock(task) \
do { \
        if (unlikely(task->lock_depth >= 0)) \
                spin_unlock(&kernel_flag); \
} while (0)

/*
 * Re-acquire the kernel lock
 */
#define reacquire_kernel_lock(task) \
do { \
        if (unlikely(task->lock_depth >= 0)) \
                spin_lock(&kernel_flag); \
} while (0)

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPU's.
 */
#define lock_kernel() \
do { \
        if (!++current->lock_depth) \
                spin_lock(&kernel_flag); \
} while(0)

#define unlock_kernel() \
do { \
        if (--current->lock_depth < 0) \
                spin_unlock(&kernel_flag); \
} while(0)
include/asm-x86_64/smplock.h (deleted, 100644 → 0)

/*
 * <asm/smplock.h>
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/current.h>

extern spinlock_t kernel_flag;

#ifdef CONFIG_SMP
#define kernel_locked()         spin_is_locked(&kernel_flag)
#define check_irq_holder(cpu) \
        if (global_irq_holder == (cpu)) \
                BUG();
#else
#ifdef CONFIG_PREEMPT
#define kernel_locked()         preempt_get_count()
#define global_irq_holder       0
#define check_irq_holder(cpu)   do {} while(0)
#else
#define kernel_locked()         1
#define check_irq_holder(cpu) \
        if (global_irq_holder == (cpu)) \
                BUG();
#endif
#endif

/*
 * Release global kernel lock and global interrupt lock
 */
#define release_kernel_lock(task, cpu) \
do { \
        if (unlikely(task->lock_depth >= 0)) { \
                spin_unlock(&kernel_flag); \
                check_irq_holder(cpu); \
        } \
} while (0)

/*
 * Re-acquire the kernel lock
 */
#define reacquire_kernel_lock(task) \
do { \
        if (unlikely(task->lock_depth >= 0)) \
                spin_lock(&kernel_flag); \
} while (0)

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPU's.
 */
extern __inline__ void lock_kernel(void)
{
#ifdef CONFIG_PREEMPT
        if (current->lock_depth == -1)
                spin_lock(&kernel_flag);
        ++current->lock_depth;
#else
#if 1
        if (!++current->lock_depth)
                spin_lock(&kernel_flag);
#else
        __asm__ __volatile__(
                "incl %1\n\t"
                "jne 9f"
                spin_lock_string
                "\n9:"
                : "=m" (__dummy_lock(&kernel_flag)),
                  "=m" (current->lock_depth));
#endif
#endif
}

extern __inline__ void unlock_kernel(void)
{
        if (current->lock_depth < 0)
                BUG();
#if 1
        if (--current->lock_depth < 0)
                spin_unlock(&kernel_flag);
#else
        __asm__ __volatile__(
                "decl %1\n\t"
                "jns 9f\n\t"
                spin_unlock_string
                "\n9:"
                : "=m" (__dummy_lock(&kernel_flag)),
                  "=m" (current->lock_depth));
#endif
}
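
Note the #if 1 blocks in the x86_64 version: only the portable C paths were compiled, while the hand-written incl/decl inline-assembly fast paths (built around spin_lock_string/spin_unlock_string) were kept as disabled reference code.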
include/linux/smp_lock.h

@@ -13,7 +13,59 @@
 #else
 
-#include <asm/smplock.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <asm/current.h>
+
+extern spinlock_t kernel_flag;
+
+#define kernel_locked()         (current->lock_depth >= 0)
+
+#define get_kernel_lock()       spin_lock(&kernel_flag)
+#define put_kernel_lock()       spin_unlock(&kernel_flag)
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task) \
+do { \
+       if (unlikely(task->lock_depth >= 0)) \
+               put_kernel_lock(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+       if (unlikely(task->lock_depth >= 0)) \
+               get_kernel_lock(); \
+} while (0)
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+static __inline__ void lock_kernel(void)
+{
+       int depth = current->lock_depth + 1;
+       if (!depth)
+               get_kernel_lock();
+       current->lock_depth = depth;
+}
+
+static __inline__ void unlock_kernel(void)
+{
+       if (current->lock_depth < 0)
+               BUG();
+       if (--current->lock_depth < 0)
+               put_kernel_lock();
+}
 
 #endif /* CONFIG_SMP */
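
The block added here is the implementation that the deleted include/asm-i386/smplock.h carried, including the BUG() check against unlock underflow and the lock_kernel() that publishes lock_depth only after kernel_flag is held; with it in the generic header, no architecture needs its own copy.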
mm/rmap.c

@@ -28,7 +28,6 @@
 #include <asm/pgalloc.h>
 #include <asm/rmap.h>
-#include <asm/smplock.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>