Commit ae9cad56 authored by Thomas Gleixner, committed by Linus Torvalds

[PATCH] Lock initializer cleanup: Security

Use the new lock initializers DEFINE_SPINLOCK and DEFINE_RWLOCK.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent fd42b1cf
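
For context: the old code initialized each lock by assigning the SPIN_LOCK_UNLOCKED or RW_LOCK_UNLOCKED constant to a separately declared variable, while the new DEFINE_SPINLOCK/DEFINE_RWLOCK macros declare and initialize the lock in one statement. A minimal sketch of the before/after pattern, using hypothetical lock names that are not part of this patch:

#include <linux/spinlock.h>

/* Old style: separate declaration plus initializer constant. */
static spinlock_t example_lock_old = SPIN_LOCK_UNLOCKED;
static rwlock_t example_rwlock_old = RW_LOCK_UNLOCKED;

/* New style: the macro expands to the declaration and its initializer. */
static DEFINE_SPINLOCK(example_lock);
static DEFINE_RWLOCK(example_rwlock);
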
@@ -20,10 +20,10 @@
 static kmem_cache_t *key_jar;
 static key_serial_t key_serial_next = 3;
 struct rb_root key_serial_tree; /* tree of keys indexed by serial */
-spinlock_t key_serial_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(key_serial_lock);
 struct rb_root key_user_tree; /* tree of quota records indexed by UID */
-spinlock_t key_user_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(key_user_lock);
 static LIST_HEAD(key_types_list);
 static DECLARE_RWSEM(key_types_sem);
@@ -30,7 +30,7 @@
 #define KEYRING_NAME_HASH_SIZE (1 << 5)
 static struct list_head keyring_name_hash[KEYRING_NAME_HASH_SIZE];
-static rwlock_t keyring_name_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(keyring_name_lock);
 static inline unsigned keyring_hash(const char *desc)
 {
@@ -227,7 +227,7 @@ void __init avc_init(void)
 	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
 		INIT_LIST_HEAD(&avc_cache.slots[i]);
-		avc_cache.slots_lock[i] = SPIN_LOCK_UNLOCKED;
+		spin_lock_init(&avc_cache.slots_lock[i]);
 	}
 	atomic_set(&avc_cache.active_nodes, 0);
 	atomic_set(&avc_cache.lru_hint, 0);
@@ -415,7 +415,7 @@ static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass, u32 requested
 static int avc_latest_notif_update(int seqno, int is_insert)
 {
 	int ret = 0;
-	static spinlock_t notif_lock = SPIN_LOCK_UNLOCKED;
+	static DEFINE_SPINLOCK(notif_lock);
 	unsigned long flag;
 	spin_lock_irqsave(&notif_lock, flag);
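
Note that the avc_init() hunk above uses a different replacement: avc_cache.slots_lock[] is embedded in an existing structure, so it cannot use DEFINE_SPINLOCK (which declares a new variable) and is switched to runtime initialization with spin_lock_init() instead. A hedged sketch of that pattern, with hypothetical names not taken from this patch:

#include <linux/init.h>
#include <linux/spinlock.h>

#define EXAMPLE_SLOTS 16	/* hypothetical slot count */

static struct {
	spinlock_t slots_lock[EXAMPLE_SLOTS];	/* embedded locks: no static initializer macro */
} example_cache;

static void __init example_init(void)
{
	int i;

	for (i = 0; i < EXAMPLE_SLOTS; i++)
		spin_lock_init(&example_cache.slots_lock[i]);	/* runtime init, as in avc_init() */
}
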
@@ -110,7 +110,7 @@ static struct security_operations *secondary_ops = NULL;
 /* Lists of inode and superblock security structures initialized
    before the policy was loaded. */
 static LIST_HEAD(superblock_security_head);
-static spinlock_t sb_security_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(sb_security_lock);
 /* Allocate and free functions for each kind of security blob. */
@@ -45,7 +45,7 @@ struct sel_netif
 static u32 sel_netif_total;
 static LIST_HEAD(sel_netif_list);
-static spinlock_t sel_netif_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(sel_netif_lock);
 static struct list_head sel_netif_hash[SEL_NETIF_HASH_SIZE];
 static inline u32 sel_netif_hasfn(struct net_device *dev)
@@ -42,7 +42,7 @@
 extern void selnl_notify_policyload(u32 seqno);
 unsigned int policydb_loaded_version;
-static rwlock_t policy_rwlock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(policy_rwlock);
 #define POLICY_RDLOCK read_lock(&policy_rwlock)
 #define POLICY_WRLOCK write_lock_irq(&policy_rwlock)
 #define POLICY_RDUNLOCK read_unlock(&policy_rwlock)