Commit 609c92fe authored by Paul Moore, committed by David S. Miller

[NetLabel]: make the CIPSOv4 cache spinlocks bottom half safe

The CIPSOv4 cache traversal routines are triggered by both userspace events
(cache invalidation due to DOI removal or updated SELinux policy) and network
packet processing events.  As a result there is a problem with the existing
CIPSOv4 cache spinlocks, as they are not bottom-half/softirq safe.  This patch
converts the CIPSOv4 cache spin_[un]lock() calls into spin_[un]lock_bh() calls
to address this problem.
Signed-off-by: Paul Moore <paul.moore@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 14a72f53
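
For context on why the plain spin_[un]lock() calls were unsafe: a process-context path (such as a cache flush triggered from userspace) can take a bucket lock, be interrupted on the same CPU by softirq packet processing that tries to take the same lock, and deadlock, because the interrupted lock holder can never run again to release it.  The spin_[un]lock_bh() variants disable bottom-half processing locally while the lock is held.  The sketch below is a minimal illustration of that pattern, assuming a lock shared between process and softirq context; it is not code from this patch, and all identifiers in it are hypothetical.

/*
 * Minimal sketch (not from this patch): a list protected by a lock
 * that is taken from both process context and softirq context.
 * All identifiers here are hypothetical, for illustration only.
 */
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_entry {
	struct list_head list;
};

static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_list);

/* Process-context path, e.g. a userspace-triggered cache flush. */
static void demo_flush(void)
{
	struct demo_entry *entry, *tmp;

	/*
	 * A plain spin_lock() here would be unsafe: a softirq running
	 * demo_rx_add() could interrupt this task on the same CPU while
	 * the lock is held and then spin on it forever.  spin_lock_bh()
	 * disables bottom halves locally for the critical section.
	 */
	spin_lock_bh(&demo_lock);
	list_for_each_entry_safe(entry, tmp, &demo_list, list) {
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&demo_lock);
}

/* Softirq-context path, e.g. network packet processing. */
static void demo_rx_add(struct demo_entry *entry)
{
	/* Already running in BH context, so the plain variant suffices. */
	spin_lock(&demo_lock);
	list_add(&entry->list, &demo_list);
	spin_unlock(&demo_lock);
}

Using the _bh variants rather than spin_lock_irqsave() appears to be the right fit for this cache, since it is touched only from process context and softirq context; the heavier irqsave variants would be needed only if the lock were also taken from hard interrupt context.
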
@@ -259,7 +259,7 @@ void cipso_v4_cache_invalidate(void)
 	u32 iter;
 
 	for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
-		spin_lock(&cipso_v4_cache[iter].lock);
+		spin_lock_bh(&cipso_v4_cache[iter].lock);
 		list_for_each_entry_safe(entry,
 					 tmp_entry,
 					 &cipso_v4_cache[iter].list, list) {
@@ -267,7 +267,7 @@ void cipso_v4_cache_invalidate(void)
 			cipso_v4_cache_entry_free(entry);
 		}
 		cipso_v4_cache[iter].size = 0;
-		spin_unlock(&cipso_v4_cache[iter].lock);
+		spin_unlock_bh(&cipso_v4_cache[iter].lock);
 	}
 
 	return;
@@ -309,7 +309,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
 
 	hash = cipso_v4_map_cache_hash(key, key_len);
 	bkt = hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
-	spin_lock(&cipso_v4_cache[bkt].lock);
+	spin_lock_bh(&cipso_v4_cache[bkt].lock);
 	list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) {
 		if (entry->hash == hash &&
 		    entry->key_len == key_len &&
@@ -318,7 +318,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
 			secattr->cache.free = entry->lsm_data.free;
 			secattr->cache.data = entry->lsm_data.data;
 			if (prev_entry == NULL) {
-				spin_unlock(&cipso_v4_cache[bkt].lock);
+				spin_unlock_bh(&cipso_v4_cache[bkt].lock);
 				return 0;
 			}
 
@@ -333,12 +333,12 @@ static int cipso_v4_cache_check(const unsigned char *key,
 					   &prev_entry->list);
 			}
 
-			spin_unlock(&cipso_v4_cache[bkt].lock);
+			spin_unlock_bh(&cipso_v4_cache[bkt].lock);
 			return 0;
 		}
 		prev_entry = entry;
 	}
-	spin_unlock(&cipso_v4_cache[bkt].lock);
+	spin_unlock_bh(&cipso_v4_cache[bkt].lock);
 	return -ENOENT;
 }
@@ -387,7 +387,7 @@ int cipso_v4_cache_add(const struct sk_buff *skb,
 	entry->lsm_data.data = secattr->cache.data;
 
 	bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
-	spin_lock(&cipso_v4_cache[bkt].lock);
+	spin_lock_bh(&cipso_v4_cache[bkt].lock);
 	if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) {
 		list_add(&entry->list, &cipso_v4_cache[bkt].list);
 		cipso_v4_cache[bkt].size += 1;
@@ -398,7 +398,7 @@ int cipso_v4_cache_add(const struct sk_buff *skb,
 		list_add(&entry->list, &cipso_v4_cache[bkt].list);
 		cipso_v4_cache_entry_free(old_entry);
 	}
-	spin_unlock(&cipso_v4_cache[bkt].lock);
+	spin_unlock_bh(&cipso_v4_cache[bkt].lock);
 
 	return 0;