Commit f9732ea1 authored by Tetsuo Handa, committed by James Morris

TOMOYO: Simplify garbage collector.

When TOMOYO started using a garbage collector in commit 847b173e ("TOMOYO: Add
garbage collector."), we waited for close() before kfree(). Thus, elements to be
kfree()d were queued up on the tomoyo_gc_list list.

But it turned out that tomoyo_element_linked_by_gc() tends to choke the
garbage collector when certain patterns of entries are queued.

Since the garbage collector has not been waiting for close() since commit
2e503bbb ("TOMOYO: Fix lockdep warning."), we can remove the tomoyo_gc_list
list and tomoyo_element_linked_by_gc() by doing sequential processing.
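
To make the new scheme concrete, here is a minimal userspace sketch (an
illustration only, not kernel code): wait_for_readers() and still_in_use()
are hypothetical stand-ins for synchronize_srcu(&tomoyo_ss) and the
"struct tomoyo_io_buffer"/users checks, and all mutex handling is omitted.
Each candidate element is unlinked and dealt with on the spot, so no
tomoyo_gc_list queue is needed:

  #include <stdbool.h>
  #include <stdlib.h>

  struct list_head { struct list_head *next, *prev; };

  static void wait_for_readers(void) { /* models synchronize_srcu() */ }
  static bool still_in_use(struct list_head *e) { (void) e; return false; }

  /* Unlink @e but leave e->next/e->prev intact, like __list_del_entry(). */
  static void unlink_entry(struct list_head *e)
  {
          e->next->prev = e->prev;
          e->prev->next = e->next;
  }

  /* Sequential GC: unlink one entry, wait, then free it or put it back. */
  static void try_to_gc(struct list_head *element)
  {
          unlink_entry(element);
          wait_for_readers();
          if (still_in_use(element)) {
                  /* Reinject exactly where it was: element->prev still
                     points at the old neighbour, which is why the real
                     code can do list_add_rcu(element, element->prev). */
                  struct list_head *prev = element->prev;
                  element->next = prev->next;
                  prev->next->prev = element;
                  prev->next = element;
                  return;
          }
          free(element);
  }

  int main(void)
  {
          /* A circular list holding one malloc()ed element. */
          struct list_head head = { &head, &head };
          struct list_head *e = malloc(sizeof(*e));
          e->next = &head; e->prev = &head;
          head.next = e; head.prev = e;
          try_to_gc(e);   /* frees e; the list is empty again */
          return head.next == &head ? 0 : 1;
  }

tomoyo_try_to_gc() below follows exactly this shape, dropping
tomoyo_policy_lock around synchronize_srcu() and dispatching to the
per-type tomoyo_del_*() helpers before freeing.
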
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: James Morris <jmorris@namei.org>
parent 778c4a4d
@@ -52,6 +52,9 @@
#define TOMOYO_EXEC_TMPSIZE 4096
/* Garbage collector is trying to kfree() this element. */
#define TOMOYO_GC_IN_PROGRESS -1
/* Profile number is an integer between 0 and 255. */
#define TOMOYO_MAX_PROFILES 256
@@ -398,7 +401,7 @@ enum tomoyo_pref_index {
/* Common header for holding ACL entries. */
struct tomoyo_acl_head {
struct list_head list;
bool is_deleted;
s8 is_deleted; /* true or false or TOMOYO_GC_IN_PROGRESS */
} __packed;
/* Common header for shared entries. */
@@ -665,7 +668,7 @@ struct tomoyo_condition {
struct tomoyo_acl_info {
struct list_head list;
struct tomoyo_condition *cond; /* Maybe NULL. */
bool is_deleted;
s8 is_deleted; /* true or false or TOMOYO_GC_IN_PROGRESS */
u8 type; /* One of values in "enum tomoyo_acl_entry_type_index". */
} __packed;
......
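
The common.h hunks above widen is_deleted from bool to s8 so that it can
hold a third state. A small standalone model of the three states (the
state() helper and the userspace types are hypothetical; the kernel uses
s8 where this sketch uses signed char):

  #include <stdio.h>

  #define TOMOYO_GC_IN_PROGRESS -1

  struct acl_head { signed char is_deleted; };

  static const char *state(const struct acl_head *e)
  {
          switch (e->is_deleted) {
          case 0:
                  return "live";         /* matched by lookups */
          case 1:
                  return "deleted";      /* candidate for the collector */
          case TOMOYO_GC_IN_PROGRESS:
                  return "being freed";  /* updaters must skip it */
          default:
                  return "invalid";
          }
  }

  int main(void)
  {
          struct acl_head e = { .is_deleted = TOMOYO_GC_IN_PROGRESS };
          printf("%s\n", state(&e));     /* prints "being freed" */
          return 0;
  }

This is why tomoyo_update_policy() and tomoyo_update_domain() below gain an
explicit is_deleted == TOMOYO_GC_IN_PROGRESS skip: matching such an entry
would resurrect memory that the collector is about to kfree().
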
@@ -400,8 +400,9 @@ static struct tomoyo_condition *tomoyo_commit_condition
found = true;
goto out;
}
list_for_each_entry_rcu(ptr, &tomoyo_condition_list, head.list) {
if (!tomoyo_same_condition(ptr, entry))
list_for_each_entry(ptr, &tomoyo_condition_list, head.list) {
if (!tomoyo_same_condition(ptr, entry) ||
atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS)
continue;
/* Same entry found. Share this entry. */
atomic_inc(&ptr->head.users);
@@ -411,8 +412,7 @@ static struct tomoyo_condition *tomoyo_commit_condition
if (!found) {
if (tomoyo_memory_ok(entry)) {
atomic_set(&entry->head.users, 1);
list_add_rcu(&entry->head.list,
&tomoyo_condition_list);
list_add(&entry->head.list, &tomoyo_condition_list);
} else {
found = true;
ptr = NULL;
......
@@ -39,6 +39,8 @@ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
if (mutex_lock_interruptible(&tomoyo_policy_lock))
return -ENOMEM;
list_for_each_entry_rcu(entry, list, list) {
if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
continue;
if (!check_duplicate(entry, new_entry))
continue;
entry->is_deleted = param->is_delete;
@@ -115,6 +117,8 @@ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
if (mutex_lock_interruptible(&tomoyo_policy_lock))
goto out;
list_for_each_entry_rcu(entry, list, list) {
if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
continue;
if (!tomoyo_same_acl_head(entry, new_entry) ||
!check_duplicate(entry, new_entry))
continue;
......
@@ -13,35 +13,6 @@ static LIST_HEAD(tomoyo_io_buffer_list);
/* Lock for protecting tomoyo_io_buffer_list. */
static DEFINE_SPINLOCK(tomoyo_io_buffer_list_lock);
/* Size of an element. */
static const u8 tomoyo_element_size[TOMOYO_MAX_POLICY] = {
[TOMOYO_ID_GROUP] = sizeof(struct tomoyo_group),
[TOMOYO_ID_ADDRESS_GROUP] = sizeof(struct tomoyo_address_group),
[TOMOYO_ID_PATH_GROUP] = sizeof(struct tomoyo_path_group),
[TOMOYO_ID_NUMBER_GROUP] = sizeof(struct tomoyo_number_group),
[TOMOYO_ID_AGGREGATOR] = sizeof(struct tomoyo_aggregator),
[TOMOYO_ID_TRANSITION_CONTROL] =
sizeof(struct tomoyo_transition_control),
[TOMOYO_ID_MANAGER] = sizeof(struct tomoyo_manager),
/* [TOMOYO_ID_CONDITION] = "struct tomoyo_condition"->size, */
/* [TOMOYO_ID_NAME] = "struct tomoyo_name"->size, */
/* [TOMOYO_ID_ACL] =
tomoyo_acl_size["struct tomoyo_acl_info"->type], */
[TOMOYO_ID_DOMAIN] = sizeof(struct tomoyo_domain_info),
};
/* Size of a domain ACL element. */
static const u8 tomoyo_acl_size[] = {
[TOMOYO_TYPE_PATH_ACL] = sizeof(struct tomoyo_path_acl),
[TOMOYO_TYPE_PATH2_ACL] = sizeof(struct tomoyo_path2_acl),
[TOMOYO_TYPE_PATH_NUMBER_ACL] = sizeof(struct tomoyo_path_number_acl),
[TOMOYO_TYPE_MKDEV_ACL] = sizeof(struct tomoyo_mkdev_acl),
[TOMOYO_TYPE_MOUNT_ACL] = sizeof(struct tomoyo_mount_acl),
[TOMOYO_TYPE_INET_ACL] = sizeof(struct tomoyo_inet_acl),
[TOMOYO_TYPE_UNIX_ACL] = sizeof(struct tomoyo_unix_acl),
[TOMOYO_TYPE_ENV_ACL] = sizeof(struct tomoyo_env_acl),
};
/**
* tomoyo_struct_used_by_io_buffer - Check whether the list element is used by /sys/kernel/security/tomoyo/ users or not.
*
@@ -59,15 +30,11 @@ static bool tomoyo_struct_used_by_io_buffer(const struct list_head *element)
list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
head->users++;
spin_unlock(&tomoyo_io_buffer_list_lock);
if (mutex_lock_interruptible(&head->io_sem)) {
in_use = true;
goto out;
}
mutex_lock(&head->io_sem);
if (head->r.domain == element || head->r.group == element ||
head->r.acl == element || &head->w.domain->list == element)
in_use = true;
mutex_unlock(&head->io_sem);
out:
spin_lock(&tomoyo_io_buffer_list_lock);
head->users--;
if (in_use)
@@ -81,15 +48,14 @@ static bool tomoyo_struct_used_by_io_buffer(const struct list_head *element)
* tomoyo_name_used_by_io_buffer - Check whether the string is used by /sys/kernel/security/tomoyo/ users or not.
*
* @string: String to check.
* @size: Memory allocated for @string .
*
* Returns true if @string is used by /sys/kernel/security/tomoyo/ users,
* false otherwise.
*/
static bool tomoyo_name_used_by_io_buffer(const char *string,
const size_t size)
static bool tomoyo_name_used_by_io_buffer(const char *string)
{
struct tomoyo_io_buffer *head;
const size_t size = strlen(string) + 1;
bool in_use = false;
spin_lock(&tomoyo_io_buffer_list_lock);
@@ -97,10 +63,7 @@ static bool tomoyo_name_used_by_io_buffer(const char *string,
int i;
head->users++;
spin_unlock(&tomoyo_io_buffer_list_lock);
if (mutex_lock_interruptible(&head->io_sem)) {
in_use = true;
goto out;
}
mutex_lock(&head->io_sem);
for (i = 0; i < TOMOYO_MAX_IO_READ_QUEUE; i++) {
const char *w = head->r.w[i];
if (w < string || w > string + size)
@@ -109,7 +72,6 @@ static bool tomoyo_name_used_by_io_buffer(const char *string,
break;
}
mutex_unlock(&head->io_sem);
out:
spin_lock(&tomoyo_io_buffer_list_lock);
head->users--;
if (in_use)
@@ -119,84 +81,6 @@ static bool tomoyo_name_used_by_io_buffer(const char *string,
return in_use;
}
/* Structure for garbage collection. */
struct tomoyo_gc {
struct list_head list;
enum tomoyo_policy_id type;
size_t size;
struct list_head *element;
};
/* List of entries to be deleted. */
static LIST_HEAD(tomoyo_gc_list);
/* Length of tomoyo_gc_list. */
static int tomoyo_gc_list_len;
/**
* tomoyo_add_to_gc - Add an entry to the to-be-deleted list.
*
* @type: One of values in "enum tomoyo_policy_id".
* @element: Pointer to "struct list_head".
*
* Returns true on success, false otherwise.
*
* Caller holds tomoyo_policy_lock mutex.
*
* Adding an entry needs kmalloc(). Thus, if we tried to add thousands of
* entries at once, it would take too long. Therefore, do not add more than
* 128 entries per scan. But to be able to handle the worst case where all
* entries are in use, we accept one more entry per scan.
*
* If we used a singly linked list via "struct list_head"->prev (which is
* LIST_POISON2), we could avoid kmalloc().
*/
static bool tomoyo_add_to_gc(const int type, struct list_head *element)
{
struct tomoyo_gc *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return false;
entry->type = type;
if (type == TOMOYO_ID_ACL)
entry->size = tomoyo_acl_size[
container_of(element,
typeof(struct tomoyo_acl_info),
list)->type];
else if (type == TOMOYO_ID_NAME)
entry->size = strlen(container_of(element,
typeof(struct tomoyo_name),
head.list)->entry.name) + 1;
else if (type == TOMOYO_ID_CONDITION)
entry->size =
container_of(element, typeof(struct tomoyo_condition),
head.list)->size;
else
entry->size = tomoyo_element_size[type];
entry->element = element;
list_add(&entry->list, &tomoyo_gc_list);
list_del_rcu(element);
return tomoyo_gc_list_len++ < 128;
}
/**
* tomoyo_element_linked_by_gc - Validate next element of an entry.
*
* @element: Pointer to an element.
* @size: Size of @element in bytes.
*
* Returns true if @element is linked by other elements in the garbage
* collector's queue, false otherwise.
*/
static bool tomoyo_element_linked_by_gc(const u8 *element, const size_t size)
{
struct tomoyo_gc *p;
list_for_each_entry(p, &tomoyo_gc_list, list) {
const u8 *ptr = (const u8 *) p->element->next;
if (ptr < element || element + size < ptr)
continue;
return true;
}
return false;
}
/**
* tomoyo_del_transition_control - Delete members in "struct tomoyo_transition_control".
*
@@ -204,7 +88,7 @@ static bool tomoyo_element_linked_by_gc(const u8 *element, const size_t size)
*
* Returns nothing.
*/
static void tomoyo_del_transition_control(struct list_head *element)
static inline void tomoyo_del_transition_control(struct list_head *element)
{
struct tomoyo_transition_control *ptr =
container_of(element, typeof(*ptr), head.list);
@@ -219,7 +103,7 @@ static void tomoyo_del_transition_control(struct list_head *element)
*
* Returns nothing.
*/
static void tomoyo_del_aggregator(struct list_head *element)
static inline void tomoyo_del_aggregator(struct list_head *element)
{
struct tomoyo_aggregator *ptr =
container_of(element, typeof(*ptr), head.list);
@@ -234,7 +118,7 @@ static void tomoyo_del_aggregator(struct list_head *element)
*
* Returns nothing.
*/
static void tomoyo_del_manager(struct list_head *element)
static inline void tomoyo_del_manager(struct list_head *element)
{
struct tomoyo_manager *ptr =
container_of(element, typeof(*ptr), head.list);
@@ -330,44 +214,24 @@ static void tomoyo_del_acl(struct list_head *element)
*
* @element: Pointer to "struct list_head".
*
* Returns true if deleted, false otherwise.
* Returns nothing.
*/
static bool tomoyo_del_domain(struct list_head *element)
static inline void tomoyo_del_domain(struct list_head *element)
{
struct tomoyo_domain_info *domain =
container_of(element, typeof(*domain), list);
struct tomoyo_acl_info *acl;
struct tomoyo_acl_info *tmp;
/*
* Since we don't protect whole execve() operation using SRCU,
* we need to recheck domain->users at this point.
*
* (1) Reader starts SRCU section upon execve().
* (2) Reader traverses tomoyo_domain_list and finds this domain.
* (3) Writer marks this domain as deleted.
* (4) Garbage collector removes this domain from tomoyo_domain_list
* because this domain is marked as deleted and used by nobody.
* (5) Reader saves reference to this domain into
* "struct linux_binprm"->cred->security .
* (6) Reader finishes SRCU section, although execve() operation has
* not finished yet.
* (7) Garbage collector waits for SRCU synchronization.
* (8) Garbage collector kfree() this domain because this domain is
* used by nobody.
* (9) Reader finishes execve() operation and restores this domain from
* "struct linux_binprm"->cred->security.
*
* By updating domain->users at (5), we can solve this race problem
* by rechecking domain->users at (8).
* Since this domain is referenced from neither
* "struct tomoyo_io_buffer" nor "struct cred"->security, we can delete
* elements without checking the is_deleted flag.
*/
if (atomic_read(&domain->users))
return false;
list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) {
tomoyo_del_acl(&acl->list);
tomoyo_memory_free(acl);
}
tomoyo_put_name(domain->domainname);
return true;
}
/**
@@ -416,10 +280,9 @@ void tomoyo_del_condition(struct list_head *element)
*
* Returns nothing.
*/
static void tomoyo_del_name(struct list_head *element)
static inline void tomoyo_del_name(struct list_head *element)
{
const struct tomoyo_name *ptr =
container_of(element, typeof(*ptr), head.list);
/* Nothing to do. */
}
/**
@@ -429,7 +292,7 @@ static void tomoyo_del_name(struct list_head *element)
*
* Returns nothing.
*/
static void tomoyo_del_path_group(struct list_head *element)
static inline void tomoyo_del_path_group(struct list_head *element)
{
struct tomoyo_path_group *member =
container_of(element, typeof(*member), head.list);
@@ -443,7 +306,7 @@ static void tomoyo_del_path_group(struct list_head *element)
*
* Returns nothing.
*/
static void tomoyo_del_group(struct list_head *element)
static inline void tomoyo_del_group(struct list_head *element)
{
struct tomoyo_group *group =
container_of(element, typeof(*group), head.list);
@@ -469,10 +332,109 @@ static inline void tomoyo_del_address_group(struct list_head *element)
*
* Returns nothing.
*/
static void tomoyo_del_number_group(struct list_head *element)
static inline void tomoyo_del_number_group(struct list_head *element)
{
struct tomoyo_number_group *member =
container_of(element, typeof(*member), head.list);
/* Nothing to do. */
}
/**
* tomoyo_try_to_gc - Try to kfree() an entry.
*
* @type: One of values in "enum tomoyo_policy_id".
* @element: Pointer to "struct list_head".
*
* Returns nothing.
*
* Caller holds tomoyo_policy_lock mutex.
*/
static void tomoyo_try_to_gc(const enum tomoyo_policy_id type,
struct list_head *element)
{
/*
* __list_del_entry() guarantees that the list element is no longer
* reachable from the list which the element was originally on (e.g.
* tomoyo_domain_list). Also, synchronize_srcu() guarantees that the
* list element is no longer referenced by syscall users.
*/
__list_del_entry(element);
mutex_unlock(&tomoyo_policy_lock);
synchronize_srcu(&tomoyo_ss);
/*
* However, there are two users which may still be using the list
* element. We need to defer until both users forget this element.
*
* Don't kfree() until "struct tomoyo_io_buffer"->r.{domain,group,acl}
* and "struct tomoyo_io_buffer"->w.domain forget this element.
*/
if (tomoyo_struct_used_by_io_buffer(element))
goto reinject;
switch (type) {
case TOMOYO_ID_TRANSITION_CONTROL:
tomoyo_del_transition_control(element);
break;
case TOMOYO_ID_MANAGER:
tomoyo_del_manager(element);
break;
case TOMOYO_ID_AGGREGATOR:
tomoyo_del_aggregator(element);
break;
case TOMOYO_ID_GROUP:
tomoyo_del_group(element);
break;
case TOMOYO_ID_PATH_GROUP:
tomoyo_del_path_group(element);
break;
case TOMOYO_ID_ADDRESS_GROUP:
tomoyo_del_address_group(element);
break;
case TOMOYO_ID_NUMBER_GROUP:
tomoyo_del_number_group(element);
break;
case TOMOYO_ID_CONDITION:
tomoyo_del_condition(element);
break;
case TOMOYO_ID_NAME:
/*
* Don't kfree() until all "struct tomoyo_io_buffer"->r.w[]
* forget this element.
*/
if (tomoyo_name_used_by_io_buffer
(container_of(element, typeof(struct tomoyo_name),
head.list)->entry.name))
goto reinject;
tomoyo_del_name(element);
break;
case TOMOYO_ID_ACL:
tomoyo_del_acl(element);
break;
case TOMOYO_ID_DOMAIN:
/*
* Don't kfree() until all "struct cred"->security forget this
* element.
*/
if (atomic_read(&container_of
(element, typeof(struct tomoyo_domain_info),
list)->users))
goto reinject;
tomoyo_del_domain(element);
break;
case TOMOYO_MAX_POLICY:
break;
}
mutex_lock(&tomoyo_policy_lock);
tomoyo_memory_free(element);
return;
reinject:
/*
* We can safely reinject this element here because both of the
* following hold:
* (1) Appending and removing list elements are protected by the
*     tomoyo_policy_lock mutex.
* (2) Only this function removes list elements, and it runs
*     exclusively under the tomoyo_gc_mutex mutex.
*/
mutex_lock(&tomoyo_policy_lock);
list_add_rcu(element, element->prev);
}
/**
@@ -481,19 +443,19 @@ static void tomoyo_del_number_group(struct list_head *element)
* @id: One of values in "enum tomoyo_policy_id".
* @member_list: Pointer to "struct list_head".
*
* Returns true if some elements are deleted, false otherwise.
* Returns nothing.
*/
static bool tomoyo_collect_member(const enum tomoyo_policy_id id,
static void tomoyo_collect_member(const enum tomoyo_policy_id id,
struct list_head *member_list)
{
struct tomoyo_acl_head *member;
list_for_each_entry(member, member_list, list) {
struct tomoyo_acl_head *tmp;
list_for_each_entry_safe(member, tmp, member_list, list) {
if (!member->is_deleted)
continue;
if (!tomoyo_add_to_gc(id, &member->list))
return false;
member->is_deleted = TOMOYO_GC_IN_PROGRESS;
tomoyo_try_to_gc(id, &member->list);
}
return true;
}
/**
@@ -501,22 +463,22 @@ static bool tomoyo_collect_member(const enum tomoyo_policy_id id,
*
* @list: Pointer to "struct list_head".
*
* Returns true if some elements are deleted, false otherwise.
* Returns nothing.
*/
static bool tomoyo_collect_acl(struct list_head *list)
static void tomoyo_collect_acl(struct list_head *list)
{
struct tomoyo_acl_info *acl;
list_for_each_entry(acl, list, list) {
struct tomoyo_acl_info *tmp;
list_for_each_entry_safe(acl, tmp, list, list) {
if (!acl->is_deleted)
continue;
if (!tomoyo_add_to_gc(TOMOYO_ID_ACL, &acl->list))
return false;
acl->is_deleted = TOMOYO_GC_IN_PROGRESS;
tomoyo_try_to_gc(TOMOYO_ID_ACL, &acl->list);
}
return true;
}
/**
* tomoyo_collect_entry - Scan lists for deleted elements.
* tomoyo_collect_entry - Try to kfree() deleted elements.
*
* Returns nothing.
*/
@@ -525,36 +487,40 @@ static void tomoyo_collect_entry(void)
int i;
enum tomoyo_policy_id id;
struct tomoyo_policy_namespace *ns;
int idx;
if (mutex_lock_interruptible(&tomoyo_policy_lock))
return;
idx = tomoyo_read_lock();
mutex_lock(&tomoyo_policy_lock);
{
struct tomoyo_domain_info *domain;
list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
if (!tomoyo_collect_acl(&domain->acl_info_list))
goto unlock;
struct tomoyo_domain_info *tmp;
list_for_each_entry_safe(domain, tmp, &tomoyo_domain_list,
list) {
tomoyo_collect_acl(&domain->acl_info_list);
if (!domain->is_deleted || atomic_read(&domain->users))
continue;
/*
* Nobody is referring to this domain. But somebody may start
* referring to it after a successful execve().
* We recheck domain->users after SRCU synchronization.
*/
if (!tomoyo_add_to_gc(TOMOYO_ID_DOMAIN, &domain->list))
goto unlock;
tomoyo_try_to_gc(TOMOYO_ID_DOMAIN, &domain->list);
}
}
list_for_each_entry_rcu(ns, &tomoyo_namespace_list, namespace_list) {
list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
for (id = 0; id < TOMOYO_MAX_POLICY; id++)
if (!tomoyo_collect_member(id, &ns->policy_list[id]))
goto unlock;
tomoyo_collect_member(id, &ns->policy_list[id]);
for (i = 0; i < TOMOYO_MAX_ACL_GROUPS; i++)
if (!tomoyo_collect_acl(&ns->acl_group[i]))
goto unlock;
tomoyo_collect_acl(&ns->acl_group[i]);
}
{
struct tomoyo_shared_acl_head *ptr;
struct tomoyo_shared_acl_head *tmp;
list_for_each_entry_safe(ptr, tmp, &tomoyo_condition_list,
list) {
if (atomic_read(&ptr->users) > 0)
continue;
atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS);
tomoyo_try_to_gc(TOMOYO_ID_CONDITION, &ptr->list);
}
}
list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
for (i = 0; i < TOMOYO_MAX_GROUP; i++) {
struct list_head *list = &ns->group_list[i];
struct tomoyo_group *group;
struct tomoyo_group *tmp;
switch (i) {
case 0:
id = TOMOYO_ID_PATH_GROUP;
@@ -566,139 +532,37 @@ static void tomoyo_collect_entry(void)
id = TOMOYO_ID_ADDRESS_GROUP;
break;
}
list_for_each_entry(group, list, head.list) {
if (!tomoyo_collect_member
(id, &group->member_list))
goto unlock;
list_for_each_entry_safe(group, tmp, list, head.list) {
tomoyo_collect_member(id, &group->member_list);
if (!list_empty(&group->member_list) ||
atomic_read(&group->head.users))
atomic_read(&group->head.users) > 0)
continue;
if (!tomoyo_add_to_gc(TOMOYO_ID_GROUP,
&group->head.list))
goto unlock;
atomic_set(&group->head.users,
TOMOYO_GC_IN_PROGRESS);
tomoyo_try_to_gc(TOMOYO_ID_GROUP,
&group->head.list);
}
}
}
id = TOMOYO_ID_CONDITION;
for (i = 0; i < TOMOYO_MAX_HASH + 1; i++) {
struct list_head *list = !i ?
&tomoyo_condition_list : &tomoyo_name_list[i - 1];
for (i = 0; i < TOMOYO_MAX_HASH; i++) {
struct list_head *list = &tomoyo_name_list[i];
struct tomoyo_shared_acl_head *ptr;
list_for_each_entry(ptr, list, list) {
if (atomic_read(&ptr->users))
struct tomoyo_shared_acl_head *tmp;
list_for_each_entry_safe(ptr, tmp, list, list) {
if (atomic_read(&ptr->users) > 0)
continue;
if (!tomoyo_add_to_gc(id, &ptr->list))
goto unlock;
atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS);
tomoyo_try_to_gc(TOMOYO_ID_NAME, &ptr->list);
}
id = TOMOYO_ID_NAME;
}
unlock:
tomoyo_read_unlock(idx);
mutex_unlock(&tomoyo_policy_lock);
}
/**
* tomoyo_kfree_entry - Delete entries in tomoyo_gc_list.
*
* Returns true if some entries were kfree()d, false otherwise.
*/
static bool tomoyo_kfree_entry(void)
{
struct tomoyo_gc *p;
struct tomoyo_gc *tmp;
bool result = false;
list_for_each_entry_safe(p, tmp, &tomoyo_gc_list, list) {
struct list_head *element = p->element;
/*
* list_del_rcu() in tomoyo_add_to_gc() guarantees that the
* list element is no longer reachable from the list which
* the element was originally on (e.g. tomoyo_domain_list).
* Also, synchronize_srcu() in tomoyo_gc_thread() guarantees
* that the list element is no longer referenced by syscall
* users.
*
* However, there are three users which may still be using the
* list element. We need to defer until all of these users
* forget the list element.
*
* Firstly, defer until "struct tomoyo_io_buffer"->r.{domain,
* group,acl} and "struct tomoyo_io_buffer"->w.domain forget
* the list element.
*/
if (tomoyo_struct_used_by_io_buffer(element))
continue;
/*
* Secondly, defer until all other elements in the
* tomoyo_gc_list list forget the list element.
*/
if (tomoyo_element_linked_by_gc((const u8 *) element, p->size))
continue;
switch (p->type) {
case TOMOYO_ID_TRANSITION_CONTROL:
tomoyo_del_transition_control(element);
break;
case TOMOYO_ID_AGGREGATOR:
tomoyo_del_aggregator(element);
break;
case TOMOYO_ID_MANAGER:
tomoyo_del_manager(element);
break;
case TOMOYO_ID_CONDITION:
tomoyo_del_condition(element);
break;
case TOMOYO_ID_NAME:
/*
* Thirdly, defer until all "struct tomoyo_io_buffer"
* ->r.w[] forget the list element.
*/
if (tomoyo_name_used_by_io_buffer(
container_of(element, typeof(struct tomoyo_name),
head.list)->entry.name, p->size))
continue;
tomoyo_del_name(element);
break;
case TOMOYO_ID_ACL:
tomoyo_del_acl(element);
break;
case TOMOYO_ID_DOMAIN:
if (!tomoyo_del_domain(element))
continue;
break;
case TOMOYO_ID_PATH_GROUP:
tomoyo_del_path_group(element);
break;
case TOMOYO_ID_ADDRESS_GROUP:
tomoyo_del_address_group(element);
break;
case TOMOYO_ID_GROUP:
tomoyo_del_group(element);
break;
case TOMOYO_ID_NUMBER_GROUP:
tomoyo_del_number_group(element);
break;
case TOMOYO_MAX_POLICY:
break;
}
tomoyo_memory_free(element);
list_del(&p->list);
kfree(p);
tomoyo_gc_list_len--;
result = true;
}
return result;
}
/**
* tomoyo_gc_thread - Garbage collector thread function.
*
* @unused: Unused.
*
* In case the OOM killer chooses this thread for termination, we create
* this thread as a short-lived thread whenever the
* /sys/kernel/security/tomoyo/ interface is close()d.
*
* Returns 0.
*/
static int tomoyo_gc_thread(void *unused)
@@ -707,13 +571,7 @@ static int tomoyo_gc_thread(void *unused)
static DEFINE_MUTEX(tomoyo_gc_mutex);
if (!mutex_trylock(&tomoyo_gc_mutex))
goto out;
do {
tomoyo_collect_entry();
if (list_empty(&tomoyo_gc_list))
break;
synchronize_srcu(&tomoyo_ss);
} while (tomoyo_kfree_entry());
tomoyo_collect_entry();
{
struct tomoyo_io_buffer *head;
struct tomoyo_io_buffer *tmp;
......
@@ -123,7 +123,8 @@ struct tomoyo_group *tomoyo_get_group(struct tomoyo_acl_param *param,
goto out;
list = &param->ns->group_list[idx];
list_for_each_entry(group, list, head.list) {
if (e.group_name != group->group_name)
if (e.group_name != group->group_name ||
atomic_read(&group->head.users) == TOMOYO_GC_IN_PROGRESS)
continue;
atomic_inc(&group->head.users);
found = true;
@@ -175,7 +176,8 @@ const struct tomoyo_path_info *tomoyo_get_name(const char *name)
if (mutex_lock_interruptible(&tomoyo_policy_lock))
return NULL;
list_for_each_entry(ptr, head, head.list) {
if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name))
if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name) ||
atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS)
continue;
atomic_inc(&ptr->head.users);
goto out;
......
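
The hunks above apply the same sentinel to reference-counted entries: the
collector sets head.users to TOMOYO_GC_IN_PROGRESS, and lookups refuse to
take a new reference. A rough standalone model using C11 atomics (get_ref()
is a hypothetical name; the tomoyo_policy_lock mutex that serializes both
sides in the kernel is omitted):

  #include <stdatomic.h>
  #include <stdbool.h>

  #define TOMOYO_GC_IN_PROGRESS -1

  struct shared_head { atomic_int users; };

  /* Take a reference unless the collector has already claimed the
     entry; in that case, pretend the entry does not exist so that a
     fresh one gets allocated instead. */
  static bool get_ref(struct shared_head *h)
  {
          if (atomic_load(&h->users) == TOMOYO_GC_IN_PROGRESS)
                  return false;
          atomic_fetch_add(&h->users, 1);
          return true;
  }

  int main(void)
  {
          struct shared_head h = { .users = 1 };
          return get_ref(&h) ? 0 : 1;   /* takes the ref; users is now 2 */
  }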