Commit c3059477 authored by Jarek Poplawski, committed by David S. Miller

ipv4: Use synchronize_rcu() during trie_rebalance()

During trie_rebalance() we free memory after resizing with call_rcu(),
but large updates, especially with PREEMPT_NONE configs, can cause
memory stress, so this patch calls synchronize_rcu() in
tnode_free_flush() after every sync_pages pages' worth of queued tnodes
to guarantee such freeing (especially before resizing the root node).

The value sync_pages = 128 is based on Pawel Staszewski's tests as the
lowest value that doesn't hinder update times. (For testing purposes
there was a sysfs module parameter to change it on demand, but it has
been removed until we're sure it could really be useful.) A minimal
userspace sketch of this accounting follows the diff below.

The patch is based on suggestions by: Paul E. McKenney
<paulmck@linux.vnet.ibm.com>
Reported-by: Pawel Staszewski <pstaszewski@itcare.pl>
Tested-by: Pawel Staszewski <pstaszewski@itcare.pl>
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2e477c9b
net/ipv4/fib_trie.c
@@ -164,6 +164,14 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn);
 static struct tnode *halve(struct trie *t, struct tnode *tn);
 /* tnodes to free after resize(); protected by RTNL */
 static struct tnode *tnode_free_head;
+static size_t tnode_free_size;
+
+/*
+ * synchronize_rcu after call_rcu for that many pages; it should be especially
+ * useful before resizing the root node with PREEMPT_NONE configs; the value was
+ * obtained experimentally, aiming to avoid visible slowdown.
+ */
+static const int sync_pages = 128;
 
 static struct kmem_cache *fn_alias_kmem __read_mostly;
 static struct kmem_cache *trie_leaf_kmem __read_mostly;
@@ -393,6 +401,8 @@ static void tnode_free_safe(struct tnode *tn)
 	BUG_ON(IS_LEAF(tn));
 	tn->tnode_free = tnode_free_head;
 	tnode_free_head = tn;
+	tnode_free_size += sizeof(struct tnode) +
+			   (sizeof(struct node *) << tn->bits);
 }
 
 static void tnode_free_flush(void)
@@ -404,6 +414,11 @@ static void tnode_free_flush(void)
 		tn->tnode_free = NULL;
 		tnode_free(tn);
 	}
+
+	if (tnode_free_size >= PAGE_SIZE * sync_pages) {
+		tnode_free_size = 0;
+		synchronize_rcu();
+	}
 }
 
 static struct leaf *leaf_new(void)
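
For illustration, here is a minimal userspace sketch of the accounting
pattern the hunks above add. It is not the kernel code: synchronize_rcu()
is stubbed, struct node and the node_free_*() helpers are hypothetical
stand-ins for the trie's tnode helpers, and plain free() stands in for
the freeing the kernel defers via call_rcu().

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096
static const int sync_pages = 128;	/* flush threshold, in pages */

struct node {
	size_t size;			/* bytes this node occupies */
	struct node *free_next;		/* singly linked free list */
};

static struct node *free_head;
static size_t free_size;		/* bytes queued since the last sync */

static void synchronize_rcu(void)
{
	/* Stub: the real call blocks until an RCU grace period elapses. */
}

/* Queue a node for deferred freeing (cf. tnode_free_safe()). */
static void node_free_safe(struct node *n)
{
	n->free_next = free_head;
	free_head = n;
	free_size += n->size;
}

/* Free everything queued; sync once enough bytes have accumulated. */
static void node_free_flush(void)
{
	while (free_head) {
		struct node *n = free_head;

		free_head = n->free_next;
		free(n);
	}

	if (free_size >= (size_t)PAGE_SIZE * sync_pages) {
		free_size = 0;
		synchronize_rcu();
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 1000; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		/* Mirror the patch's sizing: header plus 2^bits children. */
		n->size = sizeof(struct node) + (sizeof(void *) << 5);
		node_free_safe(n);
	}
	node_free_flush();
	printf("queued %zu bytes since the last sync\n", free_size);
	return 0;
}

As in the patch, the byte counter is reset only when the threshold is
crossed, so partial flushes keep accumulating toward the next
synchronize_rcu().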