Commit 8eb510db authored by Uladzislau Rezki (Sony)'s avatar Uladzislau Rezki (Sony) Committed by akpm

mm/vmalloc: make link_va()/unlink_va() common to different rb_root

Patch series "Reduce a vmalloc internal lock contention preparation work".

This small series is preparation work to implement per-cpu vmalloc
allocation in order to reduce a high internal lock contention.  This
series does not introduce any functional changes, it is only about
preparation.


This patch (of 5):

Currently link_va() and unlink_va(), in order to figure out a tree type,
compares a passed root value with a global free_vmap_area_root variable to
distinguish the augmented rb-tree from a regular one.  It is hard coded
since such functions can manipulate only the specific
"free_vmap_area_root" tree that represents a global free vmap space.

Make it common by introducing "_augment" versions of both internal
functions, so it is possible to deal with different trees.

There is no functional change as a result of this patch.

Link: https://lkml.kernel.org/r/20220607093449.3100-1-urezki@gmail.com
Link: https://lkml.kernel.org/r/20220607093449.3100-2-urezki@gmail.com
Signed-off-by: default avatarUladzislau Rezki (Sony) <urezki@gmail.com>
Reviewed-by: default avatarBaoquan He <bhe@redhat.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oleksiy Avramchenko <oleksiy.avramchenko@sony.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
parent bbf535fd
...@@ -910,8 +910,9 @@ get_va_next_sibling(struct rb_node *parent, struct rb_node **link) ...@@ -910,8 +910,9 @@ get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
} }
static __always_inline void static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root, __link_va(struct vmap_area *va, struct rb_root *root,
struct rb_node *parent, struct rb_node **link, struct list_head *head) struct rb_node *parent, struct rb_node **link,
struct list_head *head, bool augment)
{ {
/* /*
* VA is still not in the list, but we can * VA is still not in the list, but we can
...@@ -925,7 +926,7 @@ link_va(struct vmap_area *va, struct rb_root *root, ...@@ -925,7 +926,7 @@ link_va(struct vmap_area *va, struct rb_root *root,
/* Insert to the rb-tree */ /* Insert to the rb-tree */
rb_link_node(&va->rb_node, parent, link); rb_link_node(&va->rb_node, parent, link);
if (root == &free_vmap_area_root) { if (augment) {
/* /*
* Some explanation here. Just perform simple insertion * Some explanation here. Just perform simple insertion
* to the tree. We do not set va->subtree_max_size to * to the tree. We do not set va->subtree_max_size to
...@@ -949,12 +950,28 @@ link_va(struct vmap_area *va, struct rb_root *root, ...@@ -949,12 +950,28 @@ link_va(struct vmap_area *va, struct rb_root *root,
} }
static __always_inline void static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root) link_va(struct vmap_area *va, struct rb_root *root,
struct rb_node *parent, struct rb_node **link,
struct list_head *head)
{
__link_va(va, root, parent, link, head, false);
}
static __always_inline void
link_va_augment(struct vmap_area *va, struct rb_root *root,
struct rb_node *parent, struct rb_node **link,
struct list_head *head)
{
__link_va(va, root, parent, link, head, true);
}
static __always_inline void
__unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
{ {
if (WARN_ON(RB_EMPTY_NODE(&va->rb_node))) if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
return; return;
if (root == &free_vmap_area_root) if (augment)
rb_erase_augmented(&va->rb_node, rb_erase_augmented(&va->rb_node,
root, &free_vmap_area_rb_augment_cb); root, &free_vmap_area_rb_augment_cb);
else else
...@@ -964,6 +981,18 @@ unlink_va(struct vmap_area *va, struct rb_root *root) ...@@ -964,6 +981,18 @@ unlink_va(struct vmap_area *va, struct rb_root *root)
RB_CLEAR_NODE(&va->rb_node); RB_CLEAR_NODE(&va->rb_node);
} }
static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
__unlink_va(va, root, false);
}
static __always_inline void
unlink_va_augment(struct vmap_area *va, struct rb_root *root)
{
__unlink_va(va, root, true);
}
#if DEBUG_AUGMENT_PROPAGATE_CHECK #if DEBUG_AUGMENT_PROPAGATE_CHECK
/* /*
* Gets called when remove the node and rotate. * Gets called when remove the node and rotate.
...@@ -1059,7 +1088,7 @@ insert_vmap_area_augment(struct vmap_area *va, ...@@ -1059,7 +1088,7 @@ insert_vmap_area_augment(struct vmap_area *va,
link = find_va_links(va, root, NULL, &parent); link = find_va_links(va, root, NULL, &parent);
if (link) { if (link) {
link_va(va, root, parent, link, head); link_va_augment(va, root, parent, link, head);
augment_tree_propagate_from(va); augment_tree_propagate_from(va);
} }
} }
...@@ -1076,8 +1105,8 @@ insert_vmap_area_augment(struct vmap_area *va, ...@@ -1076,8 +1105,8 @@ insert_vmap_area_augment(struct vmap_area *va,
* ongoing. * ongoing.
*/ */
static __always_inline struct vmap_area * static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va, __merge_or_add_vmap_area(struct vmap_area *va,
struct rb_root *root, struct list_head *head) struct rb_root *root, struct list_head *head, bool augment)
{ {
struct vmap_area *sibling; struct vmap_area *sibling;
struct list_head *next; struct list_head *next;
...@@ -1139,7 +1168,7 @@ merge_or_add_vmap_area(struct vmap_area *va, ...@@ -1139,7 +1168,7 @@ merge_or_add_vmap_area(struct vmap_area *va,
* "normalized" because of rotation operations. * "normalized" because of rotation operations.
*/ */
if (merged) if (merged)
unlink_va(va, root); __unlink_va(va, root, augment);
sibling->va_end = va->va_end; sibling->va_end = va->va_end;
...@@ -1154,16 +1183,23 @@ merge_or_add_vmap_area(struct vmap_area *va, ...@@ -1154,16 +1183,23 @@ merge_or_add_vmap_area(struct vmap_area *va,
insert: insert:
if (!merged) if (!merged)
link_va(va, root, parent, link, head); __link_va(va, root, parent, link, head, augment);
return va; return va;
} }
static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
struct rb_root *root, struct list_head *head)
{
return __merge_or_add_vmap_area(va, root, head, false);
}
static __always_inline struct vmap_area * static __always_inline struct vmap_area *
merge_or_add_vmap_area_augment(struct vmap_area *va, merge_or_add_vmap_area_augment(struct vmap_area *va,
struct rb_root *root, struct list_head *head) struct rb_root *root, struct list_head *head)
{ {
va = merge_or_add_vmap_area(va, root, head); va = __merge_or_add_vmap_area(va, root, head, true);
if (va) if (va)
augment_tree_propagate_from(va); augment_tree_propagate_from(va);
...@@ -1347,7 +1383,7 @@ adjust_va_to_fit_type(struct vmap_area *va, ...@@ -1347,7 +1383,7 @@ adjust_va_to_fit_type(struct vmap_area *va,
* V NVA V * V NVA V
* |---------------| * |---------------|
*/ */
unlink_va(va, &free_vmap_area_root); unlink_va_augment(va, &free_vmap_area_root);
kmem_cache_free(vmap_area_cachep, va); kmem_cache_free(vmap_area_cachep, va);
} else if (type == LE_FIT_TYPE) { } else if (type == LE_FIT_TYPE) {
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment