Commit 3cc6f42a authored by Sidhartha Kumar, committed by Andrew Morton

maple_tree: move up mas_wr_store_setup() and mas_wr_prealloc_setup()

Subsequent patches require these definitions to be higher, no functional
changes intended.

Link: https://lkml.kernel.org/r/20240814161944.55347-4-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 19138a2c
......@@ -4227,6 +4227,54 @@ static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
mas_wr_modify(wr_mas);
}
static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
{
if (!mas_is_active(wr_mas->mas)) {
if (mas_is_start(wr_mas->mas))
return;
if (unlikely(mas_is_paused(wr_mas->mas)))
goto reset;
if (unlikely(mas_is_none(wr_mas->mas)))
goto reset;
if (unlikely(mas_is_overflow(wr_mas->mas)))
goto reset;
if (unlikely(mas_is_underflow(wr_mas->mas)))
goto reset;
}
/*
* A less strict version of mas_is_span_wr() where we allow spanning
* writes within this node. This is to stop partial walks in
* mas_prealloc() from being reset.
*/
if (wr_mas->mas->last > wr_mas->mas->max)
goto reset;
if (wr_mas->entry)
return;
if (mte_is_leaf(wr_mas->mas->node) &&
wr_mas->mas->last == wr_mas->mas->max)
goto reset;
return;
reset:
mas_reset(wr_mas->mas);
}
/*
 * mas_wr_prealloc_setup() - Set up the write state and walk to the start of
 * the range, caching the existing content in @wr_mas->content.
 * @wr_mas: The maple write state
 */
static inline void mas_wr_prealloc_setup(struct ma_wr_state *wr_mas)
{
	mas_wr_store_setup(wr_mas);
	wr_mas->content = mas_start(wr_mas->mas);
}
/**
* mas_insert() - Internal call to insert a value
* @mas: The maple state
......@@ -5358,54 +5406,6 @@ static inline void mte_destroy_walk(struct maple_enode *enode,
mt_destroy_walk(enode, mt, true);
}
}
/*
 * mas_wr_store_setup() - Prepare the maple state for a write, resetting it
 * when it is in a state that cannot safely continue the walk.
 * @wr_mas: The maple write state
 *
 * NOTE(review): this is the original-location copy being removed by the
 * patch; it is byte-identical to the copy moved earlier in the file.
 */
static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
{
	if (!mas_is_active(wr_mas->mas)) {
		/* A freshly started state needs no reset. */
		if (mas_is_start(wr_mas->mas))
			return;
		/* Any other inactive state cannot resume a partial walk. */
		if (unlikely(mas_is_paused(wr_mas->mas)))
			goto reset;
		if (unlikely(mas_is_none(wr_mas->mas)))
			goto reset;
		if (unlikely(mas_is_overflow(wr_mas->mas)))
			goto reset;
		if (unlikely(mas_is_underflow(wr_mas->mas)))
			goto reset;
	}
	/*
	 * A less strict version of mas_is_span_wr() where we allow spanning
	 * writes within this node. This is to stop partial walks in
	 * mas_prealloc() from being reset.
	 */
	if (wr_mas->mas->last > wr_mas->mas->max)
		goto reset;
	if (wr_mas->entry)
		return;
	/* A store of NULL ending exactly at a leaf boundary must restart. */
	if (mte_is_leaf(wr_mas->mas->node) &&
	    wr_mas->mas->last == wr_mas->mas->max)
		goto reset;
	return;
reset:
	mas_reset(wr_mas->mas);
}
/*
 * mas_wr_prealloc_setup() - Set up the write state and walk to the start of
 * the range, caching the existing content in @wr_mas->content.
 * @wr_mas: The maple write state
 *
 * NOTE(review): original-location copy removed by the patch; identical to
 * the copy moved earlier in the file.
 */
static inline void mas_wr_prealloc_setup(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	mas_wr_store_setup(wr_mas);
	wr_mas->content = mas_start(mas);
}
/* Interface */
/**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment