Commit 9f14d4f1 authored by Matthew Wilcox

xarray: Add xa_reserve and xa_release

xa_reserve() reserves a slot in the XArray for users who need
to acquire multiple locks before storing their entry in the tree and
so cannot use a plain xa_store(). xa_release() gives the reservation
back if it turns out not to be needed.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent 2264f513
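To make the intent concrete, here is a minimal sketch of the pattern this enables. It is not part of the patch; the caller, its spinlock and the object_still_wanted() predicate are all hypothetical:

#include <linux/spinlock.h>
#include <linux/xarray.h>

bool object_still_wanted(void *obj);	/* hypothetical predicate */

/*
 * Reserve the slot while sleeping and allocation are still allowed, then
 * either fill it or give it back once the caller's own lock is held.
 */
static int attach_object(struct xarray *xa, unsigned long id,
			 spinlock_t *lock, void *obj)
{
	int err;

	/* May sleep and allocate; loads from @id still return NULL. */
	err = xa_reserve(xa, id, GFP_KERNEL);
	if (err)
		return err;

	spin_lock(lock);
	if (!object_still_wanted(obj)) {
		spin_unlock(lock);
		xa_release(xa, id);	/* drop the unused reservation */
		return -ENOENT;
	}
	/* The slot already exists, so this store will not allocate. */
	xa_store(xa, id, obj, GFP_ATOMIC);
	spin_unlock(lock);

	return 0;
}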
@@ -293,6 +293,12 @@ to :c:func:`xas_retry`, and retry the operation if it returns ``true``.
     of this RCU period. You should restart the lookup from the head
     of the array.
 * - Zero
   - :c:func:`xa_is_zero`
   - Zero entries appear as ``NULL`` through the Normal API, but occupy
     an entry in the XArray which can be used to reserve the index for
     future use.

Other internal entries may be added in the future. As far as possible, they
will be handled by :c:func:`xas_retry`.
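The snippet below is an illustration of that new table entry only, not part of the patch; the function name is hypothetical and the XArray is assumed to be defined with DEFINE_XARRAY():

static DEFINE_XARRAY(xa);

static void zero_entry_demo(void)
{
	xa_reserve(&xa, 7, GFP_KERNEL);
	/* The reservation is invisible to normal loads ... */
	WARN_ON(xa_load(&xa, 7) != NULL);
	/* ... but the index is held, so the array is no longer empty. */
	WARN_ON(xa_empty(&xa));
	xa_release(&xa, 7);	/* back to a completely empty array */
}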
...
@@ -32,7 +32,8 @@
 * The following internal entries have a special meaning:
 *
 * 0-62: Sibling entries
 * 256: Zero entry
 * 257: Retry entry
 *
 * Errors are also represented as internal entries, but use the negative
 * space (-4094 to -2). They're never stored in the slots array; only
@@ -277,6 +278,7 @@ void *xa_load(struct xarray *, unsigned long index);
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *xa_cmpxchg(struct xarray *, unsigned long index,
			void *old, void *entry, gfp_t);
int xa_reserve(struct xarray *, unsigned long index, gfp_t);
bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -371,6 +373,20 @@ static inline int xa_insert(struct xarray *xa, unsigned long index,
	return -EEXIST;
}

/**
* xa_release() - Release a reserved entry.
* @xa: XArray.
* @index: Index of entry.
*
* After calling xa_reserve(), you can call this function to release the
* reservation. If the entry at @index has been stored to, this function
* will do nothing.
*/
static inline void xa_release(struct xarray *xa, unsigned long index)
{
	xa_cmpxchg(xa, index, NULL, NULL, 0);
}

/**
 * xa_for_each() - Iterate over a portion of an XArray.
 * @xa: XArray.
@@ -658,7 +674,19 @@ static inline bool xa_is_sibling(const void *entry)
		(entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
}

#define XA_ZERO_ENTRY		xa_mk_internal(256)
#define XA_RETRY_ENTRY		xa_mk_internal(257)
/**
* xa_is_zero() - Is the entry a zero entry?
* @entry: Entry retrieved from the XArray
*
* Return: %true if the entry is a zero entry.
*/
static inline bool xa_is_zero(const void *entry)
{
	return unlikely(entry == XA_ZERO_ENTRY);
}

/**
 * xa_is_retry() - Is the entry a retry entry?
@@ -880,6 +908,8 @@ static inline void xas_reset(struct xa_state *xas)
 */
static inline bool xas_retry(struct xa_state *xas, const void *entry)
{
	if (xa_is_zero(entry))
		return true;
	if (!xa_is_retry(entry))
		return false;
	xas_reset(xas);
...
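A second sketch, again not part of the patch: because xa_cmpxchg() and __xa_cmpxchg() now treat a reserved (zero) entry as NULL, a caller that already holds the xa_lock (perhaps together with other xa_lock-protected bookkeeping) can consume its reservation without any further allocation. commit_object() and its error convention are illustrative only:

#include <linux/xarray.h>

static int commit_object(struct xarray *xa, unsigned long id, void *obj)
{
	void *curr;

	xa_lock(xa);
	/* The slot was reserved earlier, so no allocation is needed here. */
	curr = __xa_cmpxchg(xa, id, NULL, obj, GFP_ATOMIC);
	xa_unlock(xa);

	if (xa_is_err(curr))
		return xa_err(curr);
	return curr ? -EBUSY : 0;	/* non-NULL: somebody stored first */
}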
@@ -259,6 +259,45 @@ static noinline void check_cmpxchg(struct xarray *xa)
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_reserve(struct xarray *xa)
{
	void *entry;
	unsigned long index = 0;

	/* An array with a reserved entry is not empty */
	XA_BUG_ON(xa, !xa_empty(xa));
	xa_reserve(xa, 12345678, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_load(xa, 12345678));
	xa_release(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Releasing a used entry does nothing */
	xa_reserve(xa, 12345678, GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* cmpxchg sees a reserved entry as NULL */
	xa_reserve(xa, 12345678, GFP_KERNEL);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, NULL, xa_mk_value(12345678),
				GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Can iterate through a reserved entry */
	xa_store_index(xa, 5, GFP_KERNEL);
	xa_reserve(xa, 6, GFP_KERNEL);
	xa_store_index(xa, 7, GFP_KERNEL);

	xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
		XA_BUG_ON(xa, index != 5 && index != 7);
	}
	xa_destroy(xa);
}

static noinline void check_xas_erase(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
@@ -808,6 +847,7 @@ static int xarray_checks(void)
	check_xa_shrink(&array);
	check_xas_erase(&array);
	check_cmpxchg(&array);
	check_reserve(&array);
	check_multi_store(&array);
	check_find(&array);
	check_destroy(&array);
...
@@ -1266,6 +1266,8 @@ void *xa_load(struct xarray *xa, unsigned long index)
	rcu_read_lock();
	do {
		entry = xas_load(&xas);
		if (xa_is_zero(entry))
			entry = NULL;
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();
@@ -1275,6 +1277,8 @@ EXPORT_SYMBOL(xa_load);
static void *xas_result(struct xa_state *xas, void *curr)
{
	if (xa_is_zero(curr))
		return NULL;
	XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr));
	if (xas_error(xas))
		curr = xas->xa_node;
@@ -1394,6 +1398,8 @@ void *xa_cmpxchg(struct xarray *xa, unsigned long index,
	do {
		xas_lock(&xas);
		curr = xas_load(&xas);
		if (curr == XA_ZERO_ENTRY)
			curr = NULL;
		if (curr == old)
			xas_store(&xas, entry);
		xas_unlock(&xas);
@@ -1430,6 +1436,8 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
	do {
		curr = xas_load(&xas);
		if (curr == XA_ZERO_ENTRY)
			curr = NULL;
		if (curr == old)
			xas_store(&xas, entry);
	} while (__xas_nomem(&xas, gfp));
@@ -1438,6 +1446,43 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
}
EXPORT_SYMBOL(__xa_cmpxchg);

/**
* xa_reserve() - Reserve this index in the XArray.
* @xa: XArray.
* @index: Index into array.
* @gfp: Memory allocation flags.
*
* Ensures there is somewhere to store an entry at @index in the array.
* If there is already something stored at @index, this function does
* nothing. If there was nothing there, the entry is marked as reserved.
* Loads from @index will continue to see a %NULL pointer until a
* subsequent store to @index.
*
* If you do not use the entry that you have reserved, call xa_release()
* or xa_erase() to free any unnecessary memory.
*
* Context: Process context. Takes and releases the xa_lock, IRQ or BH safe
* if specified in XArray flags. May sleep if the @gfp flags permit.
* Return: 0 if the reservation succeeded or -ENOMEM if it failed.
*/
int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	unsigned int lock_type = xa_lock_type(xa);
	void *curr;

	do {
		xas_lock_type(&xas, lock_type);
		curr = xas_load(&xas);
		if (!curr)
			xas_store(&xas, XA_ZERO_ENTRY);
		xas_unlock_type(&xas, lock_type);
	} while (xas_nomem(&xas, gfp));

	return xas_error(&xas);
}
EXPORT_SYMBOL(xa_reserve);
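As the kernel-doc above notes, reserving an index that is already occupied is a harmless no-op that still returns 0. A tiny fragment for illustration (not part of the patch, using the same hypothetical DEFINE_XARRAY(xa) as in the earlier sketch):

	xa_store(&xa, 3, xa_mk_value(42), GFP_KERNEL);
	/* Index 3 is occupied: nothing changes and 0 is returned. */
	WARN_ON(xa_reserve(&xa, 3, GFP_KERNEL) != 0);
	WARN_ON(xa_load(&xa, 3) != xa_mk_value(42));
	xa_erase(&xa, 3);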

/**
 * __xa_set_mark() - Set this mark on this entry while locked.
 * @xa: XArray.
@@ -1797,6 +1842,8 @@ void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
		pr_cont("retry (%ld)\n", xa_to_internal(entry));
	else if (xa_is_sibling(entry))
		pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
	else if (xa_is_zero(entry))
		pr_cont("zero (%ld)\n", xa_to_internal(entry));
	else
		pr_cont("UNKNOWN ENTRY (%px)\n", entry);
}
...