Commit 57e70716 authored by Shay Drory, committed by Leon Romanovsky

RDMA/mlx5: Implement mkeys management via LIFO queue

Currently, mkeys are managed via an xarray. This leads to a performance
degradation when many MRs are deregistered in parallel, due to the xarray's
internal implementation; for example, deregistering 1M MRs via 64 threads
takes ~15% more time [1].

Hence, manage mkeys via a LIFO queue, which resolves the degradation.

[1]
2.8 us in kernel v5.19 compared to 3.2 us in kernel v6.4
Signed-off-by: Shay Drory <shayd@nvidia.com>
Link: https://lore.kernel.org/r/fde3d4cfab0f32f0ccb231cd113298256e1502c5.1695283384.git.leon@kernel.org
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent cb7ab785
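
For orientation, below is a minimal userspace sketch (not the kernel code) of the page-based LIFO queue this patch introduces: mkeys are stored in fixed-size pages, a push allocates a new page only when every existing page is full, and a pop frees the tail page once it empties while always keeping one page around. On a typical 64-bit system with 4 KiB pages and a 16-byte struct list_head, NUM_MKEYS_PER_PAGE works out to (4096 - 16) / 4 = 1020 entries per page. The names and the simplified list handling here are illustrative only; the actual patch uses list_head, spinlocks and GFP allocation flags as shown in the diff.

/* Illustrative userspace sketch of a page-based LIFO mkey queue.
 * It mirrors the idea of the patch but is not the kernel implementation.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MKEYS_PER_PAGE 1020 /* kernel: (PAGE_SIZE - sizeof(struct list_head)) / sizeof(u32) */

struct mkeys_page {
	uint32_t mkeys[MKEYS_PER_PAGE];
	struct mkeys_page *prev;	/* previous (older) page in the stack of pages */
};

struct mkeys_queue {
	struct mkeys_page *tail;	/* page holding the most recently pushed mkeys */
	unsigned long num_pages;
	unsigned long ci;		/* total number of stored mkeys */
};

/* Push an mkey; grow by one page only when every allocated page is full. */
static int push_mkey(struct mkeys_queue *q, uint32_t mkey)
{
	unsigned long slot = q->ci % MKEYS_PER_PAGE;

	if (q->ci >= q->num_pages * MKEYS_PER_PAGE) {
		struct mkeys_page *page = calloc(1, sizeof(*page));

		if (!page)
			return -1;
		page->prev = q->tail;
		q->tail = page;
		q->num_pages++;
	}
	q->tail->mkeys[slot] = mkey;
	q->ci++;
	return 0;
}

/* Pop the most recently pushed mkey (LIFO); free the tail page once it
 * empties, but always keep at least one page allocated.
 */
static uint32_t pop_mkey(struct mkeys_queue *q)
{
	unsigned long slot = (q->ci - 1) % MKEYS_PER_PAGE;
	uint32_t mkey = q->tail->mkeys[slot];

	q->ci--;
	if (slot == 0 && q->num_pages > 1) {
		struct mkeys_page *old = q->tail;

		q->tail = old->prev;
		q->num_pages--;
		free(old);
	}
	return mkey;
}

int main(void)
{
	struct mkeys_queue q = { 0 };

	for (uint32_t i = 1; i <= 3000; i++)
		push_mkey(&q, i);
	printf("pages=%lu stored=%lu last=%u\n", q.num_pages, q.ci,
	       (unsigned int)pop_mkey(&q));
	return 0;
}

The kernel version keeps the same invariant (the push/pop index always lands in the tail page) but protects the queue with mkeys_queue.lock and separately tracks in-flight asynchronous mkey creation in ent->pending, as the diff below shows.
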
@@ -753,10 +753,25 @@ struct umr_common {
 	unsigned int state;
 };
 
+#define NUM_MKEYS_PER_PAGE \
+	((PAGE_SIZE - sizeof(struct list_head)) / sizeof(u32))
+
+struct mlx5_mkeys_page {
+	u32 mkeys[NUM_MKEYS_PER_PAGE];
+	struct list_head list;
+};
+static_assert(sizeof(struct mlx5_mkeys_page) == PAGE_SIZE);
+
+struct mlx5_mkeys_queue {
+	struct list_head pages_list;
+	u32 num_pages;
+	unsigned long ci;
+	spinlock_t lock; /* sync list ops */
+};
+
 struct mlx5_cache_ent {
-	struct xarray		mkeys;
-	unsigned long		stored;
-	unsigned long		reserved;
+	struct mlx5_mkeys_queue	mkeys_queue;
+	u32			pending;
 
 	char			name[4];
...
@@ -143,110 +143,47 @@ static void create_mkey_warn(struct mlx5_ib_dev *dev, int status, void *out)
 	mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out);
 }
 
-static int push_mkey_locked(struct mlx5_cache_ent *ent, bool limit_pendings,
-			    void *to_store)
-{
-	XA_STATE(xas, &ent->mkeys, 0);
-	void *curr;
-
-	if (limit_pendings &&
-	    (ent->reserved - ent->stored) > MAX_PENDING_REG_MR)
-		return -EAGAIN;
-
-	while (1) {
-		/*
-		 * This is cmpxchg (NULL, XA_ZERO_ENTRY) however this version
-		 * doesn't transparently unlock. Instead we set the xas index to
-		 * the current value of reserved every iteration.
-		 */
-		xas_set(&xas, ent->reserved);
-		curr = xas_load(&xas);
-		if (!curr) {
-			if (to_store && ent->stored == ent->reserved)
-				xas_store(&xas, to_store);
-			else
-				xas_store(&xas, XA_ZERO_ENTRY);
-			if (xas_valid(&xas)) {
-				ent->reserved++;
-				if (to_store) {
-					if (ent->stored != ent->reserved)
-						__xa_store(&ent->mkeys,
-							   ent->stored,
-							   to_store,
-							   GFP_KERNEL);
-					ent->stored++;
-					queue_adjust_cache_locked(ent);
-					WRITE_ONCE(ent->dev->cache.last_add,
						   jiffies);
-				}
-			}
-		}
-		xa_unlock_irq(&ent->mkeys);
-
-		/*
-		 * Notice xas_nomem() must always be called as it cleans
-		 * up any cached allocation.
-		 */
-		if (!xas_nomem(&xas, GFP_KERNEL))
-			break;
-		xa_lock_irq(&ent->mkeys);
-	}
-	xa_lock_irq(&ent->mkeys);
-	if (xas_error(&xas))
-		return xas_error(&xas);
-	if (WARN_ON(curr))
-		return -EINVAL;
-	return 0;
-}
-
-static int push_mkey(struct mlx5_cache_ent *ent, bool limit_pendings,
-		     void *to_store)
-{
-	int ret;
-
-	xa_lock_irq(&ent->mkeys);
-	ret = push_mkey_locked(ent, limit_pendings, to_store);
-	xa_unlock_irq(&ent->mkeys);
-	return ret;
-}
-
-static void undo_push_reserve_mkey(struct mlx5_cache_ent *ent)
-{
-	void *old;
-
-	ent->reserved--;
-	old = __xa_erase(&ent->mkeys, ent->reserved);
-	WARN_ON(old);
-}
-
-static void push_to_reserved(struct mlx5_cache_ent *ent, u32 mkey)
-{
-	void *old;
-
-	old = __xa_store(&ent->mkeys, ent->stored, xa_mk_value(mkey), 0);
-	WARN_ON(old);
-	ent->stored++;
-}
-
-static u32 pop_stored_mkey(struct mlx5_cache_ent *ent)
-{
-	void *old, *xa_mkey;
-
-	ent->stored--;
-	ent->reserved--;
-
-	if (ent->stored == ent->reserved) {
-		xa_mkey = __xa_erase(&ent->mkeys, ent->stored);
-		WARN_ON(!xa_mkey);
-		return (u32)xa_to_value(xa_mkey);
-	}
-
-	xa_mkey = __xa_store(&ent->mkeys, ent->stored, XA_ZERO_ENTRY,
-			     GFP_KERNEL);
-	WARN_ON(!xa_mkey || xa_is_err(xa_mkey));
-	old = __xa_erase(&ent->mkeys, ent->reserved);
-	WARN_ON(old);
-	return (u32)xa_to_value(xa_mkey);
+static int push_mkey_locked(struct mlx5_cache_ent *ent, u32 mkey)
+{
+	unsigned long tmp = ent->mkeys_queue.ci % NUM_MKEYS_PER_PAGE;
+	struct mlx5_mkeys_page *page;
+
+	lockdep_assert_held(&ent->mkeys_queue.lock);
+	if (ent->mkeys_queue.ci >=
+	    ent->mkeys_queue.num_pages * NUM_MKEYS_PER_PAGE) {
+		page = kzalloc(sizeof(*page), GFP_ATOMIC);
+		if (!page)
+			return -ENOMEM;
+		ent->mkeys_queue.num_pages++;
+		list_add_tail(&page->list, &ent->mkeys_queue.pages_list);
+	} else {
+		page = list_last_entry(&ent->mkeys_queue.pages_list,
+				       struct mlx5_mkeys_page, list);
+	}
+
+	page->mkeys[tmp] = mkey;
+	ent->mkeys_queue.ci++;
+	return 0;
+}
+
+static int pop_mkey_locked(struct mlx5_cache_ent *ent)
+{
+	unsigned long tmp = (ent->mkeys_queue.ci - 1) % NUM_MKEYS_PER_PAGE;
+	struct mlx5_mkeys_page *last_page;
+	u32 mkey;
+
+	lockdep_assert_held(&ent->mkeys_queue.lock);
+	last_page = list_last_entry(&ent->mkeys_queue.pages_list,
+				    struct mlx5_mkeys_page, list);
+	mkey = last_page->mkeys[tmp];
+	last_page->mkeys[tmp] = 0;
+	ent->mkeys_queue.ci--;
+	if (ent->mkeys_queue.num_pages > 1 && !tmp) {
+		list_del(&last_page->list);
+		ent->mkeys_queue.num_pages--;
+		kfree(last_page);
+	}
+	return mkey;
 }
 
 static void create_mkey_callback(int status, struct mlx5_async_work *context)
@@ -260,10 +197,10 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
 	if (status) {
 		create_mkey_warn(dev, status, mkey_out->out);
 		kfree(mkey_out);
-		xa_lock_irqsave(&ent->mkeys, flags);
-		undo_push_reserve_mkey(ent);
+		spin_lock_irqsave(&ent->mkeys_queue.lock, flags);
+		ent->pending--;
 		WRITE_ONCE(dev->fill_delay, 1);
-		xa_unlock_irqrestore(&ent->mkeys, flags);
+		spin_unlock_irqrestore(&ent->mkeys_queue.lock, flags);
 		mod_timer(&dev->delay_timer, jiffies + HZ);
 		return;
 	}
@@ -272,11 +209,12 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
 		 MLX5_GET(create_mkey_out, mkey_out->out, mkey_index));
 	WRITE_ONCE(dev->cache.last_add, jiffies);
 
-	xa_lock_irqsave(&ent->mkeys, flags);
-	push_to_reserved(ent, mkey_out->mkey);
+	spin_lock_irqsave(&ent->mkeys_queue.lock, flags);
+	push_mkey_locked(ent, mkey_out->mkey);
 	/* If we are doing fill_to_high_water then keep going. */
 	queue_adjust_cache_locked(ent);
-	xa_unlock_irqrestore(&ent->mkeys, flags);
+	ent->pending--;
+	spin_unlock_irqrestore(&ent->mkeys_queue.lock, flags);
 	kfree(mkey_out);
 }
@@ -332,24 +270,28 @@ static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
 		set_cache_mkc(ent, mkc);
 		async_create->ent = ent;
 
-		err = push_mkey(ent, true, NULL);
-		if (err)
+		spin_lock_irq(&ent->mkeys_queue.lock);
+		if (ent->pending >= MAX_PENDING_REG_MR) {
+			err = -EAGAIN;
 			goto free_async_create;
+		}
+		ent->pending++;
+		spin_unlock_irq(&ent->mkeys_queue.lock);
 
 		err = mlx5_ib_create_mkey_cb(async_create);
 		if (err) {
 			mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
-			goto err_undo_reserve;
+			goto err_create_mkey;
 		}
 	}
 
 	return 0;
 
-err_undo_reserve:
-	xa_lock_irq(&ent->mkeys);
-	undo_push_reserve_mkey(ent);
-	xa_unlock_irq(&ent->mkeys);
+err_create_mkey:
+	spin_lock_irq(&ent->mkeys_queue.lock);
+	ent->pending--;
 free_async_create:
+	spin_unlock_irq(&ent->mkeys_queue.lock);
 	kfree(async_create);
 	return err;
 }
@@ -382,36 +324,36 @@ static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
 {
 	u32 mkey;
 
-	lockdep_assert_held(&ent->mkeys.xa_lock);
-	if (!ent->stored)
+	lockdep_assert_held(&ent->mkeys_queue.lock);
+	if (!ent->mkeys_queue.ci)
 		return;
-	mkey = pop_stored_mkey(ent);
-	xa_unlock_irq(&ent->mkeys);
+	mkey = pop_mkey_locked(ent);
+	spin_unlock_irq(&ent->mkeys_queue.lock);
 	mlx5_core_destroy_mkey(ent->dev->mdev, mkey);
-	xa_lock_irq(&ent->mkeys);
+	spin_lock_irq(&ent->mkeys_queue.lock);
 }
 
 static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
 				bool limit_fill)
-	__acquires(&ent->mkeys) __releases(&ent->mkeys)
+	__acquires(&ent->mkeys_queue.lock) __releases(&ent->mkeys_queue.lock)
 {
 	int err;
 
-	lockdep_assert_held(&ent->mkeys.xa_lock);
+	lockdep_assert_held(&ent->mkeys_queue.lock);
 
 	while (true) {
 		if (limit_fill)
 			target = ent->limit * 2;
-		if (target == ent->reserved)
+		if (target == ent->pending + ent->mkeys_queue.ci)
 			return 0;
-		if (target > ent->reserved) {
-			u32 todo = target - ent->reserved;
+		if (target > ent->pending + ent->mkeys_queue.ci) {
+			u32 todo = target - (ent->pending + ent->mkeys_queue.ci);
 
-			xa_unlock_irq(&ent->mkeys);
+			spin_unlock_irq(&ent->mkeys_queue.lock);
 			err = add_keys(ent, todo);
 			if (err == -EAGAIN)
 				usleep_range(3000, 5000);
-			xa_lock_irq(&ent->mkeys);
+			spin_lock_irq(&ent->mkeys_queue.lock);
 			if (err) {
 				if (err != -EAGAIN)
 					return err;
@@ -439,7 +381,7 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
 	 * cannot free MRs that are in use. Compute the target value for stored
 	 * mkeys.
 	 */
-	xa_lock_irq(&ent->mkeys);
+	spin_lock_irq(&ent->mkeys_queue.lock);
 	if (target < ent->in_use) {
 		err = -EINVAL;
 		goto err_unlock;
@@ -452,12 +394,12 @@ static ssize_t size_write(struct file *filp, const char __user *buf,
 	err = resize_available_mrs(ent, target, false);
 	if (err)
 		goto err_unlock;
-	xa_unlock_irq(&ent->mkeys);
+	spin_unlock_irq(&ent->mkeys_queue.lock);
 
 	return count;
 
 err_unlock:
-	xa_unlock_irq(&ent->mkeys);
+	spin_unlock_irq(&ent->mkeys_queue.lock);
 	return err;
 }
@@ -468,7 +410,8 @@ static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
 	char lbuf[20];
 	int err;
 
-	err = snprintf(lbuf, sizeof(lbuf), "%ld\n", ent->stored + ent->in_use);
+	err = snprintf(lbuf, sizeof(lbuf), "%ld\n",
+		       ent->mkeys_queue.ci + ent->in_use);
 	if (err < 0)
 		return err;
@@ -497,10 +440,10 @@ static ssize_t limit_write(struct file *filp, const char __user *buf,
 	 * Upon set we immediately fill the cache to high water mark implied by
 	 * the limit.
 	 */
-	xa_lock_irq(&ent->mkeys);
+	spin_lock_irq(&ent->mkeys_queue.lock);
 	ent->limit = var;
 	err = resize_available_mrs(ent, 0, true);
-	xa_unlock_irq(&ent->mkeys);
+	spin_unlock_irq(&ent->mkeys_queue.lock);
 	if (err)
 		return err;
 	return count;
@@ -536,9 +479,9 @@ static bool someone_adding(struct mlx5_mkey_cache *cache)
 	mutex_lock(&cache->rb_lock);
 	for (node = rb_first(&cache->rb_root); node; node = rb_next(node)) {
 		ent = rb_entry(node, struct mlx5_cache_ent, node);
-		xa_lock_irq(&ent->mkeys);
-		ret = ent->stored < ent->limit;
-		xa_unlock_irq(&ent->mkeys);
+		spin_lock_irq(&ent->mkeys_queue.lock);
+		ret = ent->mkeys_queue.ci < ent->limit;
+		spin_unlock_irq(&ent->mkeys_queue.lock);
 		if (ret) {
 			mutex_unlock(&cache->rb_lock);
 			return true;
@@ -555,26 +498,26 @@ static bool someone_adding(struct mlx5_mkey_cache *cache)
  */
 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
 {
-	lockdep_assert_held(&ent->mkeys.xa_lock);
+	lockdep_assert_held(&ent->mkeys_queue.lock);
 
 	if (ent->disabled || READ_ONCE(ent->dev->fill_delay) || ent->is_tmp)
 		return;
-	if (ent->stored < ent->limit) {
+	if (ent->mkeys_queue.ci < ent->limit) {
 		ent->fill_to_high_water = true;
 		mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
 	} else if (ent->fill_to_high_water &&
-		   ent->reserved < 2 * ent->limit) {
+		   ent->mkeys_queue.ci + ent->pending < 2 * ent->limit) {
 		/*
 		 * Once we start populating due to hitting a low water mark
 		 * continue until we pass the high water mark.
 		 */
 		mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
-	} else if (ent->stored == 2 * ent->limit) {
+	} else if (ent->mkeys_queue.ci == 2 * ent->limit) {
 		ent->fill_to_high_water = false;
-	} else if (ent->stored > 2 * ent->limit) {
+	} else if (ent->mkeys_queue.ci > 2 * ent->limit) {
 		/* Queue deletion of excess entries */
 		ent->fill_to_high_water = false;
-		if (ent->stored != ent->reserved)
+		if (ent->pending)
 			queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
 					   msecs_to_jiffies(1000));
 		else
@@ -588,15 +531,16 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
 	struct mlx5_mkey_cache *cache = &dev->cache;
 	int err;
 
-	xa_lock_irq(&ent->mkeys);
+	spin_lock_irq(&ent->mkeys_queue.lock);
 	if (ent->disabled)
 		goto out;
 
-	if (ent->fill_to_high_water && ent->reserved < 2 * ent->limit &&
+	if (ent->fill_to_high_water &&
+	    ent->mkeys_queue.ci + ent->pending < 2 * ent->limit &&
 	    !READ_ONCE(dev->fill_delay)) {
-		xa_unlock_irq(&ent->mkeys);
+		spin_unlock_irq(&ent->mkeys_queue.lock);
 		err = add_keys(ent, 1);
-		xa_lock_irq(&ent->mkeys);
+		spin_lock_irq(&ent->mkeys_queue.lock);
 		if (ent->disabled)
 			goto out;
 		if (err) {
@@ -614,7 +558,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
 						   msecs_to_jiffies(1000));
 				}
 			}
-	} else if (ent->stored > 2 * ent->limit) {
+	} else if (ent->mkeys_queue.ci > 2 * ent->limit) {
 		bool need_delay;
 
 		/*
@@ -629,11 +573,11 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
 		 * the garbage collection work to try to run in next cycle, in
 		 * order to free CPU resources to other tasks.
 		 */
-		xa_unlock_irq(&ent->mkeys);
+		spin_unlock_irq(&ent->mkeys_queue.lock);
 		need_delay = need_resched() || someone_adding(cache) ||
 			     !time_after(jiffies,
 					 READ_ONCE(cache->last_add) + 300 * HZ);
-		xa_lock_irq(&ent->mkeys);
+		spin_lock_irq(&ent->mkeys_queue.lock);
 		if (ent->disabled)
 			goto out;
 		if (need_delay) {
@@ -644,7 +588,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
 		queue_adjust_cache_locked(ent);
 	}
 out:
-	xa_unlock_irq(&ent->mkeys);
+	spin_unlock_irq(&ent->mkeys_queue.lock);
 }
 
 static void delayed_cache_work_func(struct work_struct *work)
@@ -752,25 +696,25 @@ static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	xa_lock_irq(&ent->mkeys);
+	spin_lock_irq(&ent->mkeys_queue.lock);
 	ent->in_use++;
 
-	if (!ent->stored) {
+	if (!ent->mkeys_queue.ci) {
 		queue_adjust_cache_locked(ent);
 		ent->miss++;
-		xa_unlock_irq(&ent->mkeys);
+		spin_unlock_irq(&ent->mkeys_queue.lock);
 		err = create_cache_mkey(ent, &mr->mmkey.key);
 		if (err) {
-			xa_lock_irq(&ent->mkeys);
+			spin_lock_irq(&ent->mkeys_queue.lock);
 			ent->in_use--;
-			xa_unlock_irq(&ent->mkeys);
+			spin_unlock_irq(&ent->mkeys_queue.lock);
 			kfree(mr);
 			return ERR_PTR(err);
 		}
 	} else {
-		mr->mmkey.key = pop_stored_mkey(ent);
+		mr->mmkey.key = pop_mkey_locked(ent);
 		queue_adjust_cache_locked(ent);
-		xa_unlock_irq(&ent->mkeys);
+		spin_unlock_irq(&ent->mkeys_queue.lock);
 	}
 	mr->mmkey.cache_ent = ent;
 	mr->mmkey.type = MLX5_MKEY_MR;
@@ -824,14 +768,14 @@ static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent)
 	u32 mkey;
 
 	cancel_delayed_work(&ent->dwork);
-	xa_lock_irq(&ent->mkeys);
-	while (ent->stored) {
-		mkey = pop_stored_mkey(ent);
-		xa_unlock_irq(&ent->mkeys);
+	spin_lock_irq(&ent->mkeys_queue.lock);
+	while (ent->mkeys_queue.ci) {
+		mkey = pop_mkey_locked(ent);
+		spin_unlock_irq(&ent->mkeys_queue.lock);
 		mlx5_core_destroy_mkey(dev->mdev, mkey);
-		xa_lock_irq(&ent->mkeys);
+		spin_lock_irq(&ent->mkeys_queue.lock);
 	}
-	xa_unlock_irq(&ent->mkeys);
+	spin_unlock_irq(&ent->mkeys_queue.lock);
 }
 
 static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
@@ -859,7 +803,7 @@ static void mlx5_mkey_cache_debugfs_add_ent(struct mlx5_ib_dev *dev,
 	dir = debugfs_create_dir(ent->name, dev->cache.fs_root);
 	debugfs_create_file("size", 0600, dir, ent, &size_fops);
 	debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
-	debugfs_create_ulong("cur", 0400, dir, &ent->stored);
+	debugfs_create_ulong("cur", 0400, dir, &ent->mkeys_queue.ci);
 	debugfs_create_u32("miss", 0600, dir, &ent->miss);
 }
@@ -881,6 +825,31 @@ static void delay_time_func(struct timer_list *t)
 	WRITE_ONCE(dev->fill_delay, 0);
 }
 
+static int mlx5r_mkeys_init(struct mlx5_cache_ent *ent)
+{
+	struct mlx5_mkeys_page *page;
+
+	page = kzalloc(sizeof(*page), GFP_KERNEL);
+	if (!page)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&ent->mkeys_queue.pages_list);
+	spin_lock_init(&ent->mkeys_queue.lock);
+	list_add_tail(&page->list, &ent->mkeys_queue.pages_list);
+	ent->mkeys_queue.num_pages++;
+	return 0;
+}
+
+static void mlx5r_mkeys_uninit(struct mlx5_cache_ent *ent)
+{
+	struct mlx5_mkeys_page *page;
+
+	WARN_ON(ent->mkeys_queue.ci || ent->mkeys_queue.num_pages > 1);
+	page = list_last_entry(&ent->mkeys_queue.pages_list,
+			       struct mlx5_mkeys_page, list);
+	list_del(&page->list);
+	kfree(page);
+}
+
 struct mlx5_cache_ent *
 mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
 			      struct mlx5r_cache_rb_key rb_key,
@@ -894,7 +863,9 @@ mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
 	if (!ent)
 		return ERR_PTR(-ENOMEM);
 
-	xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ);
+	ret = mlx5r_mkeys_init(ent);
+	if (ret)
+		goto mkeys_err;
 	ent->rb_key = rb_key;
 	ent->dev = dev;
 	ent->is_tmp = !persistent_entry;
@@ -902,10 +873,8 @@ mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
 	INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
 
 	ret = mlx5_cache_ent_insert(&dev->cache, ent);
-	if (ret) {
-		kfree(ent);
-		return ERR_PTR(ret);
-	}
+	if (ret)
+		goto ent_insert_err;
 
 	if (persistent_entry) {
 		if (rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM)
@@ -928,6 +897,11 @@ mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
 	}
 
 	return ent;
+ent_insert_err:
+	mlx5r_mkeys_uninit(ent);
+mkeys_err:
+	kfree(ent);
+	return ERR_PTR(ret);
 }
 
 static void remove_ent_work_func(struct work_struct *work)
@@ -945,13 +919,13 @@ static void remove_ent_work_func(struct work_struct *work)
 		cur = rb_prev(cur);
 		mutex_unlock(&cache->rb_lock);
 
-		xa_lock_irq(&ent->mkeys);
+		spin_lock_irq(&ent->mkeys_queue.lock);
 		if (!ent->is_tmp) {
-			xa_unlock_irq(&ent->mkeys);
+			spin_unlock_irq(&ent->mkeys_queue.lock);
 			mutex_lock(&cache->rb_lock);
 			continue;
 		}
-		xa_unlock_irq(&ent->mkeys);
+		spin_unlock_irq(&ent->mkeys_queue.lock);
 
 		clean_keys(ent->dev, ent);
 		mutex_lock(&cache->rb_lock);
@@ -1001,9 +975,9 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
 	mutex_unlock(&cache->rb_lock);
 	for (node = rb_first(root); node; node = rb_next(node)) {
 		ent = rb_entry(node, struct mlx5_cache_ent, node);
-		xa_lock_irq(&ent->mkeys);
+		spin_lock_irq(&ent->mkeys_queue.lock);
 		queue_adjust_cache_locked(ent);
-		xa_unlock_irq(&ent->mkeys);
+		spin_unlock_irq(&ent->mkeys_queue.lock);
 	}
 
 	return 0;
@@ -1028,9 +1002,9 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
 	mutex_lock(&dev->cache.rb_lock);
 	for (node = rb_first(root); node; node = rb_next(node)) {
 		ent = rb_entry(node, struct mlx5_cache_ent, node);
-		xa_lock_irq(&ent->mkeys);
+		spin_lock_irq(&ent->mkeys_queue.lock);
 		ent->disabled = true;
-		xa_unlock_irq(&ent->mkeys);
+		spin_unlock_irq(&ent->mkeys_queue.lock);
 		cancel_delayed_work_sync(&ent->dwork);
 	}
@@ -1043,6 +1017,7 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
 		node = rb_next(node);
 		clean_keys(dev, ent);
 		rb_erase(&ent->node, root);
+		mlx5r_mkeys_uninit(ent);
 		kfree(ent);
 	}
 	mutex_unlock(&dev->cache.rb_lock);
@@ -1815,7 +1790,7 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
 	int ret;
 
 	if (mr->mmkey.cache_ent) {
-		xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
+		spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock);
 		mr->mmkey.cache_ent->in_use--;
 		goto end;
 	}
@@ -1829,7 +1804,7 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
 			return -EOPNOTSUPP;
 		}
 		mr->mmkey.cache_ent = ent;
-		xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
+		spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock);
 		mutex_unlock(&cache->rb_lock);
 		goto end;
 	}
@@ -1841,12 +1816,11 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
 		return PTR_ERR(ent);
 
 	mr->mmkey.cache_ent = ent;
-	xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
+	spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock);
 
 end:
-	ret = push_mkey_locked(mr->mmkey.cache_ent, false,
-			       xa_mk_value(mr->mmkey.key));
-	xa_unlock_irq(&mr->mmkey.cache_ent->mkeys);
+	ret = push_mkey_locked(mr->mmkey.cache_ent, mr->mmkey.key);
+	spin_unlock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock);
 	return ret;
 }
...
@@ -332,8 +332,8 @@ static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
 		WARN_ON_ONCE(1);
 		mlx5_ib_warn(dev,
-			     "reg umr failed (%u). Trying to recover and resubmit the flushed WQEs\n",
-			     umr_context.status);
+			     "reg umr failed (%u). Trying to recover and resubmit the flushed WQEs, mkey = %u\n",
+			     umr_context.status, mkey);
 		mutex_lock(&umrc->lock);
 		err = mlx5r_umr_recover(dev);
 		mutex_unlock(&umrc->lock);
...