Commit 31a78a5a authored by Yishai Hadas, committed by Jason Gunthorpe

IB/mlx5: Extend UAR stuff to support dynamic allocation

This patch extends the alloc context flow to prepare for working with
dynamic UAR allocations.

Currently, upon alloc context, a fixed number of UARs is allocated (the
'static allocation'), and the user application has no way to ask for
more or to control which UAR will be used by which QP.

In this patch the driver prepares its data structures to manage both
the static and the dynamic allocations, and lets the user driver know
the maximum number of dynamic blue-flame registers that are allowed.

Downstream patches in this series will enable the dynamic allocation
and the association of a dynamically allocated bfreg with a QP as part
of QP creation.
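
As an illustration only (not part of this patch), here is a minimal
user-space sketch of how a provider might consume the new
num_dyn_bfregs field reported by alloc_ucontext. The struct below is an
abbreviated stand-in for the response tail and the helper name is
hypothetical; a real provider would work against the full
mlx5_ib_alloc_ucontext_resp layout and the response length returned by
the kernel:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Abbreviated stand-in for the tail of struct mlx5_ib_alloc_ucontext_resp;
	 * only the fields relevant to this patch are shown. */
	struct ucontext_resp_tail {
		uint32_t num_uars_per_page;
		uint32_t num_dyn_bfregs;	/* new in this patch */
		uint32_t reserved3;
	};

	/* Hypothetical helper: dynamic bfregs are usable only if the response
	 * is long enough to cover the new field and the reported count is
	 * non-zero (older kernels return a shorter response). */
	static int dyn_bfregs_supported(const struct ucontext_resp_tail *resp,
					size_t resp_len)
	{
		return resp_len >= offsetof(struct ucontext_resp_tail, reserved3) &&
		       resp->num_dyn_bfregs > 0;
	}

	int main(void)
	{
		struct ucontext_resp_tail resp = {
			.num_uars_per_page = 1,
			.num_dyn_bfregs = 1024,	/* e.g. MLX5_MAX_DYN_BFREGS */
		};

		if (dyn_bfregs_supported(&resp, sizeof(resp)))
			printf("up to %u dynamic bfregs may be requested\n",
			       (unsigned int)resp.num_dyn_bfregs);
		return 0;
	}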
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 4e2b53a5
@@ -1240,9 +1240,18 @@ static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
 		    caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
 }
 
+static u16 calc_dynamic_bfregs(int uars_per_sys_page)
+{
+	/* Large page with non 4k uar support might limit the dynamic size */
+	if (uars_per_sys_page == 1 && PAGE_SIZE > 4096)
+		return MLX5_MIN_DYN_BFREGS;
+
+	return MLX5_MAX_DYN_BFREGS;
+}
+
 static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
 			     struct mlx5_ib_alloc_ucontext_req_v2 *req,
-			     u32 *num_sys_pages)
+			     struct mlx5_bfreg_info *bfregi)
 {
 	int uars_per_sys_page;
 	int bfregs_per_sys_page;
@@ -1259,16 +1268,21 @@ static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
 
 	uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
 	bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
+	/* This holds the required static allocation asked by the user */
 	req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
-	*num_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
 
 	if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
 		return -EINVAL;
 
-	mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, using %d sys pages\n",
+	bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
+	bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
+	bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
+	bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;
+	mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
 		    MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
 		    lib_uar_4k ? "yes" : "no", ref_bfregs,
-		    req->total_num_bfregs, *num_sys_pages);
+		    req->total_num_bfregs, bfregi->total_num_bfregs,
+		    bfregi->num_sys_pages);
 
 	return 0;
 }
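
As a rough illustration of the new accounting above (not part of the
patch; assumes 4 KB system pages with 4K UAR support so one UAR per
system page, a user request of 8 bfregs, and
MLX5_NON_FP_BFREGS_PER_UAR == 2):

	#include <stdio.h>

	/* Assumed constants for this illustration; the authoritative values
	 * live in include/linux/mlx5/device.h. */
	#define NON_FP_BFREGS_PER_UAR	2
	#define MAX_DYN_BFREGS		1024
	#define ALIGN_UP(x, a)		(((x) + (a) - 1) / (a) * (a))

	int main(void)
	{
		int uars_per_sys_page = 1;	/* 4 KB pages, 4K UAR supported */
		int bfregs_per_sys_page = uars_per_sys_page * NON_FP_BFREGS_PER_UAR;
		int req_bfregs = ALIGN_UP(8, bfregs_per_sys_page);	/* static request */

		int num_static_sys_pages = req_bfregs / bfregs_per_sys_page;
		int num_dyn_bfregs = ALIGN_UP(MAX_DYN_BFREGS, bfregs_per_sys_page);
		int total_num_bfregs = req_bfregs + num_dyn_bfregs;
		int num_sys_pages = total_num_bfregs / bfregs_per_sys_page;

		/* Only the static pages are allocated at alloc_ucontext time;
		 * the remaining index space is reserved for later dynamic
		 * allocation. */
		printf("static: %d bfregs over %d sys pages\n",
		       req_bfregs, num_static_sys_pages);
		printf("dynamic: %d bfregs over %d sys pages\n",
		       num_dyn_bfregs, num_sys_pages - num_static_sys_pages);
		return 0;
	}

With these assumed numbers the static part is 8 bfregs over 4 system
pages and the dynamic part is 1024 bfregs over 512 system pages,
matching the MLX5_MAX_DYN_BFREGS limit advertised back to user space.
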
@@ -1280,7 +1294,7 @@ static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *conte
 	int i;
 
 	bfregi = &context->bfregi;
-	for (i = 0; i < bfregi->num_sys_pages; i++) {
+	for (i = 0; i < bfregi->num_static_sys_pages; i++) {
 		err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
 		if (err)
 			goto error;
@@ -1304,7 +1318,7 @@ static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *con
 	int i;
 
 	bfregi = &context->bfregi;
-	for (i = 0; i < bfregi->num_sys_pages; i++) {
+	for (i = 0; i < bfregi->num_static_sys_pages; i++) {
 		err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
 		if (err) {
 			mlx5_ib_warn(dev, "failed to free uar %d\n", i);
@@ -1419,13 +1433,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	bfregi = &context->bfregi;
 
 	/* updates req->total_num_bfregs */
-	err = calc_total_bfregs(dev, lib_uar_4k, &req, &bfregi->num_sys_pages);
+	err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
 	if (err)
 		goto out_ctx;
 
 	mutex_init(&bfregi->lock);
 	bfregi->lib_uar_4k = lib_uar_4k;
-	bfregi->count = kcalloc(req.total_num_bfregs, sizeof(*bfregi->count),
+	bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
 				GFP_KERNEL);
 	if (!bfregi->count) {
 		err = -ENOMEM;
@@ -1509,6 +1523,11 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
 		resp.response_length += sizeof(resp.num_uars_per_page);
 
+	if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) {
+		resp.num_dyn_bfregs = bfregi->num_dyn_bfregs;
+		resp.response_length += sizeof(resp.num_dyn_bfregs);
+	}
+
 	err = ib_copy_to_udata(udata, &resp, resp.response_length);
 	if (err)
 		goto out_td;
@@ -1113,10 +1113,10 @@ static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_suppor
 			MLX5_UARS_IN_PAGE : 1;
 }
 
-static inline int get_num_uars(struct mlx5_ib_dev *dev,
-			       struct mlx5_bfreg_info *bfregi)
+static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
+				      struct mlx5_bfreg_info *bfregi)
 {
-	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_sys_pages;
+	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
 }
 
 #endif /* MLX5_IB_H */
@@ -493,7 +493,7 @@ enum {
 
 static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
 {
-	return get_num_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
+	return get_num_static_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
 }
 
 static int num_med_bfreg(struct mlx5_ib_dev *dev,
@@ -244,6 +244,8 @@ enum {
 					  MLX5_NON_FP_BFREGS_PER_UAR,
 	MLX5_UARS_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
 	MLX5_NON_FP_BFREGS_IN_PAGE	= MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
+	MLX5_MIN_DYN_BFREGS		= 512,
+	MLX5_MAX_DYN_BFREGS		= 1024,
 };
 
 enum {
@@ -230,6 +230,9 @@ struct mlx5_bfreg_info {
 	u32			ver;
 	bool			lib_uar_4k;
 	u32			num_sys_pages;
+	u32			num_static_sys_pages;
+	u32			total_num_bfregs;
+	u32			num_dyn_bfregs;
 };
 
 struct mlx5_cmd_first {
@@ -125,6 +125,8 @@ struct mlx5_ib_alloc_ucontext_resp {
 	__u64	hca_core_clock_offset;
 	__u32	log_uar_size;
 	__u32	num_uars_per_page;
+	__u32	num_dyn_bfregs;
+	__u32	reserved3;
 };
 
 struct mlx5_ib_alloc_pd_resp {