Commit 2f5ff264 authored by Eli Cohen, committed by Leon Romanovsky

mlx5: Fix naming convention with respect to UARs

This establishes a solid naming convention for UARs. A UAR (User Access
Region) can have a size identical to a system page or a fixed size of 4KB,
depending on a value queried from firmware. Each UAR always has 4 blue
flame registers which are used to post doorbells to the send queue. In
addition, a UAR has a section used for posting doorbells to CQs or EQs.
This patch changes the names to reflect these conventions.
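
For orientation, the relationships the new names describe can be summarized in a short sketch. This is illustrative only, not part of the patch: the enum values and the index arithmetic are copied from the hunks below, while the helper functions and the 0x800 value for MLX5_BF_OFFSET are assumptions made for this example.

/* Illustrative sketch only -- not part of the patch.
 * The constants match the renamed enum in the diff below; the helpers
 * mirror the index math used in mlx5_alloc_bfregs().
 */
enum {
	MLX5_BFREGS_PER_UAR        = 4,       /* every UAR carries 4 blue flame registers */
	MLX5_NON_FP_BFREGS_PER_UAR = 2,       /* 2 of them are regular (non fast path) */
	MLX5_MAX_UARS              = 1 << 8,
	MLX5_MAX_BFREGS            = MLX5_MAX_UARS * MLX5_NON_FP_BFREGS_PER_UAR,
};

#define MLX5_BF_OFFSET 0x800                  /* assumed value; the real one lives in the driver headers */

/* UAR that a given bfreg belongs to. */
static inline int bfreg_to_uar_index(int bfregn)
{
	return bfregn / MLX5_BFREGS_PER_UAR;
}

/* Byte offset of a bfreg inside its UAR, given the blue flame register size
 * (1 << log_bf_reg_size as reported by firmware).
 */
static inline unsigned int bfreg_offset_in_uar(int bfregn, unsigned int bf_reg_size)
{
	return (bfregn % MLX5_BFREGS_PER_UAR) * bf_reg_size + MLX5_BF_OFFSET;
}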
Signed-off-by: Eli Cohen <eli@mellanox.com>
Reviewed-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent f4044dac
@@ -689,7 +689,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
struct mlx5_ib_cq *cq = to_mcq(ibcq);
void __iomem *uar_page = mdev->priv.uuari.uars[0].map;
void __iomem *uar_page = mdev->priv.bfregi.uars[0].map;
unsigned long irq_flags;
int ret = 0;
@@ -790,7 +790,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
MLX5_SET(cqc, cqc, log_page_size,
page_shift - MLX5_ADAPTER_PAGE_SHIFT);
*index = to_mucontext(context)->uuari.uars[0].index;
*index = to_mucontext(context)->bfregi.uars[0].index;
if (ucmd.cqe_comp_en == 1) {
if (unlikely((*cqe_size != 64) ||
@@ -886,7 +886,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
MLX5_SET(cqc, cqc, log_page_size,
cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
*index = dev->mdev->priv.uuari.uars[0].index;
*index = dev->mdev->priv.bfregi.uars[0].index;
return 0;
......
@@ -999,12 +999,12 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct mlx5_ib_alloc_ucontext_req_v2 req = {};
struct mlx5_ib_alloc_ucontext_resp resp = {};
struct mlx5_ib_ucontext *context;
struct mlx5_uuar_info *uuari;
struct mlx5_bfreg_info *bfregi;
struct mlx5_uar *uars;
int gross_uuars;
int gross_bfregs;
int num_uars;
int ver;
int uuarn;
int bfregn;
int err;
int i;
size_t reqlen;
@@ -1032,10 +1032,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (req.flags)
return ERR_PTR(-EINVAL);
if (req.total_num_uuars > MLX5_MAX_UUARS)
if (req.total_num_bfregs > MLX5_MAX_BFREGS)
return ERR_PTR(-ENOMEM);
if (req.total_num_uuars == 0)
if (req.total_num_bfregs == 0)
return ERR_PTR(-EINVAL);
if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
@@ -1046,13 +1046,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
reqlen - sizeof(req)))
return ERR_PTR(-EOPNOTSUPP);
req.total_num_uuars = ALIGN(req.total_num_uuars,
MLX5_NON_FP_BF_REGS_PER_PAGE);
if (req.num_low_latency_uuars > req.total_num_uuars - 1)
req.total_num_bfregs = ALIGN(req.total_num_bfregs,
MLX5_NON_FP_BFREGS_PER_UAR);
if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
return ERR_PTR(-EINVAL);
num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
num_uars = req.total_num_bfregs / MLX5_NON_FP_BFREGS_PER_UAR;
gross_bfregs = num_uars * MLX5_BFREGS_PER_UAR;
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
@@ -1072,32 +1072,33 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (!context)
return ERR_PTR(-ENOMEM);
uuari = &context->uuari;
mutex_init(&uuari->lock);
bfregi = &context->bfregi;
mutex_init(&bfregi->lock);
uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
if (!uars) {
err = -ENOMEM;
goto out_ctx;
}
uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
sizeof(*uuari->bitmap),
bfregi->bitmap = kcalloc(BITS_TO_LONGS(gross_bfregs),
sizeof(*bfregi->bitmap),
GFP_KERNEL);
if (!uuari->bitmap) {
if (!bfregi->bitmap) {
err = -ENOMEM;
goto out_uar_ctx;
}
/*
* clear all fast path uuars
* clear all fast path bfregs
*/
for (i = 0; i < gross_uuars; i++) {
uuarn = i & 3;
if (uuarn == 2 || uuarn == 3)
set_bit(i, uuari->bitmap);
for (i = 0; i < gross_bfregs; i++) {
bfregn = i & 3;
if (bfregn == 2 || bfregn == 3)
set_bit(i, bfregi->bitmap);
}
uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
if (!uuari->count) {
bfregi->count = kcalloc(gross_bfregs,
sizeof(*bfregi->count), GFP_KERNEL);
if (!bfregi->count) {
err = -ENOMEM;
goto out_bitmap;
}
@@ -1130,7 +1131,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
resp.tot_uuars = req.total_num_uuars;
resp.tot_bfregs = req.total_num_bfregs;
resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
if (field_avail(typeof(resp), cqe_version, udata->outlen))
@@ -1163,10 +1164,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (err)
goto out_td;
uuari->ver = ver;
uuari->num_low_latency_uuars = req.num_low_latency_uuars;
uuari->uars = uars;
uuari->num_uars = num_uars;
bfregi->ver = ver;
bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
bfregi->uars = uars;
bfregi->num_uars = num_uars;
context->cqe_version = resp.cqe_version;
return &context->ibucontext;
@@ -1182,10 +1183,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
for (i--; i >= 0; i--)
mlx5_cmd_free_uar(dev->mdev, uars[i].index);
out_count:
kfree(uuari->count);
kfree(bfregi->count);
out_bitmap:
kfree(uuari->bitmap);
kfree(bfregi->bitmap);
out_uar_ctx:
kfree(uars);
@@ -1199,7 +1200,7 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
struct mlx5_uuar_info *uuari = &context->uuari;
struct mlx5_bfreg_info *bfregi = &context->bfregi;
int i;
if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
@@ -1207,14 +1208,15 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
free_page(context->upd_xlt_page);
for (i = 0; i < uuari->num_uars; i++) {
if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
for (i = 0; i < bfregi->num_uars; i++) {
if (mlx5_cmd_free_uar(dev->mdev, bfregi->uars[i].index))
mlx5_ib_warn(dev, "Failed to free UAR 0x%x\n",
bfregi->uars[i].index);
}
kfree(uuari->count);
kfree(uuari->bitmap);
kfree(uuari->uars);
kfree(bfregi->count);
kfree(bfregi->bitmap);
kfree(bfregi->uars);
kfree(context);
return 0;
@@ -1377,7 +1379,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
struct vm_area_struct *vma,
struct mlx5_ib_ucontext *context)
{
struct mlx5_uuar_info *uuari = &context->uuari;
struct mlx5_bfreg_info *bfregi = &context->bfregi;
int err;
unsigned long idx;
phys_addr_t pfn, pa;
@@ -1408,10 +1410,10 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
return -EINVAL;
idx = get_index(vma->vm_pgoff);
if (idx >= uuari->num_uars)
if (idx >= bfregi->num_uars)
return -EINVAL;
pfn = uar_index2pfn(dev, uuari->uars[idx].index);
pfn = uar_index2pfn(dev, bfregi->uars[idx].index);
mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
vma->vm_page_prot = prot;
......
@@ -100,7 +100,7 @@ enum mlx5_ib_mad_ifc_flags {
};
enum {
MLX5_CROSS_CHANNEL_UUAR = 0,
MLX5_CROSS_CHANNEL_BFREG = 0,
};
enum {
@@ -120,7 +120,7 @@ struct mlx5_ib_ucontext {
/* protect doorbell record alloc/free
*/
struct mutex db_page_mutex;
struct mlx5_uuar_info uuari;
struct mlx5_bfreg_info bfregi;
u8 cqe_version;
/* Transport Domain number */
u32 tdn;
@@ -355,7 +355,7 @@ struct mlx5_ib_qp {
/* only for user space QPs. For kernel
* we have it from the bf object
*/
int uuarn;
int bfregn;
int create_type;
......
@@ -686,7 +686,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
"mlx5_cmd_eq", &dev->priv.uuari.uars[0],
"mlx5_cmd_eq", &dev->priv.bfregi.uars[0],
MLX5_EQ_TYPE_ASYNC);
if (err) {
mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
@@ -697,7 +697,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
MLX5_NUM_ASYNC_EQE, async_event_mask,
"mlx5_async_eq", &dev->priv.uuari.uars[0],
"mlx5_async_eq", &dev->priv.bfregi.uars[0],
MLX5_EQ_TYPE_ASYNC);
if (err) {
mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
@@ -708,7 +708,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
MLX5_EQ_VEC_PAGES,
/* TODO: sriov max_vf + */ 1,
1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
&dev->priv.uuari.uars[0],
&dev->priv.bfregi.uars[0],
MLX5_EQ_TYPE_ASYNC);
if (err) {
mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
@@ -722,7 +722,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
MLX5_NUM_ASYNC_EQE,
1 << MLX5_EVENT_TYPE_PAGE_FAULT,
"mlx5_page_fault_eq",
&dev->priv.uuari.uars[0],
&dev->priv.bfregi.uars[0],
MLX5_EQ_TYPE_PF);
if (err) {
mlx5_core_warn(dev, "failed to create page fault EQ %d\n",
......
@@ -753,7 +753,7 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
err = mlx5_create_map_eq(dev, eq,
i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
name, &dev->priv.uuari.uars[0],
name, &dev->priv.bfregi.uars[0],
MLX5_EQ_TYPE_COMP);
if (err) {
kfree(eq);
@@ -1094,7 +1094,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_cleanup_once;
}
err = mlx5_alloc_uuars(dev, &priv->uuari);
err = mlx5_alloc_bfregs(dev, &priv->bfregi);
if (err) {
dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
goto err_disable_msix;
@@ -1170,7 +1170,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_stop_eqs(dev);
err_free_uar:
mlx5_free_uuars(dev, &priv->uuari);
mlx5_free_bfregs(dev, &priv->bfregi);
err_disable_msix:
mlx5_disable_msix(dev);
@@ -1230,7 +1230,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
mlx5_free_uuars(dev, &priv->uuari);
mlx5_free_bfregs(dev, &priv->bfregi);
mlx5_disable_msix(dev);
if (cleanup)
mlx5_cleanup_once(dev);
......
@@ -39,7 +39,7 @@
enum {
NUM_DRIVER_UARS = 4,
NUM_LOW_LAT_UUARS = 4,
NUM_LOW_LAT_BFREGS = 4,
};
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
@@ -67,116 +67,116 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
}
EXPORT_SYMBOL(mlx5_cmd_free_uar);
static int need_uuar_lock(int uuarn)
static int need_bfreg_lock(int bfregn)
{
int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
int tot_bfregs = NUM_DRIVER_UARS * MLX5_BFREGS_PER_UAR;
if (uuarn == 0 || tot_uuars - NUM_LOW_LAT_UUARS)
if (bfregn == 0 || tot_bfregs - NUM_LOW_LAT_BFREGS)
return 0;
return 1;
}
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
int mlx5_alloc_bfregs(struct mlx5_core_dev *dev, struct mlx5_bfreg_info *bfregi)
{
int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
int tot_bfregs = NUM_DRIVER_UARS * MLX5_BFREGS_PER_UAR;
struct mlx5_bf *bf;
phys_addr_t addr;
int err;
int i;
uuari->num_uars = NUM_DRIVER_UARS;
uuari->num_low_latency_uuars = NUM_LOW_LAT_UUARS;
bfregi->num_uars = NUM_DRIVER_UARS;
bfregi->num_low_latency_bfregs = NUM_LOW_LAT_BFREGS;
mutex_init(&uuari->lock);
uuari->uars = kcalloc(uuari->num_uars, sizeof(*uuari->uars), GFP_KERNEL);
if (!uuari->uars)
mutex_init(&bfregi->lock);
bfregi->uars = kcalloc(bfregi->num_uars, sizeof(*bfregi->uars), GFP_KERNEL);
if (!bfregi->uars)
return -ENOMEM;
uuari->bfs = kcalloc(tot_uuars, sizeof(*uuari->bfs), GFP_KERNEL);
if (!uuari->bfs) {
bfregi->bfs = kcalloc(tot_bfregs, sizeof(*bfregi->bfs), GFP_KERNEL);
if (!bfregi->bfs) {
err = -ENOMEM;
goto out_uars;
}
uuari->bitmap = kcalloc(BITS_TO_LONGS(tot_uuars), sizeof(*uuari->bitmap),
bfregi->bitmap = kcalloc(BITS_TO_LONGS(tot_bfregs), sizeof(*bfregi->bitmap),
GFP_KERNEL);
if (!uuari->bitmap) {
if (!bfregi->bitmap) {
err = -ENOMEM;
goto out_bfs;
}
uuari->count = kcalloc(tot_uuars, sizeof(*uuari->count), GFP_KERNEL);
if (!uuari->count) {
bfregi->count = kcalloc(tot_bfregs, sizeof(*bfregi->count), GFP_KERNEL);
if (!bfregi->count) {
err = -ENOMEM;
goto out_bitmap;
}
for (i = 0; i < uuari->num_uars; i++) {
err = mlx5_cmd_alloc_uar(dev, &uuari->uars[i].index);
for (i = 0; i < bfregi->num_uars; i++) {
err = mlx5_cmd_alloc_uar(dev, &bfregi->uars[i].index);
if (err)
goto out_count;
addr = dev->iseg_base + ((phys_addr_t)(uuari->uars[i].index) << PAGE_SHIFT);
uuari->uars[i].map = ioremap(addr, PAGE_SIZE);
if (!uuari->uars[i].map) {
mlx5_cmd_free_uar(dev, uuari->uars[i].index);
addr = dev->iseg_base + ((phys_addr_t)(bfregi->uars[i].index) << PAGE_SHIFT);
bfregi->uars[i].map = ioremap(addr, PAGE_SIZE);
if (!bfregi->uars[i].map) {
mlx5_cmd_free_uar(dev, bfregi->uars[i].index);
err = -ENOMEM;
goto out_count;
}
mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n",
uuari->uars[i].index, uuari->uars[i].map);
bfregi->uars[i].index, bfregi->uars[i].map);
}
for (i = 0; i < tot_uuars; i++) {
bf = &uuari->bfs[i];
for (i = 0; i < tot_bfregs; i++) {
bf = &bfregi->bfs[i];
bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2;
bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
bf->uar = &bfregi->uars[i / MLX5_BFREGS_PER_UAR];
bf->regreg = bfregi->uars[i / MLX5_BFREGS_PER_UAR].map;
bf->reg = NULL; /* Add WC support */
bf->offset = (i % MLX5_BF_REGS_PER_PAGE) *
bf->offset = (i % MLX5_BFREGS_PER_UAR) *
(1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) +
MLX5_BF_OFFSET;
bf->need_lock = need_uuar_lock(i);
bf->need_lock = need_bfreg_lock(i);
spin_lock_init(&bf->lock);
spin_lock_init(&bf->lock32);
bf->uuarn = i;
bf->bfregn = i;
}
return 0;
out_count:
for (i--; i >= 0; i--) {
iounmap(uuari->uars[i].map);
mlx5_cmd_free_uar(dev, uuari->uars[i].index);
iounmap(bfregi->uars[i].map);
mlx5_cmd_free_uar(dev, bfregi->uars[i].index);
}
kfree(uuari->count);
kfree(bfregi->count);
out_bitmap:
kfree(uuari->bitmap);
kfree(bfregi->bitmap);
out_bfs:
kfree(uuari->bfs);
kfree(bfregi->bfs);
out_uars:
kfree(uuari->uars);
kfree(bfregi->uars);
return err;
}
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
int mlx5_free_bfregs(struct mlx5_core_dev *dev, struct mlx5_bfreg_info *bfregi)
{
int i = uuari->num_uars;
int i = bfregi->num_uars;
for (i--; i >= 0; i--) {
iounmap(uuari->uars[i].map);
mlx5_cmd_free_uar(dev, uuari->uars[i].index);
iounmap(bfregi->uars[i].map);
mlx5_cmd_free_uar(dev, bfregi->uars[i].index);
}
kfree(uuari->count);
kfree(uuari->bitmap);
kfree(uuari->bfs);
kfree(uuari->uars);
kfree(bfregi->count);
kfree(bfregi->bitmap);
kfree(bfregi->bfs);
kfree(bfregi->uars);
return 0;
}
......
@@ -212,10 +212,11 @@ enum {
};
enum {
MLX5_BF_REGS_PER_PAGE = 4,
MLX5_MAX_UAR_PAGES = 1 << 8,
MLX5_NON_FP_BF_REGS_PER_PAGE = 2,
MLX5_MAX_UUARS = MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE,
MLX5_BFREGS_PER_UAR = 4,
MLX5_MAX_UARS = 1 << 8,
MLX5_NON_FP_BFREGS_PER_UAR = 2,
MLX5_MAX_BFREGS = MLX5_MAX_UARS *
MLX5_NON_FP_BFREGS_PER_UAR,
};
enum {
......
@@ -188,16 +188,16 @@ enum mlx5_eq_type {
#endif
};
struct mlx5_uuar_info {
struct mlx5_bfreg_info {
struct mlx5_uar *uars;
int num_uars;
int num_low_latency_uuars;
int num_low_latency_bfregs;
unsigned long *bitmap;
unsigned int *count;
struct mlx5_bf *bfs;
/*
* protect uuar allocation data structs
* protect bfreg allocation data structs
*/
struct mutex lock;
u32 ver;
@@ -217,7 +217,7 @@ struct mlx5_bf {
/* serialize 64 bit writes when done as two 32 bit accesses
*/
spinlock_t lock32;
int uuarn;
int bfregn;
};
struct mlx5_cmd_first {
@@ -579,7 +579,7 @@ struct mlx5_priv {
struct mlx5_eq_table eq_table;
struct msix_entry *msix_arr;
struct mlx5_irq_info *irq_info;
struct mlx5_uuar_info uuari;
struct mlx5_bfreg_info bfregi;
MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
/* pages stuff */
@@ -903,8 +903,8 @@ void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_alloc_bfregs(struct mlx5_core_dev *dev, struct mlx5_bfreg_info *bfregi);
int mlx5_free_bfregs(struct mlx5_core_dev *dev, struct mlx5_bfreg_info *bfregi);
int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
bool map_wc);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
......
@@ -61,13 +61,13 @@ enum {
*/
struct mlx5_ib_alloc_ucontext_req {
__u32 total_num_uuars;
__u32 num_low_latency_uuars;
__u32 total_num_bfregs;
__u32 num_low_latency_bfregs;
};
struct mlx5_ib_alloc_ucontext_req_v2 {
__u32 total_num_uuars;
__u32 num_low_latency_uuars;
__u32 total_num_bfregs;
__u32 num_low_latency_bfregs;
__u32 flags;
__u32 comp_mask;
__u8 max_cqe_version;
@@ -88,7 +88,7 @@ enum mlx5_user_cmds_supp_uhw {
struct mlx5_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 bf_reg_size;
__u32 tot_uuars;
__u32 tot_bfregs;
__u32 cache_line_size;
__u16 max_sq_desc_sz;
__u16 max_rq_desc_sz;
@@ -241,7 +241,7 @@ struct mlx5_ib_create_qp_rss {
};
struct mlx5_ib_create_qp_resp {
__u32 uuar_index;
__u32 bfreg_index;
};
struct mlx5_ib_alloc_mw {
......