Commit 7e9a1dad authored by Cheng Xu, committed by Leon Romanovsky

RDMA/erdma: Allocate doorbell resources from hardware

Each ucontext will try to allocate doorbell resources in the extended BAR
space from hardware. For compatibility, we change nothing in the original
BAR space, which will be used only by applications with CAP_SYS_RAWIO
authority in older HW/FW environments.
Signed-off-by: Cheng Xu <chengyou@linux.alibaba.com>
Link: https://lore.kernel.org/r/20230606055005.80729-3-chengyou@linux.alibaba.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent 128f8404
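For context before the diff: the values this patch returns to user space through erdma_uresp_alloc_ctx are mmap cookies produced by rdma_user_mmap_entry_insert(), one PAGE_SIZE doorbell window per queue type. A minimal sketch of the consuming side (the helper below is hypothetical, not taken from rdma-core):

/* Hypothetical user-space helper: map one doorbell window using the
 * cookie (uresp.sdb / uresp.rdb / uresp.cdb) returned by the kernel. */
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

static void *erdma_map_doorbell(int cmd_fd, off_t cookie)
{
	void *db = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_WRITE,
			MAP_SHARED, cmd_fd, cookie);

	return db == MAP_FAILED ? NULL : db;
}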
@@ -268,6 +268,8 @@ static inline u32 erdma_reg_read32_filed(struct erdma_dev *dev, u32 reg,
return FIELD_GET(filed_mask, val);
}
#define ERDMA_GET(val, name) FIELD_GET(ERDMA_CMD_##name##_MASK, val)
int erdma_cmdq_init(struct erdma_dev *dev);
void erdma_finish_cmdq_init(struct erdma_dev *dev);
void erdma_cmdq_destroy(struct erdma_dev *dev);
......
@@ -160,6 +160,8 @@ enum CMDQ_COMMON_OPCODE {
CMDQ_OPCODE_QUERY_FW_INFO = 2,
CMDQ_OPCODE_CONF_MTU = 3,
CMDQ_OPCODE_CONF_DEVICE = 5,
CMDQ_OPCODE_ALLOC_DB = 8,
CMDQ_OPCODE_FREE_DB = 9,
};
/* cmdq-SQE HDR */
@@ -212,6 +214,26 @@ struct erdma_cmdq_config_mtu_req {
u32 mtu;
};
/* ext db requests (alloc and free) cfg */
#define ERDMA_CMD_EXT_DB_CQ_EN_MASK BIT(2)
#define ERDMA_CMD_EXT_DB_RQ_EN_MASK BIT(1)
#define ERDMA_CMD_EXT_DB_SQ_EN_MASK BIT(0)
struct erdma_cmdq_ext_db_req {
u64 hdr;
u32 cfg;
u16 rdb_off;
u16 sdb_off;
u16 rsvd0;
u16 cdb_off;
u32 rsvd1[3];
};
/* alloc db response qword 0 definition */
#define ERDMA_CMD_ALLOC_DB_RESP_RDB_MASK GENMASK_ULL(63, 48)
#define ERDMA_CMD_ALLOC_DB_RESP_CDB_MASK GENMASK_ULL(47, 32)
#define ERDMA_CMD_ALLOC_DB_RESP_SDB_MASK GENMASK_ULL(15, 0)
/* create_cq cfg0 */
#define ERDMA_CMD_CREATE_CQ_DEPTH_MASK GENMASK(31, 24)
#define ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK GENMASK(23, 20)
......
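To make the qword-0 layout above concrete, here is a small worked decode matching what alloc_ext_db_resources() does below; the sample value is invented:

/* Decode an ALLOC_DB response qword 0. With val0 = 0x0040002000000010:
 * RDB page offset = 0x0040 (bits 63:48), CDB = 0x0020 (bits 47:32),
 * SDB = 0x0010 (bits 15:0); bits 31:16 are unused here. */
#include <linux/bitfield.h>
#include <linux/types.h>

static void erdma_decode_alloc_db_resp(u64 val0, u16 *sdb, u16 *rdb, u16 *cdb)
{
	*sdb = FIELD_GET(ERDMA_CMD_ALLOC_DB_RESP_SDB_MASK, val0);
	*rdb = FIELD_GET(ERDMA_CMD_ALLOC_DB_RESP_RDB_MASK, val0);
	*cdb = FIELD_GET(ERDMA_CMD_ALLOC_DB_RESP_CDB_MASK, val0);
}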
@@ -1188,6 +1188,60 @@ static void alloc_db_resources(struct erdma_dev *dev,
ctx->sdb = dev->func_bar_addr + (ctx->sdb_page_idx << PAGE_SHIFT);
}
static int alloc_ext_db_resources(struct erdma_dev *dev,
struct erdma_ucontext *ctx)
{
struct erdma_cmdq_ext_db_req req = {};
u64 val0, val1;
int ret;
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
CMDQ_OPCODE_ALLOC_DB);
req.cfg = FIELD_PREP(ERDMA_CMD_EXT_DB_CQ_EN_MASK, 1) |
FIELD_PREP(ERDMA_CMD_EXT_DB_RQ_EN_MASK, 1) |
FIELD_PREP(ERDMA_CMD_EXT_DB_SQ_EN_MASK, 1);
ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &val0, &val1);
if (ret)
return ret;
ctx->ext_db.enable = true;
ctx->ext_db.sdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_SDB);
ctx->ext_db.rdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_RDB);
ctx->ext_db.cdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_CDB);
ctx->sdb_type = ERDMA_SDB_PAGE;
ctx->sdb = dev->func_bar_addr + (ctx->ext_db.sdb_off << PAGE_SHIFT);
ctx->cdb = dev->func_bar_addr + (ctx->ext_db.cdb_off << PAGE_SHIFT);
ctx->rdb = dev->func_bar_addr + (ctx->ext_db.rdb_off << PAGE_SHIFT);
return 0;
}
static void free_ext_db_resources(struct erdma_dev *dev,
struct erdma_ucontext *ctx)
{
struct erdma_cmdq_ext_db_req req = {};
int ret;
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
CMDQ_OPCODE_FREE_DB);
req.cfg = FIELD_PREP(ERDMA_CMD_EXT_DB_CQ_EN_MASK, 1) |
FIELD_PREP(ERDMA_CMD_EXT_DB_RQ_EN_MASK, 1) |
FIELD_PREP(ERDMA_CMD_EXT_DB_SQ_EN_MASK, 1);
req.sdb_off = ctx->ext_db.sdb_off;
req.rdb_off = ctx->ext_db.rdb_off;
req.cdb_off = ctx->ext_db.cdb_off;
ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
if (ret)
ibdev_err_ratelimited(&dev->ibdev,
"free db resources failed %d\n", ret);
}
static void erdma_uctx_user_mmap_entries_remove(struct erdma_ucontext *uctx)
{
rdma_user_mmap_entry_remove(uctx->sq_db_mmap_entry);
@@ -1201,44 +1255,60 @@ int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
struct erdma_dev *dev = to_edev(ibctx->device);
int ret;
struct erdma_uresp_alloc_ctx uresp = {};
bool ext_db_en;
if (atomic_inc_return(&dev->num_ctx) > ERDMA_MAX_CONTEXT) {
ret = -ENOMEM;
goto err_out;
}
if (udata->outlen < sizeof(uresp)) {
ret = -EINVAL;
goto err_out;
}
INIT_LIST_HEAD(&ctx->dbrecords_page_list);
mutex_init(&ctx->dbrecords_page_mutex);
alloc_db_resources(dev, ctx);
ctx->rdb = dev->func_bar_addr + ERDMA_BAR_RQDB_SPACE_OFFSET;
ctx->cdb = dev->func_bar_addr + ERDMA_BAR_CQDB_SPACE_OFFSET;
if (udata->outlen < sizeof(uresp)) {
ret = -EINVAL;
/*
* CAP_SYS_RAWIO is required if hardware does not support the extended
* doorbell mechanism.
*/
ext_db_en = !!(dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_EXTEND_DB);
if (!ext_db_en && !capable(CAP_SYS_RAWIO)) {
ret = -EPERM;
goto err_out;
}
if (ext_db_en) {
ret = alloc_ext_db_resources(dev, ctx);
if (ret)
goto err_out;
} else {
alloc_db_resources(dev, ctx);
ctx->rdb = dev->func_bar_addr + ERDMA_BAR_RQDB_SPACE_OFFSET;
ctx->cdb = dev->func_bar_addr + ERDMA_BAR_CQDB_SPACE_OFFSET;
}
ctx->sq_db_mmap_entry = erdma_user_mmap_entry_insert(
ctx, (void *)ctx->sdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.sdb);
if (!ctx->sq_db_mmap_entry) {
ret = -ENOMEM;
goto err_out;
goto err_free_ext_db;
}
ctx->rq_db_mmap_entry = erdma_user_mmap_entry_insert(
ctx, (void *)ctx->rdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.rdb);
if (!ctx->rq_db_mmap_entry) {
ret = -EINVAL;
goto err_out;
goto err_put_mmap_entries;
}
ctx->cq_db_mmap_entry = erdma_user_mmap_entry_insert(
ctx, (void *)ctx->cdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.cdb);
if (!ctx->cq_db_mmap_entry) {
ret = -EINVAL;
goto err_out;
goto err_put_mmap_entries;
}
uresp.dev_id = dev->pdev->device;
@@ -1247,12 +1317,18 @@ int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (ret)
goto err_out;
goto err_put_mmap_entries;
return 0;
err_out:
err_put_mmap_entries:
erdma_uctx_user_mmap_entries_remove(ctx);
err_free_ext_db:
if (ext_db_en)
free_ext_db_resources(dev, ctx);
err_out:
atomic_dec(&dev->num_ctx);
return ret;
}
@@ -1262,15 +1338,18 @@ void erdma_dealloc_ucontext(struct ib_ucontext *ibctx)
struct erdma_ucontext *ctx = to_ectx(ibctx);
struct erdma_dev *dev = to_edev(ibctx->device);
spin_lock(&dev->db_bitmap_lock);
if (ctx->sdb_type == ERDMA_SDB_PAGE)
clear_bit(ctx->sdb_idx, dev->sdb_page);
else if (ctx->sdb_type == ERDMA_SDB_ENTRY)
clear_bit(ctx->sdb_idx, dev->sdb_entry);
erdma_uctx_user_mmap_entries_remove(ctx);
spin_unlock(&dev->db_bitmap_lock);
if (ctx->ext_db.enable) {
free_ext_db_resources(dev, ctx);
} else {
spin_lock(&dev->db_bitmap_lock);
if (ctx->sdb_type == ERDMA_SDB_PAGE)
clear_bit(ctx->sdb_idx, dev->sdb_page);
else if (ctx->sdb_type == ERDMA_SDB_ENTRY)
clear_bit(ctx->sdb_idx, dev->sdb_entry);
spin_unlock(&dev->db_bitmap_lock);
}
atomic_dec(&dev->num_ctx);
}
......
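As a sanity check on the address math used in both doorbell paths above (a PAGE_SHIFT of 12 is assumed for the example):

/* A page offset of 0x10 selects func_bar_addr + (0x10 << 12), i.e. the
 * doorbell page 64 KiB into the function BAR; every ucontext gets its
 * own PAGE_SIZE window per queue type. */
static u64 erdma_db_page_addr(u64 func_bar_addr, u16 page_off)
{
	return func_bar_addr + ((u64)page_off << PAGE_SHIFT);
}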
@@ -31,9 +31,18 @@ struct erdma_user_mmap_entry {
u8 mmap_flag;
};
struct erdma_ext_db_info {
bool enable;
u16 sdb_off;
u16 rdb_off;
u16 cdb_off;
};
struct erdma_ucontext {
struct ib_ucontext ibucontext;
struct erdma_ext_db_info ext_db;
u32 sdb_type;
u32 sdb_idx;
u32 sdb_page_idx;
......
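The offsets are kept in erdma_ext_db_info for the lifetime of the ucontext because FREE_DB must echo them back to hardware at teardown; a condensed restatement of that pairing (field names as in the patch, the helper itself is illustrative):

/* Teardown mirrors allocation: the FREE_DB request carries the exact
 * offsets that ALLOC_DB handed out, rebuilt from the saved info. */
static void erdma_fill_free_db_req(struct erdma_cmdq_ext_db_req *req,
				   const struct erdma_ext_db_info *info)
{
	req->cfg = FIELD_PREP(ERDMA_CMD_EXT_DB_CQ_EN_MASK, 1) |
		   FIELD_PREP(ERDMA_CMD_EXT_DB_RQ_EN_MASK, 1) |
		   FIELD_PREP(ERDMA_CMD_EXT_DB_SQ_EN_MASK, 1);
	req->sdb_off = info->sdb_off;
	req->rdb_off = info->rdb_off;
	req->cdb_off = info->cdb_off;
}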