Commit 3b3dfd58 authored by Cheng Xu, committed by Leon Romanovsky

RDMA/erdma: Refactor the original doorbell allocation mechanism

The original doorbell allocation mechanism is complex and does not meet
the isolation requirement, so we introduce a new doorbell mechanism. The
original mechanism (used only with CAP_SYS_RAWIO, for hardware that does
not support the new mechanism) is kept as simple as possible for
compatibility.
Signed-off-by: Cheng Xu <chengyou@linux.alibaba.com>
Link: https://lore.kernel.org/r/20230606055005.80729-5-chengyou@linux.alibaba.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent 6534de1f
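
At a glance, the refactor folds the old alloc_db_resources()/alloc_ext_db_resources() pair into one entry point that prefers the hardware's extended doorbell allocation and otherwise falls back to fixed, privileged compat doorbells. The following is a condensed sketch of the resulting control flow, assembled from the hunks below; alloc_ext_db() is a hypothetical stand-in for the cmdq sequence shown later, not a real driver function:

/* Condensed from the diff below; not a verbatim copy of erdma_verbs.c. */
static int alloc_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx,
			      bool ext_db_en)
{
	/*
	 * Without extended doorbells, all contexts share fixed BAR pages,
	 * so isolation is lost and CAP_SYS_RAWIO is required.
	 */
	if (!ext_db_en) {
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;

		ctx->sdb = dev->func_bar_addr + ERDMA_BAR_SQDB_SPACE_OFFSET;
		ctx->rdb = dev->func_bar_addr + ERDMA_BAR_RQDB_SPACE_OFFSET;
		ctx->cdb = dev->func_bar_addr + ERDMA_BAR_CQDB_SPACE_OFFSET;
		return 0;
	}

	/*
	 * Extended path: per-context doorbell pages allocated by the device
	 * through the command queue (hypothetical helper standing in for the
	 * cmdq request/response handling in the hunks below).
	 */
	return alloc_ext_db(dev, ctx);
}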
@@ -128,13 +128,8 @@ struct erdma_devattr {
int numa_node;
enum erdma_cc_alg cc;
u32 grp_num;
u32 irq_num;
bool disable_dwqe;
u16 dwqe_pages;
u16 dwqe_entries;
u32 max_qp;
u32 max_send_wr;
u32 max_recv_wr;
@@ -215,15 +210,6 @@ struct erdma_dev {
u32 next_alloc_qpn;
u32 next_alloc_cqn;
spinlock_t db_bitmap_lock;
/* We provide a maximum of 64 uContexts, each with its own SQ doorbell page. */
DECLARE_BITMAP(sdb_page, ERDMA_DWQE_TYPE0_CNT);
/*
 * We provide a maximum of 496 uContexts, each with one normal SQ doorbell
 * and one directWQE doorbell.
 */
DECLARE_BITMAP(sdb_entry, ERDMA_DWQE_TYPE1_CNT);
atomic_t num_ctx;
struct list_head cep_list;
};
@@ -82,19 +82,6 @@
#define ERDMA_BAR_CQDB_SPACE_OFFSET \
(ERDMA_BAR_RQDB_SPACE_OFFSET + ERDMA_BAR_RQDB_SPACE_SIZE)
/* Doorbell page resources related. */
/*
 * Max # of directSQEs issued in parallel is 3072 per device; hardware
 * organizes this into 24 groups, each with 128 credits.
 */
#define ERDMA_DWQE_MAX_GRP_CNT 24
#define ERDMA_DWQE_NUM_PER_GRP 128
#define ERDMA_DWQE_TYPE0_CNT 64
#define ERDMA_DWQE_TYPE1_CNT 496
/* A type1 DB contains 2 DBs and takes 256 bytes. */
#define ERDMA_DWQE_TYPE1_CNT_PER_PAGE 16
#define ERDMA_SDB_SHARED_PAGE_INDEX 95
/* Doorbell related. */
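For orientation, at the 24-group maximum these constants describe the following SQ doorbell BAR layout. The enum below is purely illustrative, not driver code:

/* Illustrative only: old SQ doorbell layout at ERDMA_DWQE_MAX_GRP_CNT (24). */
enum {
	OLD_TOTAL_PAGES = 24 * 4,	 /* 4 doorbell pages per group -> 96   */
	OLD_TYPE0_PAGES = 64,		 /* pages 0..63: one page per uContext */
	OLD_TYPE1_PAGES = 496 / 16,	 /* pages 64..94: 16 entries per page  */
	OLD_SHARED_PAGE = 64 + 496 / 16, /* page 95, matching                  */
					 /* ERDMA_SDB_SHARED_PAGE_INDEX        */
};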
@@ -130,33 +130,6 @@ static irqreturn_t erdma_comm_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
static void erdma_dwqe_resource_init(struct erdma_dev *dev)
{
int total_pages, type0, type1;
dev->attrs.grp_num = erdma_reg_read32(dev, ERDMA_REGS_GRP_NUM_REG);
if (dev->attrs.grp_num < 4)
dev->attrs.disable_dwqe = true;
else
dev->attrs.disable_dwqe = false;
/* Each group supplies 4 doorbell pages. */
total_pages = dev->attrs.grp_num * 4;
if (dev->attrs.grp_num >= ERDMA_DWQE_MAX_GRP_CNT) {
dev->attrs.grp_num = ERDMA_DWQE_MAX_GRP_CNT;
type0 = ERDMA_DWQE_TYPE0_CNT;
type1 = ERDMA_DWQE_TYPE1_CNT / ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
} else {
type1 = total_pages / 3;
type0 = total_pages - type1 - 1;
}
dev->attrs.dwqe_pages = type0;
dev->attrs.dwqe_entries = type1 * ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
}
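
As a worked example of the sizing above, consider a hypothetical device exposing 12 doorbell groups (below the 24-group maximum, with dwqe still enabled):

/* Hypothetical: dev->attrs.grp_num == 12. */
int total_pages  = 12 * 4;	/* 48 doorbell pages                     */
int type1        = 48 / 3;	/* 16 pages reserved for type1 entries   */
int type0        = 48 - 16 - 1;	/* 31 type0 pages; 1 page stays shared   */
int dwqe_entries = 16 * 16;	/* 16 pages * 16 entries/page = 256      */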
static int erdma_request_vectors(struct erdma_dev *dev)
{
int expect_irq_num = min(num_possible_cpus() + 1, ERDMA_NUM_MSIX_VEC);
@@ -199,8 +172,6 @@ static int erdma_device_init(struct erdma_dev *dev, struct pci_dev *pdev)
{
int ret;
erdma_dwqe_resource_init(dev);
ret = dma_set_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(ERDMA_PCI_WIDTH));
if (ret)
@@ -557,10 +528,6 @@ static int erdma_ib_device_add(struct pci_dev *pdev)
if (ret)
return ret;
spin_lock_init(&dev->db_bitmap_lock);
bitmap_zero(dev->sdb_page, ERDMA_DWQE_TYPE0_CNT);
bitmap_zero(dev->sdb_entry, ERDMA_DWQE_TYPE1_CNT);
atomic_set(&dev->num_ctx, 0);
mac = erdma_reg_read32(dev, ERDMA_REGS_NETDEV_MAC_L_REG);
@@ -1149,71 +1149,27 @@ void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
kfree(entry);
}
#define ERDMA_SDB_PAGE 0
#define ERDMA_SDB_ENTRY 1
#define ERDMA_SDB_SHARED 2
static void alloc_db_resources(struct erdma_dev *dev,
struct erdma_ucontext *ctx)
{
u32 bitmap_idx;
struct erdma_devattr *attrs = &dev->attrs;
if (attrs->disable_dwqe)
goto alloc_normal_db;
/* Try to alloc independent SDB page. */
spin_lock(&dev->db_bitmap_lock);
bitmap_idx = find_first_zero_bit(dev->sdb_page, attrs->dwqe_pages);
if (bitmap_idx != attrs->dwqe_pages) {
set_bit(bitmap_idx, dev->sdb_page);
spin_unlock(&dev->db_bitmap_lock);
ctx->sdb_type = ERDMA_SDB_PAGE;
ctx->sdb_idx = bitmap_idx;
ctx->sdb_page_idx = bitmap_idx;
ctx->sdb = dev->func_bar_addr + ERDMA_BAR_SQDB_SPACE_OFFSET +
(bitmap_idx << PAGE_SHIFT);
ctx->sdb_page_off = 0;
return;
}
bitmap_idx = find_first_zero_bit(dev->sdb_entry, attrs->dwqe_entries);
if (bitmap_idx != attrs->dwqe_entries) {
set_bit(bitmap_idx, dev->sdb_entry);
spin_unlock(&dev->db_bitmap_lock);
ctx->sdb_type = ERDMA_SDB_ENTRY;
ctx->sdb_idx = bitmap_idx;
ctx->sdb_page_idx = attrs->dwqe_pages +
bitmap_idx / ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
ctx->sdb_page_off = bitmap_idx % ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
ctx->sdb = dev->func_bar_addr + ERDMA_BAR_SQDB_SPACE_OFFSET +
(ctx->sdb_page_idx << PAGE_SHIFT);
return;
}
spin_unlock(&dev->db_bitmap_lock);
alloc_normal_db:
ctx->sdb_type = ERDMA_SDB_SHARED;
ctx->sdb_idx = 0;
ctx->sdb_page_idx = ERDMA_SDB_SHARED_PAGE_INDEX;
ctx->sdb_page_off = 0;
ctx->sdb = dev->func_bar_addr + (ctx->sdb_page_idx << PAGE_SHIFT);
}
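
To make the removed type1 arithmetic concrete, suppose a context wins bitmap slot 20 on a device with 64 type0 pages (both values hypothetical):

/* Hypothetical: bitmap_idx == 20, attrs->dwqe_pages == 64. */
u32 sdb_page_idx = 64 + 20 / ERDMA_DWQE_TYPE1_CNT_PER_PAGE;	/* page 65 */
u32 sdb_page_off = 20 % ERDMA_DWQE_TYPE1_CNT_PER_PAGE;		/* slot 4  */
u64 sdb = dev->func_bar_addr + ERDMA_BAR_SQDB_SPACE_OFFSET +
	  (sdb_page_idx << PAGE_SHIFT);
/* The 256-byte slot offset reaches userspace via uresp.sdb_offset. */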
static int alloc_ext_db_resources(struct erdma_dev *dev,
struct erdma_ucontext *ctx)
static int alloc_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx,
bool ext_db_en)
{
struct erdma_cmdq_ext_db_req req = {};
u64 val0, val1;
int ret;
/*
 * CAP_SYS_RAWIO is required if hardware does not support the extended
 * doorbell mechanism.
 */
if (!ext_db_en && !capable(CAP_SYS_RAWIO))
return -EPERM;
if (!ext_db_en) {
ctx->sdb = dev->func_bar_addr + ERDMA_BAR_SQDB_SPACE_OFFSET;
ctx->rdb = dev->func_bar_addr + ERDMA_BAR_RQDB_SPACE_OFFSET;
ctx->cdb = dev->func_bar_addr + ERDMA_BAR_CQDB_SPACE_OFFSET;
return 0;
}
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
CMDQ_OPCODE_ALLOC_DB);
@@ -1230,7 +1186,6 @@ static int alloc_ext_db_resources(struct erdma_dev *dev,
ctx->ext_db.rdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_RDB);
ctx->ext_db.cdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_CDB);
ctx->sdb_type = ERDMA_SDB_PAGE;
ctx->sdb = dev->func_bar_addr + (ctx->ext_db.sdb_off << PAGE_SHIFT);
ctx->cdb = dev->func_bar_addr + (ctx->ext_db.cdb_off << PAGE_SHIFT);
ctx->rdb = dev->func_bar_addr + (ctx->ext_db.rdb_off << PAGE_SHIFT);
@@ -1238,12 +1193,14 @@ static int alloc_ext_db_resources(struct erdma_dev *dev,
return 0;
}
static void free_ext_db_resources(struct erdma_dev *dev,
struct erdma_ucontext *ctx)
static void free_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx)
{
struct erdma_cmdq_ext_db_req req = {};
int ret;
if (!ctx->ext_db.enable)
return;
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
CMDQ_OPCODE_FREE_DB);
@@ -1274,7 +1231,6 @@ int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
struct erdma_dev *dev = to_edev(ibctx->device);
int ret;
struct erdma_uresp_alloc_ctx uresp = {};
bool ext_db_en;
if (atomic_inc_return(&dev->num_ctx) > ERDMA_MAX_CONTEXT) {
ret = -ENOMEM;
@@ -1289,25 +1245,11 @@ int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
INIT_LIST_HEAD(&ctx->dbrecords_page_list);
mutex_init(&ctx->dbrecords_page_mutex);
/*
 * CAP_SYS_RAWIO is required if hardware does not support the extended
 * doorbell mechanism.
 */
ext_db_en = !!(dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_EXTEND_DB);
if (!ext_db_en && !capable(CAP_SYS_RAWIO)) {
ret = -EPERM;
ret = alloc_db_resources(dev, ctx,
!!(dev->attrs.cap_flags &
ERDMA_DEV_CAP_FLAGS_EXTEND_DB));
if (ret)
goto err_out;
}
if (ext_db_en) {
ret = alloc_ext_db_resources(dev, ctx);
if (ret)
goto err_out;
} else {
alloc_db_resources(dev, ctx);
ctx->rdb = dev->func_bar_addr + ERDMA_BAR_RQDB_SPACE_OFFSET;
ctx->cdb = dev->func_bar_addr + ERDMA_BAR_CQDB_SPACE_OFFSET;
}
ctx->sq_db_mmap_entry = erdma_user_mmap_entry_insert(
ctx, (void *)ctx->sdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.sdb);
@@ -1331,8 +1273,6 @@ int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
}
uresp.dev_id = dev->pdev->device;
uresp.sdb_type = ctx->sdb_type;
uresp.sdb_offset = ctx->sdb_page_off;
ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (ret)
@@ -1344,8 +1284,7 @@ int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
erdma_uctx_user_mmap_entries_remove(ctx);
err_free_ext_db:
if (ext_db_en)
free_ext_db_resources(dev, ctx);
free_db_resources(dev, ctx);
err_out:
atomic_dec(&dev->num_ctx);
@@ -1354,22 +1293,11 @@ int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
void erdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
struct erdma_ucontext *ctx = to_ectx(ibctx);
struct erdma_dev *dev = to_edev(ibctx->device);
struct erdma_ucontext *ctx = to_ectx(ibctx);
erdma_uctx_user_mmap_entries_remove(ctx);
if (ctx->ext_db.enable) {
free_ext_db_resources(dev, ctx);
} else {
spin_lock(&dev->db_bitmap_lock);
if (ctx->sdb_type == ERDMA_SDB_PAGE)
clear_bit(ctx->sdb_idx, dev->sdb_page);
else if (ctx->sdb_type == ERDMA_SDB_ENTRY)
clear_bit(ctx->sdb_idx, dev->sdb_entry);
spin_unlock(&dev->db_bitmap_lock);
}
free_db_resources(dev, ctx);
atomic_dec(&dev->num_ctx);
}
@@ -43,10 +43,6 @@ struct erdma_ucontext {
struct erdma_ext_db_info ext_db;
u32 sdb_type;
u32 sdb_idx;
u32 sdb_page_idx;
u32 sdb_page_off;
u64 sdb;
u64 rdb;
u64 cdb;