Commit 2a3d923f authored by Lijun Ou, committed by Jason Gunthorpe

RDMA/hns: Replace magic numbers with #defines

This patch makes the code more readable by replacing magic numbers with named #defines.
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 669cefb6
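
The substitutions below are mechanical: each bare constant gets a name that records its meaning. Most of the recurring "/ 8" divisions encode the fact that a base address (BA) table stores 64-bit DMA addresses. As a standalone sketch of that arithmetic (plain C, not code from the patch; the helper name is illustrative):

	#include <stdint.h>

	#define BA_BYTE_LEN 8	/* one BA table entry holds a 64-bit DMA address */

	/* How many base addresses fit in one table chunk; this is the
	 * expression that used to be spelled "bt_chunk_size / 8". */
	static inline unsigned int bas_per_chunk(unsigned int bt_chunk_size)
	{
		return bt_chunk_size / BA_BYTE_LEN;
	}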
@@ -78,7 +78,8 @@ static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir(
 	if (!pgdir)
 		return NULL;
 
-	bitmap_fill(pgdir->order1, HNS_ROCE_DB_PER_PAGE / 2);
+	bitmap_fill(pgdir->order1,
+		    HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
 	pgdir->bits[0] = pgdir->order0;
 	pgdir->bits[1] = pgdir->order1;
 	pgdir->page = dma_alloc_coherent(dma_device, PAGE_SIZE,
@@ -116,7 +117,7 @@ static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir,
 	db->u.pgdir = pgdir;
 	db->index = i;
 	db->db_record = pgdir->page + db->index;
-	db->dma = pgdir->db_dma + db->index * 4;
+	db->dma = pgdir->db_dma + db->index * HNS_ROCE_DB_UNIT_SIZE;
 	db->order = order;
 
 	return 0;
@@ -170,7 +171,8 @@ void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)
 	i >>= o;
 	set_bit(i, db->u.pgdir->bits[o]);
 
-	if (bitmap_full(db->u.pgdir->order1, HNS_ROCE_DB_PER_PAGE / 2)) {
+	if (bitmap_full(db->u.pgdir->order1,
+			HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT)) {
 		dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page,
 				  db->u.pgdir->db_dma);
 		list_del(&db->u.pgdir->list);
......
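
A doorbell record is a u32, so the doorbell constants fit together: a page holds PAGE_SIZE / HNS_ROCE_DB_UNIT_SIZE records, and record i sits i * HNS_ROCE_DB_UNIT_SIZE bytes into the page in both the CPU and DMA views. A minimal sketch of the offset calculation the second hunk now spells out (illustrative helper, not in the driver):

	/* Doorbell record i lives at byte offset i * 4 within the pgdir page. */
	static inline dma_addr_t db_record_dma(dma_addr_t page_dma, unsigned int i)
	{
		return page_dma + i * HNS_ROCE_DB_UNIT_SIZE;
	}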
@@ -37,9 +37,12 @@
 #define DRV_NAME "hns_roce"
 
+/* hip08 is a pci device, it has two versions according to the pci revision id */
+#define PCI_REVISION_ID_HIP08_A			0x20
+#define PCI_REVISION_ID_HIP08_B			0x21
+
 #define HNS_ROCE_HW_VER1	('h' << 24 | 'i' << 16 | '0' << 8 | '6')
 
-#define MAC_ADDR_OCTET_NUM			6
 #define HNS_ROCE_MAX_MSG_LEN			0x80000000
 
 #define HNS_ROCE_ALOGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))
@@ -48,6 +51,10 @@
 #define HNS_ROCE_BA_SIZE			(32 * 4096)
 
+#define BA_BYTE_LEN				8
+
+#define BITS_PER_BYTE				8
+
 /* Hardware specification only for v1 engine */
 #define HNS_ROCE_MIN_CQE_NUM			0x40
 #define HNS_ROCE_MIN_WQE_NUM			0x20
@@ -55,6 +62,7 @@
 /* Hardware specification only for v1 engine */
 #define HNS_ROCE_MAX_INNER_MTPT_NUM		0x7
 #define HNS_ROCE_MAX_MTPT_PBL_NUM		0x100000
+#define HNS_ROCE_MAX_SGE_NUM			2
 
 #define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS	20
 #define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT	\
@@ -64,6 +72,9 @@
 #define HNS_ROCE_MAX_IRQ_NUM			128
 
+#define HNS_ROCE_SGE_IN_WQE			2
+#define HNS_ROCE_SGE_SHIFT			4
+
 #define EQ_ENABLE				1
 #define EQ_DISABLE				0
@@ -81,6 +92,7 @@
 #define HNS_ROCE_MAX_PORTS			6
 #define HNS_ROCE_MAX_GID_NUM			16
 #define HNS_ROCE_GID_SIZE			16
+#define HNS_ROCE_SGE_SIZE			16
 
 #define HNS_ROCE_HOP_NUM_0			0xff
@@ -111,6 +123,8 @@
 #define PAGES_SHIFT_24				24
 #define PAGES_SHIFT_32				32
 
+#define HNS_ROCE_PCI_BAR_NUM			2
+
 #define HNS_ROCE_IDX_QUE_ENTRY_SZ		4
 #define SRQ_DB_REG				0x230
@@ -213,6 +227,9 @@ enum hns_roce_mtt_type {
 	MTT_TYPE_IDX
 };
 
+#define HNS_ROCE_DB_TYPE_COUNT			2
+#define HNS_ROCE_DB_UNIT_SIZE			4
+
 enum {
 	HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
 };
@@ -413,8 +430,8 @@ struct hns_roce_buf {
 struct hns_roce_db_pgdir {
 	struct list_head	list;
 	DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE);
-	DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / 2);
-	unsigned long		*bits[2];
+	DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
+	unsigned long		*bits[HNS_ROCE_DB_TYPE_COUNT];
 	u32			*page;
 	dma_addr_t		db_dma;
 };
@@ -535,7 +552,7 @@ struct hns_roce_av {
 	u8		hop_limit;
 	__le32		sl_tclass_flowlabel;
 	u8		dgid[HNS_ROCE_GID_SIZE];
-	u8		mac[6];
+	u8		mac[ETH_ALEN];
 	__le16		vlan;
 	bool		vlan_en;
 };
@@ -940,6 +957,16 @@ struct hns_roce_hw {
 	const struct ib_device_ops *hns_roce_dev_srq_ops;
 };
 
+enum hns_phy_state {
+	HNS_ROCE_PHY_SLEEP	= 1,
+	HNS_ROCE_PHY_POLLING	= 2,
+	HNS_ROCE_PHY_DISABLED	= 3,
+	HNS_ROCE_PHY_TRAINING	= 4,
+	HNS_ROCE_PHY_LINKUP	= 5,
+	HNS_ROCE_PHY_LINKERR	= 6,
+	HNS_ROCE_PHY_TEST	= 7
+};
+
 struct hns_roce_dev {
 	struct ib_device	ib_dev;
 	struct platform_device	*pdev;
@@ -962,7 +989,7 @@ struct hns_roce_dev {
 	struct hns_roce_caps	caps;
 	struct xarray		qp_table_xa;
 
-	unsigned char	dev_addr[HNS_ROCE_MAX_PORTS][MAC_ADDR_OCTET_NUM];
+	unsigned char	dev_addr[HNS_ROCE_MAX_PORTS][ETH_ALEN];
 	u64			sys_image_guid;
 	u32			vendor_id;
 	u32			vendor_part_id;
......
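
In the pgdir, order0 carries one bit per doorbell record and order1 one bit per aligned pair of records, which is why the order1 bitmap is HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT bits long. With 4 KB pages (an assumed example size) the numbers work out as:

	enum {
		/* 4096 / HNS_ROCE_DB_UNIT_SIZE = 1024 doorbell records per page */
		EXAMPLE_DB_PER_PAGE = 4096 / HNS_ROCE_DB_UNIT_SIZE,
		/* 1024 / HNS_ROCE_DB_TYPE_COUNT = 512 order-1 (pair) bits */
		EXAMPLE_ORDER1_BITS = EXAMPLE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT,
	};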
@@ -165,7 +165,7 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
 						 + PAGE_SHIFT);
 		mhop->bt_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
 						 + PAGE_SHIFT);
-		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
+		mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
 		mhop->hop_num = hr_dev->caps.mtt_hop_num;
 		break;
 	case HEM_TYPE_CQE:
@@ -173,7 +173,7 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
 						 + PAGE_SHIFT);
 		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
 						 + PAGE_SHIFT);
-		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
+		mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
 		mhop->hop_num = hr_dev->caps.cqe_hop_num;
 		break;
 	case HEM_TYPE_SRQWQE:
@@ -181,7 +181,7 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
 						 + PAGE_SHIFT);
 		mhop->bt_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz
 						 + PAGE_SHIFT);
-		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
+		mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
 		mhop->hop_num = hr_dev->caps.srqwqe_hop_num;
 		break;
 	case HEM_TYPE_IDX:
@@ -189,7 +189,7 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
 						 + PAGE_SHIFT);
 		mhop->bt_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz
 						 + PAGE_SHIFT);
-		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
+		mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
 		mhop->hop_num = hr_dev->caps.idx_hop_num;
 		break;
 	default:
@@ -206,7 +206,7 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
 	 * MTT/CQE alloc hem for bt pages.
 	 */
 	bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
-	chunk_ba_num = mhop->bt_chunk_size / 8;
+	chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
 	chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
 			      mhop->bt_chunk_size;
 	table_idx = (*obj & (table->num_obj - 1)) /
@@ -436,7 +436,7 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
 	buf_chunk_size = mhop.buf_chunk_size;
 	bt_chunk_size = mhop.bt_chunk_size;
 	hop_num = mhop.hop_num;
-	chunk_ba_num = bt_chunk_size / 8;
+	chunk_ba_num = bt_chunk_size / BA_BYTE_LEN;
 
 	bt_num = hns_roce_get_bt_num(table->type, hop_num);
 	switch (bt_num) {
@@ -646,7 +646,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
 	bt_chunk_size = mhop.bt_chunk_size;
 	hop_num = mhop.hop_num;
-	chunk_ba_num = bt_chunk_size / 8;
+	chunk_ba_num = bt_chunk_size / BA_BYTE_LEN;
 
 	bt_num = hns_roce_get_bt_num(table->type, hop_num);
 	switch (bt_num) {
@@ -800,7 +800,7 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
 		i = mhop.l0_idx;
 		j = mhop.l1_idx;
 		if (mhop.hop_num == 2)
-			hem_idx = i * (mhop.bt_chunk_size / 8) + j;
+			hem_idx = i * (mhop.bt_chunk_size / BA_BYTE_LEN) + j;
 		else if (mhop.hop_num == 1 ||
 			 mhop.hop_num == HNS_ROCE_HOP_NUM_0)
 			hem_idx = i;
@@ -1000,7 +1000,7 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
 	}
 
 	obj_per_chunk = buf_chunk_size / obj_size;
 	num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
-	bt_chunk_num = bt_chunk_size / 8;
+	bt_chunk_num = bt_chunk_size / BA_BYTE_LEN;
 	if (type >= HEM_TYPE_MTT)
 		num_bt_l0 = bt_chunk_num;
......
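
Every bt_chunk_size / BA_BYTE_LEN above is the fan-out of one hop of the multi-hop table: a chunk of 2^(ba_pg_sz + PAGE_SHIFT) bytes packed with 8-byte base addresses. A worked example, assuming ba_pg_sz = 0 and 4 KB pages:

	unsigned int bt_chunk_size = 1 << (0 + 12);               /* 4096 bytes */
	unsigned int chunk_ba_num  = bt_chunk_size / BA_BYTE_LEN; /* 512 BAs   */
	/* With hop_num == 2, each L0 entry fans out to 512 L1 entries,
	 * each of which points at another 512-entry chunk. */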
@@ -818,7 +818,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 		attr.dest_qp_num = hr_qp->qpn;
 		memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
 		       hr_dev->dev_addr[port],
-		       MAC_ADDR_OCTET_NUM);
+		       ETH_ALEN);
 
 		memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
 		memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
......
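
ETH_ALEN is the kernel's canonical MAC address length (6, defined in <linux/if_ether.h>), so the driver-local MAC_ADDR_OCTET_NUM merely duplicated it. The replacement pattern, as a self-contained sketch (the helper name is illustrative):

	#include <linux/if_ether.h>	/* ETH_ALEN == 6 */
	#include <linux/string.h>

	/* Copy one port's MAC address using the generic constant. */
	static inline void copy_port_mac(u8 dst[ETH_ALEN], const u8 src[ETH_ALEN])
	{
		memcpy(dst, src, ETH_ALEN);
	}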
@@ -3426,7 +3426,9 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
 	else
 		roce_set_field(context->byte_4_sqpn_tst,
 			       V2_QPC_BYTE_4_SGE_SHIFT_M,
-			       V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
+			       V2_QPC_BYTE_4_SGE_SHIFT_S,
+			       hr_qp->sq.max_gs >
+			       HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ?
 			       ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
 
 	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
@@ -3708,13 +3710,14 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
 		       V2_QPC_BYTE_20_SGID_IDX_M,
 		       V2_QPC_BYTE_20_SGID_IDX_S, 0);
-	memcpy(&(context->dmac), dmac, 4);
+	memcpy(&(context->dmac), dmac, sizeof(u32));
 	roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
 		       V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
 
 	qpc_mask->dmac = 0;
 	roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
 		       V2_QPC_BYTE_52_DMAC_S, 0);
 
+	/* mtu * (2^LP_PKTN_INI) should not be bigger than the max message length, 64KB */
 	roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
 		       V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
 	roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
@@ -3756,6 +3759,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
 		       V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
 
+	/* rocee sends 2^lp_sgen_ini segments each time */
 	roce_set_field(context->byte_168_irrl_idx,
 		       V2_QPC_BYTE_168_LP_SGEN_INI_M,
 		       V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
@@ -3810,14 +3814,15 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
 		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
 
 	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
-	context->sq_cur_sge_blk_addr =
-		((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
-		((u32)(mtts[hr_qp->sge.offset / page_size]
-		>> PAGE_ADDR_SHIFT)) : 0;
+	context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI) ||
+		       hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
+		       ((u32)(mtts[hr_qp->sge.offset / page_size] >>
+		       PAGE_ADDR_SHIFT)) : 0;
 	roce_set_field(context->byte_184_irrl_idx,
 		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
 		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
-		       ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
+		       ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs >
+		       HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
 		       (mtts[hr_qp->sge.offset / page_size] >>
 		       (32 + PAGE_ADDR_SHIFT)) : 0);
 	qpc_mask->sq_cur_sge_blk_addr = 0;
@@ -4144,7 +4149,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
 		roce_set_field(context->byte_224_retry_msg,
 			       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
 			       V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
-			       attr->sq_psn >> 16);
+			       attr->sq_psn >> V2_QPC_BYTE_220_RETRY_MSG_PSN_S);
 		roce_set_field(qpc_mask->byte_224_retry_msg,
 			       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
 			       V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
@@ -4374,11 +4379,12 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 				    V2_QPC_BYTE_56_DQPN_M,
 				    V2_QPC_BYTE_56_DQPN_S);
 	qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en,
-				    V2_QPC_BYTE_76_RRE_S)) << 2) |
+				    V2_QPC_BYTE_76_RRE_S)) << V2_QP_RWE_S) |
 				   ((roce_get_bit(context->byte_76_srqn_op_en,
-				    V2_QPC_BYTE_76_RWE_S)) << 1) |
+				    V2_QPC_BYTE_76_RWE_S)) << V2_QP_RRE_S) |
 				   ((roce_get_bit(context->byte_76_srqn_op_en,
-				    V2_QPC_BYTE_76_ATE_S)) << 3);
+				    V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);
+
 	if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
 	    hr_qp->ibqp.qp_type == IB_QPT_UC) {
 		struct ib_global_route *grh =
@@ -5150,8 +5156,8 @@ static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
 			dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
 					  eq->l1_dma[i]);
 
-		for (j = 0; j < bt_chk_sz / 8; j++) {
-			idx = i * (bt_chk_sz / 8) + j;
+		for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
+			idx = i * (bt_chk_sz / BA_BYTE_LEN) + j;
 			if ((i == eq->l0_last_num - 1)
 			     && j == eq->l1_last_num - 1) {
 				eqe_alloc = (buf_chk_sz / eq->eqe_size)
@@ -5367,9 +5373,9 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
 	buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
 	bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
 
-	ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1)
-		  / buf_chk_sz;
-	bt_num = (ba_num + bt_chk_sz / 8 - 1) / (bt_chk_sz / 8);
+	ba_num = DIV_ROUND_UP(PAGE_ALIGN(eq->entries * eq->eqe_size),
+			      buf_chk_sz);
+	bt_num = DIV_ROUND_UP(ba_num, bt_chk_sz / BA_BYTE_LEN);
 
 	/* hop_num = 0 */
 	if (mhop_num == HNS_ROCE_HOP_NUM_0) {
@@ -5414,12 +5420,12 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
 		goto err_dma_alloc_l0;
 
 	if (mhop_num == 1) {
-		if (ba_num > (bt_chk_sz / 8))
+		if (ba_num > (bt_chk_sz / BA_BYTE_LEN))
 			dev_err(dev, "ba_num %d is too large for 1 hop\n",
 				ba_num);
 
 		/* alloc buf */
-		for (i = 0; i < bt_chk_sz / 8; i++) {
+		for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
 			if (eq_buf_cnt + 1 < ba_num) {
 				size = buf_chk_sz;
 			} else {
} else if (mhop_num == 2) { } else if (mhop_num == 2) {
/* alloc L1 BT and buf */ /* alloc L1 BT and buf */
for (i = 0; i < bt_chk_sz / 8; i++) { for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz, eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
&(eq->l1_dma[i]), &(eq->l1_dma[i]),
GFP_KERNEL); GFP_KERNEL);
@@ -5451,8 +5457,8 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
 				goto err_dma_alloc_l1;
 			*(eq->bt_l0 + i) = eq->l1_dma[i];
 
-			for (j = 0; j < bt_chk_sz / 8; j++) {
-				idx = i * bt_chk_sz / 8 + j;
+			for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
+				idx = i * bt_chk_sz / BA_BYTE_LEN + j;
 				if (eq_buf_cnt + 1 < ba_num) {
 					size = buf_chk_sz;
 				} else {
@@ -5497,8 +5503,8 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
 			dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
 					  eq->l1_dma[i]);
 
-			for (j = 0; j < bt_chk_sz / 8; j++) {
-				idx = i * bt_chk_sz / 8 + j;
+			for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
+				idx = i * bt_chk_sz / BA_BYTE_LEN + j;
 				dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
 						  eq->buf_dma[idx]);
 			}
@@ -5521,11 +5527,11 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
 			dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
 					  eq->l1_dma[i]);
 
-			for (j = 0; j < bt_chk_sz / 8; j++) {
+			for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
 				if (i == record_i && j >= record_j)
 					break;
 
-				idx = i * bt_chk_sz / 8 + j;
+				idx = i * bt_chk_sz / BA_BYTE_LEN + j;
 				dma_free_coherent(dev, buf_chk_sz,
 						  eq->buf[idx],
 						  eq->buf_dma[idx]);
@@ -5982,7 +5988,7 @@ static int find_empty_entry(struct hns_roce_idx_que *idx_que)
 	bit_num = ffs(idx_que->bitmap[i]);
 	idx_que->bitmap[i] &= ~(1ULL << (bit_num - 1));
 
-	return i * sizeof(u64) * 8 + (bit_num - 1);
+	return i * BITS_PER_LONG_LONG + (bit_num - 1);
 }
 
 static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
@@ -6058,7 +6064,8 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
 		 */
 		wmb();
 
-		srq_db.byte_4 = HNS_ROCE_V2_SRQ_DB << 24 | srq->srqn;
+		srq_db.byte_4 = HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
+				(srq->srqn & V2_DB_BYTE_4_TAG_M);
 		srq_db.parameter = srq->head;
 
 		hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
......
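
Two of the substitutions above swap open-coded arithmetic for standard kernel helpers: (n + d - 1) / d is exactly DIV_ROUND_UP(n, d) from <linux/kernel.h>, and i * sizeof(u64) * 8 is i * BITS_PER_LONG_LONG, since the index-queue bitmap is an array of u64 words. A quick user-space check of both equivalences:

	#include <assert.h>
	#include <stdint.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define BITS_PER_LONG_LONG	64

	int main(void)
	{
		/* Rounding up: 4097 bytes need two 4096-byte chunks. */
		assert(DIV_ROUND_UP(4097, 4096) == 2);
		/* One u64 bitmap word covers 64 index-queue entries. */
		assert(sizeof(uint64_t) * 8 == BITS_PER_LONG_LONG);
		return 0;
	}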
@@ -886,6 +886,10 @@ struct hns_roce_v2_qp_context {
 #define	V2_QPC_BYTE_256_SQ_FLUSH_IDX_S 16
 #define V2_QPC_BYTE_256_SQ_FLUSH_IDX_M GENMASK(31, 16)
 
+#define	V2_QP_RWE_S 1 /* rdma write enable */
+#define	V2_QP_RRE_S 2 /* rdma read enable */
+#define	V2_QP_ATE_S 3 /* rdma atomic enable */
+
 struct hns_roce_v2_cqe {
 	__le32	byte_4;
 	union {
......
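
The three new shifts name the bit positions of the corresponding IB access flags (IB_ACCESS_REMOTE_WRITE is bit 1, IB_ACCESS_REMOTE_READ bit 2, IB_ACCESS_REMOTE_ATOMIC bit 3), which is what hns_roce_v2_query_qp assembles into qp_attr->qp_access_flags. A sketch of the correspondence (illustrative helper only, not driver code):

	/* OR each enable bit into its IB access-flag position. */
	static inline int qpc_to_access_flags(int rre, int rwe, int ate)
	{
		return (rwe << V2_QP_RWE_S) |	/* remote write -> bit 1  */
		       (rre << V2_QP_RRE_S) |	/* remote read  -> bit 2  */
		       (ate << V2_QP_ATE_S);	/* remote atomic -> bit 3 */
	}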
@@ -64,10 +64,10 @@ static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
 	u8 phy_port;
 	u32 i = 0;
 
-	if (!memcmp(hr_dev->dev_addr[port], addr, MAC_ADDR_OCTET_NUM))
+	if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN))
 		return 0;
 
-	for (i = 0; i < MAC_ADDR_OCTET_NUM; i++)
+	for (i = 0; i < ETH_ALEN; i++)
 		hr_dev->dev_addr[port][i] = addr[i];
 
 	phy_port = hr_dev->iboe.phy_port[port];
@@ -262,7 +262,8 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
 	props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
 	props->state = (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
 			IB_PORT_ACTIVE : IB_PORT_DOWN;
-	props->phys_state = (props->state == IB_PORT_ACTIVE) ? 5 : 3;
+	props->phys_state = (props->state == IB_PORT_ACTIVE) ?
+			     HNS_ROCE_PHY_LINKUP : HNS_ROCE_PHY_DISABLED;
 
 	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
......
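
The hns_phy_state values added in hns_roce_device.h mirror the physical port states the IB specification defines for PortInfo (1 sleep, 2 polling, 3 disabled, 4 training, 5 link up, 6 link error recovery, 7 phy test), so HNS_ROCE_PHY_LINKUP and HNS_ROCE_PHY_DISABLED replace the bare 5 and 3 in hns_roce_query_port. As a minimal illustrative helper (not in the driver):

	/* Map the logical port state to the IBTA physical-state code. */
	static inline enum hns_phy_state port_to_phys_state(bool active)
	{
		return active ? HNS_ROCE_PHY_LINKUP : HNS_ROCE_PHY_DISABLED;
	}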
@@ -314,11 +314,11 @@ static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
 			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
 					  mr->pbl_l1_dma_addr[i]);
 
-			for (j = 0; j < pbl_bt_sz / 8; j++) {
+			for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
 				if (i == loop_i && j >= loop_j)
 					break;
 
-				bt_idx = i * pbl_bt_sz / 8 + j;
+				bt_idx = i * pbl_bt_sz / BA_BYTE_LEN + j;
 				dma_free_coherent(dev, pbl_bt_sz,
 						  mr->pbl_bt_l2[bt_idx],
 						  mr->pbl_l2_dma_addr[bt_idx]);
@@ -329,8 +329,8 @@ static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
 			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
 					  mr->pbl_l1_dma_addr[i]);
 
-			for (j = 0; j < pbl_bt_sz / 8; j++) {
-				bt_idx = i * pbl_bt_sz / 8 + j;
+			for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
+				bt_idx = i * pbl_bt_sz / BA_BYTE_LEN + j;
 				dma_free_coherent(dev, pbl_bt_sz,
 						  mr->pbl_bt_l2[bt_idx],
 						  mr->pbl_l2_dma_addr[bt_idx]);
@@ -533,7 +533,7 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
 {
 	struct device *dev = hr_dev->dev;
 	unsigned long index = 0;
-	int ret = 0;
+	int ret;
 
 	/* Allocate a key for mr from mr_table */
 	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
@@ -559,7 +559,8 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
 		mr->pbl_l0_dma_addr = 0;
 	} else {
 		if (!hr_dev->caps.pbl_hop_num) {
-			mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+			mr->pbl_buf = dma_alloc_coherent(dev,
+							 npages * BA_BYTE_LEN,
 							 &(mr->pbl_dma_addr),
 							 GFP_KERNEL);
 			if (!mr->pbl_buf)
@@ -590,9 +591,8 @@ static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
 	if (mhop_num == HNS_ROCE_HOP_NUM_0)
 		return;
 
-	/* hop_num = 1 */
 	if (mhop_num == 1) {
-		dma_free_coherent(dev, (unsigned int)(npages * 8),
+		dma_free_coherent(dev, (unsigned int)(npages * BA_BYTE_LEN),
 				  mr->pbl_buf, mr->pbl_dma_addr);
 		return;
 	}
@@ -603,12 +603,13 @@ static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
 	if (mhop_num == 2) {
 		for (i = 0; i < mr->l0_chunk_last_num; i++) {
 			if (i == mr->l0_chunk_last_num - 1) {
-				npages_allocated = i * (pbl_bt_sz / 8);
+				npages_allocated =
+					i * (pbl_bt_sz / BA_BYTE_LEN);
 
 				dma_free_coherent(dev,
-					(npages - npages_allocated) * 8,
+					(npages - npages_allocated) * BA_BYTE_LEN,
 					mr->pbl_bt_l1[i],
 					mr->pbl_l1_dma_addr[i]);
 
 				break;
 			}
@@ -621,16 +622,17 @@ static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
 			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
 					  mr->pbl_l1_dma_addr[i]);
 
-			for (j = 0; j < pbl_bt_sz / 8; j++) {
-				bt_idx = i * (pbl_bt_sz / 8) + j;
+			for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
+				bt_idx = i * (pbl_bt_sz / BA_BYTE_LEN) + j;
 
 				if ((i == mr->l0_chunk_last_num - 1)
 				    && j == mr->l1_chunk_last_num - 1) {
 					npages_allocated = bt_idx *
-							   (pbl_bt_sz / 8);
+						      (pbl_bt_sz / BA_BYTE_LEN);
 
 					dma_free_coherent(dev,
-						(npages - npages_allocated) * 8,
+						(npages - npages_allocated) *
+						BA_BYTE_LEN,
 						mr->pbl_bt_l2[bt_idx],
 						mr->pbl_l2_dma_addr[bt_idx]);
@@ -675,7 +677,8 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
 		npages = ib_umem_page_count(mr->umem);
 
 		if (!hr_dev->caps.pbl_hop_num)
-			dma_free_coherent(dev, (unsigned int)(npages * 8),
+			dma_free_coherent(dev,
+					  (unsigned int)(npages * BA_BYTE_LEN),
 					  mr->pbl_buf, mr->pbl_dma_addr);
 		else
 			hns_roce_mhop_free(hr_dev, mr);
@@ -1059,6 +1062,7 @@ static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
 	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
 		page_addr = sg_page_iter_dma_address(&sg_iter);
 		if (!hr_dev->caps.pbl_hop_num) {
+			/* for hip06, page addr is aligned to 4K */
 			mr->pbl_buf[i++] = page_addr >> 12;
 		} else if (hr_dev->caps.pbl_hop_num == 1) {
 			mr->pbl_buf[i++] = page_addr;
@@ -1069,7 +1073,7 @@ static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
 				mr->pbl_bt_l2[i][j] = page_addr;
 
 				j++;
-				if (j >= (pbl_bt_sz / 8)) {
+				if (j >= (pbl_bt_sz / BA_BYTE_LEN)) {
 					i++;
 					j = 0;
 				}
@@ -1117,7 +1121,8 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	} else {
 		u64 pbl_size = 1;
 
-		bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) / 8;
+		bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) /
+			  BA_BYTE_LEN;
 		for (i = 0; i < hr_dev->caps.pbl_hop_num; i++)
 			pbl_size *= bt_size;
 		if (n > pbl_size) {
......
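
The reg_user_mr bound multiplies the per-chunk fan-out once per hop: each table chunk holds bt_size = 2^(pbl_ba_pg_sz + PAGE_SHIFT) / BA_BYTE_LEN entries, so hop_num hops can describe bt_size^hop_num pages. Worked through with assumed values pbl_ba_pg_sz = 0, 4 KB pages, and two hops:

	u64 bt_size  = (1 << (0 + 12)) / BA_BYTE_LEN;	/* 4096 / 8 = 512       */
	u64 pbl_size = bt_size * bt_size;		/* 512^2 = 262144 pages */
	/* 262144 pages * 4 KB = 1 GB of memory behind a single two-hop PBL. */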