Commit dc93a0d9 authored by Lang Cheng, committed by Jason Gunthorpe

RDMA/hns: Fix coding style issues

Just format the code without changing any logic: fix some redundant and
missing blanks and spaces, and adjust the order of local variable
definitions.

Link: https://lore.kernel.org/r/1607650657-35992-8-git-send-email-liweihang@huawei.com
Signed-off-by: Lang Cheng <chenglang@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 29b52027
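
Note (not part of the patch): most of the hunks below only reorder local variable declarations so that longer lines sit above shorter ones, the layout often called "reverse Christmas tree"; no logic changes. A standalone sketch of the pattern, using hypothetical names rather than driver symbols:

/* Hypothetical sketch (not driver code); "struct device" is only
 * forward-declared so this builds outside the kernel tree.
 */
struct device;

struct example_dev {
        struct device *parent;
};

static int example_open(struct example_dev *edev)
{
        /* Before the cleanup the same declarations might read:
         *     int ret = 0;
         *     struct device *parent = edev->parent;
         *     unsigned long flags = 0;
         * The patch sorts them longest-first with no other change:
         */
        struct device *parent = edev->parent;
        unsigned long flags = 0;
        int ret = 0;

        (void)parent;
        (void)flags;
        return ret;
}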
@@ -93,8 +93,8 @@ static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
 void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
                         u64 out_param)
 {
-        struct hns_roce_cmd_context
-                *context = &hr_dev->cmd.context[token & hr_dev->cmd.token_mask];
+        struct hns_roce_cmd_context *context =
+                &hr_dev->cmd.context[token % hr_dev->cmd.max_cmds];
 
         if (token != context->token)
                 return;
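
Note (not part of the patch): the only non-whitespace change here swaps `token & hr_dev->cmd.token_mask` for `token % hr_dev->cmd.max_cmds`. Assuming the command queue keeps max_cmds a power of two with token_mask == max_cmds - 1, which is how such masks are normally derived, the two expressions select the same slot. A standalone check of that equivalence:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* Assumed invariant: queue size is a power of two and the mask is
         * size - 1; the driver's real values may differ.
         */
        const uint16_t max_cmds = 16;
        const uint16_t token_mask = max_cmds - 1;

        for (uint32_t token = 0; token < 4096; token++)
                assert((token & token_mask) == (token % max_cmds));

        return 0;
}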
@@ -164,8 +164,8 @@ static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
         int ret;
 
         down(&hr_dev->cmd.event_sem);
-        ret = __hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
-                                       in_modifier, op_modifier, op, timeout);
+        ret = __hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param, in_modifier,
+                                       op_modifier, op, timeout);
         up(&hr_dev->cmd.event_sem);
 
         return ret;
@@ -231,9 +231,8 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
         struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
         int i;
 
-        hr_cmd->context = kmalloc_array(hr_cmd->max_cmds,
-                                        sizeof(*hr_cmd->context),
-                                        GFP_KERNEL);
+        hr_cmd->context =
+                kcalloc(hr_cmd->max_cmds, sizeof(*hr_cmd->context), GFP_KERNEL);
         if (!hr_cmd->context)
                 return -ENOMEM;
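
Note (not part of the patch): besides the reflow, kmalloc_array() becomes kcalloc() here; kcalloc() is the same overflow-checked array allocation with __GFP_ZERO added, so the context array now starts out zeroed. A minimal kernel-context sketch with a hypothetical element type:

#include <linux/slab.h>

struct example_ctx {
        int token;
};

static struct example_ctx *alloc_ctx_array(size_t n)
{
        /* kmalloc_array(n, size, GFP_KERNEL) checks n * size for overflow
         * but leaves the memory uninitialized; kcalloc() performs the same
         * check and also zeroes the buffer before returning it.
         */
        return kcalloc(n, sizeof(struct example_ctx), GFP_KERNEL);
}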
@@ -262,8 +261,8 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
         hr_cmd->use_events = 0;
 }
 
-struct hns_roce_cmd_mailbox
-        *hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev)
+struct hns_roce_cmd_mailbox *
+hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev)
 {
         struct hns_roce_cmd_mailbox *mailbox;
@@ -271,8 +270,8 @@ struct hns_roce_cmd_mailbox
         if (!mailbox)
                 return ERR_PTR(-ENOMEM);
 
-        mailbox->buf = dma_pool_alloc(hr_dev->cmd.pool, GFP_KERNEL,
-                                      &mailbox->dma);
+        mailbox->buf =
+                dma_pool_alloc(hr_dev->cmd.pool, GFP_KERNEL, &mailbox->dma);
         if (!mailbox->buf) {
                 kfree(mailbox);
                 return ERR_PTR(-ENOMEM);
...
@@ -143,8 +143,8 @@ int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
                       unsigned long in_modifier, u8 op_modifier, u16 op,
                       unsigned long timeout);
 
-struct hns_roce_cmd_mailbox
-        *hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev);
+struct hns_roce_cmd_mailbox *
+hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev);
 void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
                                struct hns_roce_cmd_mailbox *mailbox);
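
Note (not part of the patch): the mailbox allocator's definition and prototype are rewrapped so the return type, including its '*', fills the first line and the function name starts the next one, instead of the '*' being glued to an indented name. A hypothetical example of the same wrapping outside the driver:

#include <stdlib.h>

struct widget {
        int id;
};

/* Previously the declaration was split as:
 *     struct widget
 *             *make_widget(int id);
 * The cleanup keeps the '*' on the type line and the name at column zero:
 */
struct widget *
make_widget(int id)
{
        struct widget *w = malloc(sizeof(*w));

        if (w)
                w->id = id;
        return w;
}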
...
@@ -40,9 +40,9 @@
 static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 {
+        struct ib_device *ibdev = &hr_dev->ib_dev;
         struct hns_roce_cmd_mailbox *mailbox;
         struct hns_roce_cq_table *cq_table;
-        struct ib_device *ibdev = &hr_dev->ib_dev;
         u64 mtts[MTT_MIN_COUNT] = { 0 };
         dma_addr_t dma_handle;
         int ret;
...
@@ -209,9 +209,9 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
 {
         struct device *dev = hr_dev->dev;
         u32 chunk_ba_num;
+        u32 chunk_size;
         u32 table_idx;
         u32 bt_num;
-        u32 chunk_size;
 
         if (get_hem_table_config(hr_dev, mhop, table->type))
                 return -EINVAL;
@@ -343,15 +343,15 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
 {
         spinlock_t *lock = &hr_dev->bt_cmd_lock;
         struct device *dev = hr_dev->dev;
-        long end;
-        unsigned long flags;
         struct hns_roce_hem_iter iter;
         void __iomem *bt_cmd;
         __le32 bt_cmd_val[2];
         __le32 bt_cmd_h = 0;
+        unsigned long flags;
         __le32 bt_cmd_l;
-        u64 bt_ba;
         int ret = 0;
+        u64 bt_ba;
+        long end;
 
         /* Find the HEM(Hardware Entry Memory) entry */
         unsigned long i = (obj & (table->num_obj - 1)) /
@@ -651,8 +651,8 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
                        struct hns_roce_hem_table *table, unsigned long obj)
 {
         struct device *dev = hr_dev->dev;
-        int ret = 0;
         unsigned long i;
+        int ret = 0;
 
         if (hns_roce_check_whether_mhop(hr_dev, table->type))
                 return hns_roce_table_mhop_get(hr_dev, table, obj);
@@ -800,14 +800,14 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
         struct hns_roce_hem_chunk *chunk;
         struct hns_roce_hem_mhop mhop;
         struct hns_roce_hem *hem;
-        void *addr = NULL;
         unsigned long mhop_obj = obj;
         unsigned long obj_per_chunk;
         unsigned long idx_offset;
         int offset, dma_offset;
+        void *addr = NULL;
+        u32 hem_idx = 0;
         int length;
         int i, j;
-        u32 hem_idx = 0;
 
         if (!table->lowmem)
                 return NULL;
@@ -977,8 +977,8 @@ static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
 {
         struct hns_roce_hem_mhop mhop;
         u32 buf_chunk_size;
-        int i;
         u64 obj;
+        int i;
 
         if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
                 return;
@@ -1313,8 +1313,8 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
                                   const struct hns_roce_buf_region *regions,
                                   int region_cnt)
 {
-        struct roce_hem_item *hem, *temp_hem, *root_hem;
         struct list_head temp_list[HNS_ROCE_MAX_BT_REGION];
+        struct roce_hem_item *hem, *temp_hem, *root_hem;
         const struct hns_roce_buf_region *r;
         struct list_head temp_root;
         struct list_head temp_btm;
@@ -1419,8 +1419,8 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
 {
         const struct hns_roce_buf_region *r;
         int ofs, end;
-        int ret;
         int unit;
+        int ret;
         int i;
 
         if (region_cnt > HNS_ROCE_MAX_BT_REGION) {
...
@@ -175,4 +175,4 @@ static inline dma_addr_t hns_roce_hem_addr(struct hns_roce_hem_iter *iter)
         return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
 }
 
-#endif /*_HNS_ROCE_HEM_H*/
+#endif /* _HNS_ROCE_HEM_H */
@@ -239,7 +239,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
                         break;
                 }
 
-                /*Ctrl field, ctrl set type: sig, solic, imm, fence */
+                /* Ctrl field, ctrl set type: sig, solic, imm, fence */
                 /* SO wait for conforming application scenarios */
                 ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
                               cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
@@ -300,7 +300,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
                                 }
 
                                 ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE);
                         } else {
-                                /*sqe num is two */
+                                /* sqe num is two */
                                 for (i = 0; i < wr->num_sge; i++)
                                         set_data_seg(dseg + i, wr->sg_list + i);
@@ -1165,7 +1165,7 @@ static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
         }
         raq->e_raq_buf->map = addr;
 
-        /* Configure raq extended address. 48bit 4K align*/
+        /* Configure raq extended address. 48bit 4K align */
         roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);
 
         /* Configure raq_shift */
@@ -2760,7 +2760,6 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
                 roce_set_field(context->qpc_bytes_16,
                                QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
                                QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
-
         } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
                 roce_set_field(context->qpc_bytes_4,
                                QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
@@ -3795,7 +3794,6 @@ static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
         int event_type;
 
         while ((aeqe = next_aeqe_sw_v1(eq))) {
-
                 /* Make sure we read the AEQ entry after we have checked the
                  * ownership bit
                  */
@@ -3900,7 +3898,6 @@ static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
         u32 cqn;
 
         while ((ceqe = next_ceqe_sw_v1(eq))) {
-
                 /* Make sure we read CEQ entry after we have checked the
                  * ownership bit
                  */
...
@@ -419,7 +419,7 @@ struct hns_roce_wqe_data_seg {
 
 struct hns_roce_wqe_raddr_seg {
         __le32 rkey;
-        __le32 len;/* reserved */
+        __le32 len; /* reserved */
         __le64 raddr;
 };
...
@@ -2451,7 +2451,6 @@ static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
                 if (i < (pg_num - 1))
                         entry[i].blk_ba1_nxt_ptr |=
                                 (i + 1) << HNS_ROCE_LINK_TABLE_NXT_PTR_S;
-
         }
 
         link_tbl->npages = pg_num;
         link_tbl->pg_sz = buf_chk_sz;
@@ -5619,16 +5618,14 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
                 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
                         hns_roce_cq_event(hr_dev, cqn, event_type);
                         break;
-                case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
-                        break;
                 case HNS_ROCE_EVENT_TYPE_MB:
                         hns_roce_cmd_event(hr_dev,
                                            le16_to_cpu(aeqe->event.cmd.token),
                                            aeqe->event.cmd.status,
                                            le64_to_cpu(aeqe->event.cmd.out_param));
                         break;
+                case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
                 case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
-                        break;
                 case HNS_ROCE_EVENT_TYPE_FLR:
                         break;
                 default:
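
Note (not part of the patch): moving HNS_ROCE_EVENT_TYPE_DB_OVERFLOW down beside the other ignored event types lets the empty cases fall through to one shared break; since none of them had a body, behaviour is unchanged. A standalone sketch of the pattern with hypothetical event names:

#include <stdio.h>

enum example_event {
        EV_HANDLED,
        EV_IGNORED_A,
        EV_IGNORED_B,
        EV_IGNORED_C,
};

static void dispatch(enum example_event ev)
{
        switch (ev) {
        case EV_HANDLED:
                printf("handling event\n");
                break;
        /* Empty case labels may be stacked so the ignored events share a
         * single break instead of carrying one break per label.
         */
        case EV_IGNORED_A:
        case EV_IGNORED_B:
        case EV_IGNORED_C:
                break;
        default:
                printf("unknown event\n");
                break;
        }
}

int main(void)
{
        dispatch(EV_HANDLED);
        dispatch(EV_IGNORED_B);
        return 0;
}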
...
@@ -449,7 +449,7 @@ struct hns_roce_srq_context {
 #define SRQC_BYTE_60_SRQ_DB_RECORD_ADDR_S 1
 #define SRQC_BYTE_60_SRQ_DB_RECORD_ADDR_M GENMASK(31, 1)
 
-enum{
+enum {
         V2_MPT_ST_VALID = 0x1,
         V2_MPT_ST_FREE = 0x2,
 };
@@ -1094,8 +1094,8 @@ struct hns_roce_v2_ud_send_wqe {
         u8 sgid_index;
         u8 smac_index;
         u8 dgid[GID_LEN_V2];
 };
 
 #define V2_UD_SEND_WQE_BYTE_4_OPCODE_S 0
 #define V2_UD_SEND_WQE_BYTE_4_OPCODE_M GENMASK(4, 0)
...
@@ -555,8 +555,8 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 
 static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 {
-        int ret;
         struct device *dev = hr_dev->dev;
+        int ret;
 
         ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
                                       HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
@@ -713,8 +713,8 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
  */
 static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 {
-        int ret;
         struct device *dev = hr_dev->dev;
+        int ret;
 
         spin_lock_init(&hr_dev->sm_lock);
         spin_lock_init(&hr_dev->bt_cmd_lock);
@@ -838,8 +838,8 @@ void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev)
 
 int hns_roce_init(struct hns_roce_dev *hr_dev)
 {
-        int ret;
         struct device *dev = hr_dev->dev;
+        int ret;
 
         if (hr_dev->hw->reset) {
                 ret = hr_dev->hw->reset(hr_dev, true);
...
@@ -167,10 +167,10 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
 static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
                               struct hns_roce_mr *mr)
 {
-        int ret;
         unsigned long mtpt_idx = key_to_hw_index(mr->key);
-        struct device *dev = hr_dev->dev;
         struct hns_roce_cmd_mailbox *mailbox;
+        struct device *dev = hr_dev->dev;
+        int ret;
 
         /* Allocate mailbox memory */
         mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
...
@@ -113,8 +113,8 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
 static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
                                  enum hns_roce_event type)
 {
-        struct ib_event event;
         struct ib_qp *ibqp = &hr_qp->ibqp;
+        struct ib_event event;
 
         if (ibqp->event_handler) {
                 event.device = ibqp->device;
...
@@ -239,7 +239,6 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
                         err = -ENOMEM;
                         goto err_idx_mtr;
                 }
-
         }
 
         return 0;
...