Commit edb20a1b authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma fixes from Doug Ledford:
 "Fourth set of -rc fixes for 4.13 cycle. This is all of the -rc fixes
  that we know of. I suspect this will be the last rc pull request, but
  you never know, I could be wrong.

   Nothing major here. There are the i40iw patches I mentioned in my last
   pull request, minus one that I pulled out because it wasn't a fix and
   so wasn't appropriate for the rc cycle. Then a few other items trickled
   in and were added to the pull request. It's fairly small aside from
   those five i40iw patches:

   - Set of five i40iw fixes (the first of these is rather large by line
     count, but I decided to send it because it fixes a legitimate issue
     and the line count comes from creating a new function and using it
     where needed instead of just patching up a few lines... a smaller
     fix could probably be done, but the larger fix is the better code
     solution)

   - One vmw_pvrdma fix

   - One hns_roce fix (this silences a checker warning about a case that
     can't actually happen; I expect a patch in for-next to remove this
     check from all the drivers that share it)

   - One iw_cxgb4 fix

   - Two IB core fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
  IB/uverbs: Fix NULL pointer dereference during device removal
  IB/core: Protect sysfs entry on ib_unregister_device
  iw_cxgb4: fix misuse of integer variable
  IB/hns: fix memory leak on ah on error return path
  i40iw: Fix potential fcn_id_array out of bounds
  i40iw: Use correct alignment for CQ0 memory
  i40iw: Fix typecast of tcp_seq_num
  i40iw: Correct variable names
  i40iw: Fix parsing of query/commit FPM buffers
  RDMA/vmw_pvrdma: Report CQ missed events
parents 039a8e38 870201f9
@@ -537,10 +537,11 @@ void ib_unregister_device(struct ib_device *device)
 	}
 	up_read(&lists_rwsem);

-	mutex_unlock(&device_mutex);
-
 	ib_device_unregister_rdmacg(device);
 	ib_device_unregister_sysfs(device);
+	mutex_unlock(&device_mutex);
+
 	ib_cache_cleanup_one(device);
 	ib_security_destroy_port_pkey_list(device);
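The change above keeps device_mutex held until the sysfs entry is actually gone, so a concurrent user can no longer observe a half-unregistered device. A minimal userspace sketch of the same ordering rule, using pthreads instead of the kernel mutex API (the registry and all names here are hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static int device_visible;	/* stands in for the sysfs entry */

/* Wrong order: dropping the lock before removing the visible entry
 * leaves a window where a concurrent lookup sees a dying device. */
static void unregister_racy(void)
{
	pthread_mutex_lock(&registry_lock);
	/* remove from internal lists ... */
	pthread_mutex_unlock(&registry_lock);
	device_visible = 0;	/* too late: a lookup may run first */
}

/* Fixed order, mirroring the patch: the visible entry is removed
 * while the lock is still held. */
static void unregister_safe(void)
{
	pthread_mutex_lock(&registry_lock);
	/* remove from internal lists ... */
	device_visible = 0;	/* like ib_device_unregister_sysfs() */
	pthread_mutex_unlock(&registry_lock);
}

int main(void)
{
	device_visible = 1;
	unregister_safe();
	printf("visible=%d\n", device_visible);
	return 0;
}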
@@ -1153,7 +1153,6 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
 		kref_get(&file->ref);
 		mutex_unlock(&uverbs_dev->lists_mutex);
-		ib_uverbs_event_handler(&file->event_handler, &event);

 		mutex_lock(&file->cleanup_mutex);
 		ucontext = file->ucontext;
@@ -1170,6 +1169,7 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
 			 * for example due to freeing the resources
 			 * (e.g mmput).
 			 */
+			ib_uverbs_event_handler(&file->event_handler, &event);
 			ib_dev->disassociate_ucontext(ucontext);
 			mutex_lock(&file->cleanup_mutex);
 			ib_uverbs_cleanup_ucontext(file, ucontext, true);
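The uverbs patch moves the fatal-event delivery from before the cleanup_mutex is taken to the branch that has verified the file still owns a ucontext, which appears to be what closes the NULL dereference. A rough userspace analogue of the check-then-notify reordering (all names hypothetical):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct file_state {
	pthread_mutex_t cleanup_mutex;
	void *ucontext;		/* NULL once the file is torn down */
};

static void notify_fatal(struct file_state *f)
{
	printf("fatal event for file %p\n", (void *)f);
}

/* Take cleanup_mutex, check ucontext, and only then deliver the
 * event; firing it before the check, as the removed line did, can
 * act on a file whose context is already gone. */
static void disassociate(struct file_state *f)
{
	pthread_mutex_lock(&f->cleanup_mutex);
	if (f->ucontext) {
		notify_fatal(f);	/* moved after the check */
		f->ucontext = NULL;	/* like disassociate_ucontext() */
	}
	pthread_mutex_unlock(&f->cleanup_mutex);
}

int main(void)
{
	struct file_state f = { PTHREAD_MUTEX_INITIALIZER, &f };
	disassociate(&f);
	return 0;
}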
@@ -661,7 +661,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
 	rhp = php->rhp;

 	if (mr_type != IB_MR_TYPE_MEM_REG ||
-	    max_num_sg > t4_max_fr_depth(&rhp->rdev.lldi.ulptx_memwrite_dsgl &&
+	    max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl &&
 					 use_dsgl))
 		return ERR_PTR(-EINVAL);
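The iw_cxgb4 fix is a one-character operator-precedence bug: unary & binds tighter than &&, so the old expression tested the field's address (always non-NULL) rather than its value. A standalone demonstration of the trap (struct and values are made up; compilers typically flag this with -Waddress):

#include <stdio.h>

struct lldi_info { int ulptx_memwrite_dsgl; };

int main(void)
{
	struct lldi_info lldi = { .ulptx_memwrite_dsgl = 0 };
	int use_dsgl = 1;

	/* Buggy form: parses as (&lldi.ulptx_memwrite_dsgl) && use_dsgl,
	 * and the address is never NULL, so the field value is ignored. */
	int buggy = &lldi.ulptx_memwrite_dsgl && use_dsgl;

	/* Fixed form: tests the value itself. */
	int fixed = lldi.ulptx_memwrite_dsgl && use_dsgl;

	printf("buggy=%d fixed=%d\n", buggy, fixed);	/* buggy=1 fixed=0 */
	return 0;
}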
@@ -64,8 +64,10 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
 	} else {
 		u8 *dmac = rdma_ah_retrieve_dmac(ah_attr);

-		if (!dmac)
+		if (!dmac) {
+			kfree(ah);
 			return ERR_PTR(-EINVAL);
+		}

 		memcpy(ah->av.mac, dmac, ETH_ALEN);
 	}
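The hns_roce change is the classic error-path leak: an object allocated earlier in the function escaped when a later check bailed out. A minimal userspace sketch of the pattern, with malloc/free standing in for kzalloc/kfree and hypothetical names:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct ah { unsigned char mac[6]; };

/* Returns NULL on failure; frees 'ah' on every error path so the
 * early return cannot leak it, matching the shape of the fix. */
static struct ah *create_ah(const unsigned char *dmac)
{
	struct ah *ah = malloc(sizeof(*ah));

	if (!ah)
		return NULL;

	if (!dmac) {
		free(ah);	/* the line the patch adds (kfree there) */
		errno = EINVAL;
		return NULL;
	}

	memcpy(ah->mac, dmac, sizeof(ah->mac));
	return ah;
}

int main(void)
{
	struct ah *ah = create_ah(NULL);	/* exercises the error path */

	free(ah);	/* free(NULL) is a no-op */
	return 0;
}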
@@ -130,20 +130,32 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
 	u64 base = 0;
 	u32 i, j;
 	u32 k = 0;
-	u32 low;

 	/* copy base values in obj_info */
-	for (i = I40IW_HMC_IW_QP, j = 0;
-	     i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+	for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+		if ((i == I40IW_HMC_IW_SRQ) ||
+		    (i == I40IW_HMC_IW_FSIMC) ||
+		    (i == I40IW_HMC_IW_FSIAV)) {
+			info[i].base = 0;
+			info[i].cnt = 0;
+			continue;
+		}
 		get_64bit_val(buf, j, &temp);
 		info[i].base = RS_64_1(temp, 32) * 512;
 		if (info[i].base > base) {
 			base = info[i].base;
 			k = i;
 		}
-		low = (u32)(temp);
-		if (low)
-			info[i].cnt = low;
+		if (i == I40IW_HMC_IW_APBVT_ENTRY) {
+			info[i].cnt = 1;
+			continue;
+		}
+		if (i == I40IW_HMC_IW_QP)
+			info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
+		else if (i == I40IW_HMC_IW_CQ)
+			info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
+		else
+			info[i].cnt = (u32)(temp);
 	}
 	size = info[k].cnt * info[k].size + info[k].base;
 	if (size & 0x1FFFFF)
@@ -154,6 +166,31 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
 	return 0;
 }

+/**
+ * i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
+ * @buf: ptr to fpm query buffer
+ * @buf_idx: index into buf
+ * @info: ptr to i40iw_hmc_obj_info struct
+ * @rsrc_idx: resource index into info
+ *
+ * Decode a 64 bit value from fpm query buffer into max count and size
+ */
+static u64 i40iw_sc_decode_fpm_query(u64 *buf,
+				     u32 buf_idx,
+				     struct i40iw_hmc_obj_info *obj_info,
+				     u32 rsrc_idx)
+{
+	u64 temp;
+	u32 size;
+
+	get_64bit_val(buf, buf_idx, &temp);
+	obj_info[rsrc_idx].max_cnt = (u32)temp;
+	size = (u32)RS_64_1(temp, 32);
+	obj_info[rsrc_idx].size = LS_64_1(1, size);
+
+	return temp;
+}
+
 /**
  * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
  * @buf: ptr to fpm query buffer
@@ -168,9 +205,9 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
 				struct i40iw_hmc_info *hmc_info,
 				struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
 {
-	u64 temp;
 	struct i40iw_hmc_obj_info *obj_info;
-	u32 i, j, size;
+	u64 temp;
+	u32 size;
 	u16 max_pe_sds;

 	obj_info = hmc_info->hmc_obj;
@@ -185,41 +222,52 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
 	hmc_fpm_misc->max_sds = max_pe_sds;
 	hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;

-	for (i = I40IW_HMC_IW_QP, j = 8;
-	     i <= I40IW_HMC_IW_ARP; i++, j += 8) {
-		get_64bit_val(buf, j, &temp);
-		if (i == I40IW_HMC_IW_QP)
-			obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
-		else if (i == I40IW_HMC_IW_CQ)
-			obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
-		else
-			obj_info[i].max_cnt = (u32)temp;
-		size = (u32)RS_64_1(temp, 32);
-		obj_info[i].size = ((u64)1 << size);
-	}
-	for (i = I40IW_HMC_IW_MR, j = 48;
-	     i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
-		get_64bit_val(buf, j, &temp);
-		obj_info[i].max_cnt = (u32)temp;
-		size = (u32)RS_64_1(temp, 32);
-		obj_info[i].size = LS_64_1(1, size);
-	}
+	get_64bit_val(buf, 8, &temp);
+	obj_info[I40IW_HMC_IW_QP].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
+	size = (u32)RS_64_1(temp, 32);
+	obj_info[I40IW_HMC_IW_QP].size = LS_64_1(1, size);

-	get_64bit_val(buf, 120, &temp);
-	hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
-	get_64bit_val(buf, 120, &temp);
-	hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
-	get_64bit_val(buf, 120, &temp);
-	hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
+	get_64bit_val(buf, 16, &temp);
+	obj_info[I40IW_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
+	size = (u32)RS_64_1(temp, 32);
+	obj_info[I40IW_HMC_IW_CQ].size = LS_64_1(1, size);
+
+	i40iw_sc_decode_fpm_query(buf, 32, obj_info, I40IW_HMC_IW_HTE);
+	i40iw_sc_decode_fpm_query(buf, 40, obj_info, I40IW_HMC_IW_ARP);
+
+	obj_info[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
+	obj_info[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+
+	i40iw_sc_decode_fpm_query(buf, 48, obj_info, I40IW_HMC_IW_MR);
+	i40iw_sc_decode_fpm_query(buf, 56, obj_info, I40IW_HMC_IW_XF);
+
 	get_64bit_val(buf, 64, &temp);
+	obj_info[I40IW_HMC_IW_XFFL].max_cnt = (u32)temp;
+	obj_info[I40IW_HMC_IW_XFFL].size = 4;
 	hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
 	if (!hmc_fpm_misc->xf_block_size)
 		return I40IW_ERR_INVALID_SIZE;
+
+	i40iw_sc_decode_fpm_query(buf, 72, obj_info, I40IW_HMC_IW_Q1);
 	get_64bit_val(buf, 80, &temp);
+	obj_info[I40IW_HMC_IW_Q1FL].max_cnt = (u32)temp;
+	obj_info[I40IW_HMC_IW_Q1FL].size = 4;
 	hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
 	if (!hmc_fpm_misc->q1_block_size)
 		return I40IW_ERR_INVALID_SIZE;
+
+	i40iw_sc_decode_fpm_query(buf, 88, obj_info, I40IW_HMC_IW_TIMER);
+
+	get_64bit_val(buf, 112, &temp);
+	obj_info[I40IW_HMC_IW_PBLE].max_cnt = (u32)temp;
+	obj_info[I40IW_HMC_IW_PBLE].size = 8;
+
+	get_64bit_val(buf, 120, &temp);
+	hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
+	hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
+	hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
+
 	return 0;
 }
@@ -3392,13 +3440,6 @@ enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_
 		hmc_info->sd_table.sd_entry = virt_mem.va;
 	}

-	/* fill size of objects which are fixed */
-	hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4;
-	hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4;
-	hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8;
-	hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
-	hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
-
 	return ret_code;
 }
@@ -4840,7 +4881,7 @@ void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
 {
 	u8 fcn_id = vsi->fcn_id;

-	if ((vsi->stats_fcn_id_alloc) && (fcn_id != I40IW_INVALID_FCN_ID))
+	if (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT)
 		vsi->dev->fcn_id_array[fcn_id] = false;
 	i40iw_hw_stats_stop_timer(vsi);
 }
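The new i40iw_sc_decode_fpm_query() helper treats each 64-bit FPM query word as a packed pair: the low 32 bits carry the maximum object count and the high 32 bits carry log2 of the object size (RS_64_1(temp, 32) is a 32-bit right shift, LS_64_1(1, size) a left shift of 1). A self-contained userspace decode of one such word, using a made-up sample value:

#include <stdint.h>
#include <stdio.h>

struct hmc_obj_info {
	uint32_t max_cnt;
	uint64_t size;
};

/* Userspace rendering of the helper: low 32 bits -> max count,
 * high 32 bits -> log2(size), so size = 1 << shift. */
static uint64_t decode_fpm_query(uint64_t word, struct hmc_obj_info *info)
{
	uint32_t shift = (uint32_t)(word >> 32);	/* RS_64_1(temp, 32) */

	info->max_cnt = (uint32_t)word;
	info->size = 1ULL << shift;			/* LS_64_1(1, size) */
	return word;
}

int main(void)
{
	struct hmc_obj_info info;

	/* Hypothetical word: size shift 8 (256-byte objects), 4096 max. */
	decode_fpm_query(((uint64_t)8 << 32) | 4096, &info);
	printf("max_cnt=%u size=%llu\n", info.max_cnt,
	       (unsigned long long)info.size);		/* max_cnt=4096 size=256 */
	return 0;
}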
@@ -1507,8 +1507,8 @@ enum {
 	I40IW_CQ0_ALIGNMENT_MASK = (256 - 1),
 	I40IW_HOST_CTX_ALIGNMENT_MASK = (4 - 1),
 	I40IW_SHADOWAREA_MASK = (128 - 1),
-	I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = 0,
-	I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = 0
+	I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = (4 - 1),
+	I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = (4 - 1)
 };

 enum i40iw_alignment {
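An alignment mask of 0 makes every address pass the check, which is presumably why the FPM query/commit buffer masks move to (4 - 1): an address is aligned iff the low bits selected by the mask are all zero. A quick check-only illustration:

#include <stdint.h>
#include <stdio.h>

/* Aligned iff the low bits selected by the mask are 0; a mask of 0
 * therefore accepts every address, even odd ones. */
static int is_aligned(uint64_t addr, uint64_t mask)
{
	return (addr & mask) == 0;
}

int main(void)
{
	uint64_t addr = 0x1002;

	printf("mask 0:       %d\n", is_aligned(addr, 0));	/* 1: no check */
	printf("mask (4 - 1): %d\n", is_aligned(addr, 4 - 1));	/* 0: caught */
	return 0;
}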
@@ -685,7 +685,7 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
 	cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
 	tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
 	ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
-				     I40IW_CQ0_ALIGNMENT_MASK);
+				     I40IW_CQ0_ALIGNMENT);
 	if (ret)
 		return ret;
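This hunk fixes the complementary mistake: a power-of-two alignment A pairs with mask A - 1, and the two are not interchangeable as arguments, yet the allocation passed the 255 mask where the 256 alignment belongs. A small demonstration of the round-up arithmetic and of what a mask-as-alignment mix-up produces:

#include <stdint.h>
#include <stdio.h>

/* Round addr up to the next multiple of 'align' (a power of two). */
static uint64_t align_up(uint64_t addr, uint64_t align)
{
	uint64_t mask = align - 1;	/* e.g. 256 -> 0xFF */

	return (addr + mask) & ~mask;
}

int main(void)
{
	uint64_t addr = 0x1001;

	/* Correct: 256-byte alignment, as CQ0 memory expects. */
	printf("aligned to 256: 0x%llx\n",
	       (unsigned long long)align_up(addr, 256));	/* 0x1100 */

	/* Buggy: feeding the mask value (255) as if it were an
	 * alignment yields a non-power-of-two "boundary". */
	printf("'aligned' to 255: 0x%llx\n",
	       (unsigned long long)align_up(addr, 255));	/* 0x1001: unchanged */
	return 0;
}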
@@ -62,7 +62,7 @@ enum i40iw_status_code {
 	I40IW_ERR_INVALID_ALIGNMENT = -23,
 	I40IW_ERR_FLUSHED_QUEUE = -24,
 	I40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25,
-	I40IW_ERR_INVALID_IMM_DATA_SIZE = -26,
+	I40IW_ERR_INVALID_INLINE_DATA_SIZE = -26,
 	I40IW_ERR_TIMEOUT = -27,
 	I40IW_ERR_OPCODE_MISMATCH = -28,
 	I40IW_ERR_CQP_COMPL_ERROR = -29,
@@ -435,7 +435,7 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
 	op_info = &info->op.inline_rdma_write;
 	if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
-		return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+		return I40IW_ERR_INVALID_INLINE_DATA_SIZE;

 	ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
 	if (ret_code)
@@ -511,7 +511,7 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
 	op_info = &info->op.inline_send;
 	if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
-		return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+		return I40IW_ERR_INVALID_INLINE_DATA_SIZE;

 	ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
 	if (ret_code)
@@ -784,7 +784,7 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
 	get_64bit_val(cqe, 0, &qword0);
 	get_64bit_val(cqe, 16, &qword2);

-	info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM);
+	info->tcp_seq_num = (u32)RS_64(qword0, I40IWCQ_TCPSEQNUM);

 	info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);
@@ -1187,7 +1187,7 @@ enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
 							  u8 *wqe_size)
 {
 	if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
-		return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+		return I40IW_ERR_INVALID_INLINE_DATA_SIZE;

 	if (data_size <= 16)
 		*wqe_size = I40IW_QP_WQE_MIN_SIZE;
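Besides the status-code rename, the tcp_seq_num change fixes a truncating cast: the field is 32 bits wide, and casting the extracted value to u8 silently kept only the low byte. A two-line illustration with an arbitrary sample value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t qword0 = 0x12345678;		/* pretend CQE word */
	uint32_t seq32 = (uint32_t)qword0;	/* fixed cast:  0x12345678 */
	uint8_t seq8 = (uint8_t)qword0;		/* buggy cast:  0x78 */

	printf("u32=0x%x u8=0x%x\n", seq32, seq8);
	return 0;
}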
@@ -65,13 +65,28 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq,
 	struct pvrdma_dev *dev = to_vdev(ibcq->device);
 	struct pvrdma_cq *cq = to_vcq(ibcq);
 	u32 val = cq->cq_handle;
+	unsigned long flags;
+	int has_data = 0;

 	val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
 		PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM;

+	spin_lock_irqsave(&cq->cq_lock, flags);
+
 	pvrdma_write_uar_cq(dev, val);

-	return 0;
+	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
+		unsigned int head;
+
+		has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
+						    cq->ibcq.cqe, &head);
+		if (unlikely(has_data == PVRDMA_INVALID_IDX))
+			dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
+	}
+
+	spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+	return has_data;
 }

 /**
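The pvrdma change matters because of how consumers use IB_CQ_REPORT_MISSED_EVENTS: a positive return from the notify call means completions may have slipped in between the final poll and re-arming, so the caller must poll again instead of sleeping until the next event. A runnable userspace model of that drain/re-arm loop (the ring and helpers are hypothetical stand-ins):

#include <stdio.h>

static int ring_entries = 3;	/* completions waiting in the CQ ring */
static int late_completion = 1;	/* one completion arrives mid-drain */

static void drain(void)
{
	printf("polled %d completion(s)\n", ring_entries);
	ring_entries = 0;
	if (late_completion) {		/* races in after the last poll */
		late_completion = 0;
		ring_entries = 1;
	}
}

/* Models req_notify_cq(..., IB_CQ_REPORT_MISSED_EVENTS): re-arms and
 * returns >0 if the ring still holds data, as the pvrdma fix does. */
static int req_notify(void)
{
	/* arm_interrupt() would go here */
	return ring_entries > 0;
}

int main(void)
{
	do {
		drain();
	} while (req_notify() > 0);	/* >0: poll again, don't sleep */
	return 0;
}

Without the fix, the driver always returned 0 here, so a completion landing in that window could go unnoticed until the next unrelated event.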