Commit 45225a93 authored by Shiraz Saleem, committed by Jason Gunthorpe

RDMA/irdma: Propagate error codes

All functions now return Linux error codes. Propagate the code returned by
these functions instead of converting it to a generic value.

Link: https://lore.kernel.org/r/20220217151851.1518-3-shiraz.saleem@intel.com
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 2c4b14ea
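
The change follows one pattern throughout: the callee already returns a negative errno, so the caller hands that value back instead of overwriting it with a generic -EINVAL, -EIO or -ENOMEM. Below is a minimal, self-contained C sketch of the before/after shape; the helper and wrapper names (foo_alloc_resources, setup_foo_old, setup_foo_new) are hypothetical and are not taken from the driver.

#include <errno.h>
#include <stdio.h>

/* Hypothetical helper that reports failures as negative errno codes. */
static int foo_alloc_resources(int want)
{
	return want > 8 ? -EBUSY : 0;
}

/* Old style: the helper's specific errno is thrown away. */
static int setup_foo_old(int want)
{
	int status = foo_alloc_resources(want);

	if (status)
		return -ENOMEM;	/* caller can only ever see -ENOMEM */
	return 0;
}

/* New style: propagate whatever errno the helper reported. */
static int setup_foo_new(int want)
{
	int status = foo_alloc_resources(want);

	if (status)
		return status;	/* e.g. -EBUSY here */
	return 0;
}

int main(void)
{
	/* Prints "old: -12, new: -16" on Linux (ENOMEM vs. EBUSY). */
	printf("old: %d, new: %d\n", setup_foo_old(16), setup_foo_new(16));
	return 0;
}

Each hunk below applies this same substitution at one call site.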
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -1099,7 +1099,7 @@ static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
 	irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);
 	if (status) {
 		ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
-		return -EINVAL;
+		return status;
 	}
 
 	msix_vec->ceq_id = ceq_id;
--- a/drivers/infiniband/hw/irdma/main.c
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -176,7 +176,7 @@ static int irdma_lan_register_qset(struct irdma_sc_vsi *vsi,
 	ret = ice_add_rdma_qset(pf, &qset);
 	if (ret) {
 		ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n");
-		return -EINVAL;
+		return ret;
 	}
 
 	tc_node->l2_sched_node_id = qset.teid;
@@ -280,10 +280,9 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
 	irdma_fill_device_info(iwdev, pf, vsi);
 	rf = iwdev->rf;
 
-	if (irdma_ctrl_init_hw(rf)) {
-		err = -EIO;
+	err = irdma_ctrl_init_hw(rf);
+	if (err)
 		goto err_ctrl_init;
-	}
 
 	l2params.mtu = iwdev->netdev->mtu;
 	ice_get_qos_params(pf, &qos_info);
@@ -291,10 +290,9 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
 	if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
 		iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
 
-	if (irdma_rt_init_hw(iwdev, &l2params)) {
-		err = -EIO;
+	err = irdma_rt_init_hw(iwdev, &l2params);
+	if (err)
 		goto err_rt_init;
-	}
 
 	err = irdma_ib_register_device(iwdev);
 	if (err)
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -603,7 +603,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
 	status = irdma_get_sqdepth(uk_attrs, ukinfo->sq_size, sqshift,
 				   &sqdepth);
 	if (status)
-		return -ENOMEM;
+		return status;
 
 	if (uk_attrs->hw_rev == IRDMA_GEN_1)
 		rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
@@ -614,7 +614,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
 	status = irdma_get_rqdepth(uk_attrs, ukinfo->rq_size, rqshift,
 				   &rqdepth);
 	if (status)
-		return -ENOMEM;
+		return status;
 
 	iwqp->kqp.sq_wrid_mem =
 		kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
@@ -688,7 +688,7 @@ static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
 	status = irdma_handle_cqp_op(rf, cqp_request);
 	irdma_put_cqp_request(&rf->cqp, cqp_request);
 
-	return status ? -ENOMEM : 0;
+	return status;
 }
 
 static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
@@ -2316,7 +2316,7 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
 	status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
 				false);
 	if (status)
-		return -ENOMEM;
+		return status;
 
 	iwpbl->pbl_allocated = true;
 	level = palloc->level;
@@ -2457,7 +2457,7 @@ static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
 
-	return status ? -ENOMEM : 0;
+	return status;
 }
 
 /**
@@ -3553,7 +3553,7 @@ static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc
 	ibdev_dbg(&iwdev->ibdev, "%s: Error polling CQ, irdma_err: %d\n",
 		  __func__, ret);
 
-	return -EINVAL;
+	return ret;
 }
 
 /**
@@ -3873,10 +3873,8 @@ static int irdma_mcast_cqp_op(struct irdma_device *iwdev,
 	cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
-	if (status)
-		return -ENOMEM;
 
-	return 0;
+	return status;
 }
 
 /**