Commit d54699e2 authored by Tony Nguyen

ice: Remove enum ice_status

Replace uses of ice_status with the closest equivalent standard error codes.
Remove enum ice_status and its helper conversion function as they are no
longer needed.
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Tested-by: Gurucharan G <gurucharanx.g@intel.com>
parent 5e24d598
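
The conversion is mechanical: each ICE_ERR_* return value becomes the standard errno that ice_status_to_errno() used to produce for it, so helpers return plain int and callers propagate the value unchanged. A minimal sketch of the before/after pattern (the function and parameter names here are illustrative, not taken from the driver):

/* Before: helpers returned driver-private status codes that every
 * caller had to translate at the driver boundary.
 */
enum ice_status example_alloc_old(struct device *dev, void **buf, size_t len)
{
	*buf = devm_kzalloc(dev, len, GFP_KERNEL);
	if (!*buf)
		return ICE_ERR_NO_MEMORY;	/* caller must convert */
	return 0;				/* ICE_SUCCESS == 0 */
}

/* After: the same helper returns a standard errno as int, which the
 * caller can hand up the stack unmodified.
 */
int example_alloc_new(struct device *dev, void **buf, size_t len)
{
	*buf = devm_kzalloc(dev, len, GFP_KERNEL);
	if (!*buf)
		return -ENOMEM;			/* was ICE_ERR_NO_MEMORY */
	return 0;
}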
......@@ -953,13 +953,13 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
rel_vmvf_num, NULL);
/* if the disable queue command was exercised during an
* active reset flow, ICE_ERR_RESET_ONGOING is returned.
* active reset flow, -EBUSY is returned.
* This is not an error as the reset operation disables
* queues at the hardware level anyway.
*/
if (status == ICE_ERR_RESET_ONGOING) {
if (status == -EBUSY) {
dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
} else if (status == ICE_ERR_DOES_NOT_EXIST) {
} else if (status == -ENOENT) {
dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
} else if (status) {
dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
......
......@@ -96,7 +96,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
&cq->sq.desc_buf.pa,
GFP_KERNEL | __GFP_ZERO);
if (!cq->sq.desc_buf.va)
return ICE_ERR_NO_MEMORY;
return -ENOMEM;
cq->sq.desc_buf.size = size;
cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
......@@ -107,7 +107,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->sq.desc_buf.va = NULL;
cq->sq.desc_buf.pa = 0;
cq->sq.desc_buf.size = 0;
return ICE_ERR_NO_MEMORY;
return -ENOMEM;
}
return 0;
......@@ -127,7 +127,7 @@ ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
&cq->rq.desc_buf.pa,
GFP_KERNEL | __GFP_ZERO);
if (!cq->rq.desc_buf.va)
return ICE_ERR_NO_MEMORY;
return -ENOMEM;
cq->rq.desc_buf.size = size;
return 0;
}
......@@ -165,7 +165,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
sizeof(cq->rq.desc_buf), GFP_KERNEL);
if (!cq->rq.dma_head)
return ICE_ERR_NO_MEMORY;
return -ENOMEM;
cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
/* allocate the mapped buffers */
......@@ -218,7 +218,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
cq->rq.dma_head = NULL;
return ICE_ERR_NO_MEMORY;
return -ENOMEM;
}
/**
......@@ -235,7 +235,7 @@ ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
sizeof(cq->sq.desc_buf), GFP_KERNEL);
if (!cq->sq.dma_head)
return ICE_ERR_NO_MEMORY;
return -ENOMEM;
cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
/* allocate the mapped buffers */
......@@ -266,7 +266,7 @@ ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
cq->sq.dma_head = NULL;
return ICE_ERR_NO_MEMORY;
return -ENOMEM;
}
static int
......@@ -283,7 +283,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
/* Check one register to verify that config was applied */
if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
return ICE_ERR_AQ_ERROR;
return -EIO;
return 0;
}
......@@ -367,13 +367,13 @@ static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
if (cq->sq.count > 0) {
/* queue already initialized */
ret_code = ICE_ERR_NOT_READY;
ret_code = -EBUSY;
goto init_ctrlq_exit;
}
/* verify input for valid configuration */
if (!cq->num_sq_entries || !cq->sq_buf_size) {
ret_code = ICE_ERR_CFG;
ret_code = -EIO;
goto init_ctrlq_exit;
}
......@@ -427,13 +427,13 @@ static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
if (cq->rq.count > 0) {
/* queue already initialized */
ret_code = ICE_ERR_NOT_READY;
ret_code = -EBUSY;
goto init_ctrlq_exit;
}
/* verify input for valid configuration */
if (!cq->num_rq_entries || !cq->rq_buf_size) {
ret_code = ICE_ERR_CFG;
ret_code = -EIO;
goto init_ctrlq_exit;
}
......@@ -482,7 +482,7 @@ ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
mutex_lock(&cq->sq_lock);
if (!cq->sq.count) {
ret_code = ICE_ERR_NOT_READY;
ret_code = -EBUSY;
goto shutdown_sq_out;
}
......@@ -549,7 +549,7 @@ ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
mutex_lock(&cq->rq_lock);
if (!cq->rq.count) {
ret_code = ICE_ERR_NOT_READY;
ret_code = -EBUSY;
goto shutdown_rq_out;
}
......@@ -586,7 +586,7 @@ static int ice_init_check_adminq(struct ice_hw *hw)
goto init_ctrlq_free_rq;
if (!ice_aq_ver_check(hw)) {
status = ICE_ERR_FW_API_VER;
status = -EIO;
goto init_ctrlq_free_rq;
}
......@@ -631,14 +631,14 @@ static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
cq = &hw->mailboxq;
break;
default:
return ICE_ERR_PARAM;
return -EINVAL;
}
cq->qtype = q_type;
/* verify input for valid configuration */
if (!cq->num_rq_entries || !cq->num_sq_entries ||
!cq->rq_buf_size || !cq->sq_buf_size) {
return ICE_ERR_CFG;
return -EIO;
}
/* setup SQ command write back timeout */
......@@ -763,7 +763,7 @@ int ice_init_all_ctrlq(struct ice_hw *hw)
return status;
status = ice_init_check_adminq(hw);
if (status != ICE_ERR_AQ_FW_CRITICAL)
if (status != -EIO)
break;
ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
......@@ -978,19 +978,19 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
/* if reset is in progress return a soft error */
if (hw->reset_ongoing)
return ICE_ERR_RESET_ONGOING;
return -EBUSY;
mutex_lock(&cq->sq_lock);
cq->sq_last_status = ICE_AQ_RC_OK;
if (!cq->sq.count) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
status = ICE_ERR_AQ_EMPTY;
status = -EIO;
goto sq_send_command_error;
}
if ((buf && !buf_size) || (!buf && buf_size)) {
status = ICE_ERR_PARAM;
status = -EINVAL;
goto sq_send_command_error;
}
......@@ -998,7 +998,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (buf_size > cq->sq_buf_size) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
buf_size);
status = ICE_ERR_INVAL_SIZE;
status = -EINVAL;
goto sq_send_command_error;
}
......@@ -1011,7 +1011,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (val >= cq->num_sq_entries) {
ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
val);
status = ICE_ERR_AQ_EMPTY;
status = -EIO;
goto sq_send_command_error;
}
......@@ -1028,7 +1028,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
*/
if (ice_clean_sq(hw, cq) == 0) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
status = ICE_ERR_AQ_FULL;
status = -ENOSPC;
goto sq_send_command_error;
}
......@@ -1082,7 +1082,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (copy_size > buf_size) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
copy_size, buf_size);
status = ICE_ERR_AQ_ERROR;
status = -EIO;
} else {
memcpy(buf, dma_buf->va, copy_size);
}
......@@ -1098,7 +1098,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
}
cmd_completed = true;
if (!status && retval != ICE_AQ_RC_OK)
status = ICE_ERR_AQ_ERROR;
status = -EIO;
cq->sq_last_status = (enum ice_aq_err)retval;
}
......@@ -1116,10 +1116,10 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
status = ICE_ERR_AQ_FW_CRITICAL;
status = -EIO;
} else {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
status = ICE_ERR_AQ_TIMEOUT;
status = -EIO;
}
}
......@@ -1176,7 +1176,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (!cq->rq.count) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
ret_code = ICE_ERR_AQ_EMPTY;
ret_code = -EIO;
goto clean_rq_elem_err;
}
......@@ -1185,7 +1185,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = ICE_ERR_AQ_NO_WORK;
ret_code = -EALREADY;
goto clean_rq_elem_out;
}
......@@ -1196,7 +1196,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
if (flags & ICE_AQ_FLAG_ERR) {
ret_code = ICE_ERR_AQ_ERROR;
ret_code = -EIO;
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
le16_to_cpu(desc->opcode), rq_last_status);
}
......
......@@ -2,7 +2,6 @@
/* Copyright (c) 2019, Intel Corporation. */
#include "ice_common.h"
#include "ice_lib.h"
#include "ice_sched.h"
#include "ice_dcb.h"
......@@ -31,7 +30,7 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
cmd = &desc.params.lldp_get_mib;
if (buf_size == 0 || !buf)
return ICE_ERR_PARAM;
return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib);
......@@ -609,7 +608,7 @@ ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
u16 len;
if (!lldpmib || !dcbcfg)
return ICE_ERR_PARAM;
return -EINVAL;
/* set to the start of LLDPDU */
lldpmib += ETH_HLEN;
......@@ -659,7 +658,7 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
/* Allocate the LLDPDU */
lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
if (!lldpmib)
return ICE_ERR_NO_MEMORY;
return -ENOMEM;
ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib,
ICE_LLDPDU_SIZE, NULL, NULL, NULL);
......@@ -684,7 +683,7 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
* @cd: pointer to command details structure or NULL
*
* Start/Stop the embedded dcbx Agent. In case that this wrapper function
* returns ICE_SUCCESS, caller will need to check if FW returns back the same
* returns 0, caller will need to check if FW returns back the same
* value as stated in dcbx_agent_status, and react accordingly. (0x0A09)
*/
int
......@@ -762,7 +761,7 @@ int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (status)
return ice_status_to_errno(status);
return status;
/* FW will write the PFC mode set back into cmd->pfc_mode, but if DCB is
* disabled, FW will write back 0 to cmd->pfc_mode. After the AQ has
......@@ -910,7 +909,7 @@ ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
int ret;
if (!pi)
return ICE_ERR_PARAM;
return -EINVAL;
if (dcbx_mode == ICE_DCBX_MODE_IEEE)
dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
......@@ -950,7 +949,7 @@ int ice_get_dcb_cfg(struct ice_port_info *pi)
int ret;
if (!pi)
return ICE_ERR_PARAM;
return -EINVAL;
ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
if (!ret) {
......@@ -980,7 +979,7 @@ int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
int ret = 0;
if (!hw->func_caps.common_cap.dcb)
return ICE_ERR_NOT_SUPPORTED;
return -EOPNOTSUPP;
qos_cfg->is_sw_lldp = true;
......@@ -996,7 +995,7 @@ int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
return ret;
qos_cfg->is_sw_lldp = false;
} else if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) {
return ICE_ERR_NOT_READY;
return -EBUSY;
}
/* Configure the LLDP MIB change event */
......@@ -1022,13 +1021,13 @@ int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
int ret;
if (!hw->func_caps.common_cap.dcb)
return ICE_ERR_NOT_SUPPORTED;
return -EOPNOTSUPP;
/* Get DCBX status */
qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS)
return ICE_ERR_NOT_READY;
return -EBUSY;
ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL);
if (!ret)
......@@ -1478,7 +1477,7 @@ int ice_set_dcb_cfg(struct ice_port_info *pi)
u16 miblen;
if (!pi)
return ICE_ERR_PARAM;
return -EINVAL;
hw = pi->hw;
......@@ -1487,7 +1486,7 @@ int ice_set_dcb_cfg(struct ice_port_info *pi)
/* Allocate the LLDPDU */
lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
if (!lldpmib)
return ICE_ERR_NO_MEMORY;
return -ENOMEM;
mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
......@@ -1521,7 +1520,7 @@ ice_aq_query_port_ets(struct ice_port_info *pi,
int status;
if (!pi)
return ICE_ERR_PARAM;
return -EINVAL;
cmd = &desc.params.port_ets;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets);
cmd->port_teid = pi->root->info.node_teid;
......@@ -1548,7 +1547,7 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
u8 i, j;
if (!pi)
return ICE_ERR_PARAM;
return -EINVAL;
/* suspend the missing TC nodes */
for (i = 0; i < pi->root->num_children; i++) {
teid1 = le32_to_cpu(pi->root->children[i]->info.node_teid);
......
......@@ -584,19 +584,19 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
TNL_SEG_CNT(tun), &prof);
if (status)
return ice_status_to_errno(status);
return status;
status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry1_h);
if (status) {
err = ice_status_to_errno(status);
err = status;
goto err_prof;
}
status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry2_h);
if (status) {
err = ice_status_to_errno(status);
err = status;
goto err_entry;
}
......@@ -1211,7 +1211,7 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
ice_fdir_get_prgm_desc(hw, input, &desc, add);
status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
if (status) {
err = ice_status_to_errno(status);
err = status;
goto err_free_all;
}
err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
......@@ -1226,7 +1226,7 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
status = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
is_tun);
if (status) {
err = ice_status_to_errno(status);
err = status;
goto err_frag;
}
err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);
......
......@@ -919,15 +919,15 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
if (ice_fdir_pkt[idx].flow == flow)
break;
if (idx == ICE_FDIR_NUM_PKT)
return ICE_ERR_PARAM;
return -EINVAL;
if (!tun) {
memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len);
loc = pkt;
} else {
if (!ice_get_open_tunnel_port(hw, &tnl_port, TNL_ALL))
return ICE_ERR_DOES_NOT_EXIST;
return -ENOENT;
if (!ice_fdir_pkt[idx].tun_pkt)
return ICE_ERR_PARAM;
return -EINVAL;
memcpy(pkt, ice_fdir_pkt[idx].tun_pkt,
ice_fdir_pkt[idx].tun_pkt_len);
ice_pkt_insert_u16(pkt, ICE_IPV4_UDP_DST_PORT_OFFSET,
......@@ -1111,7 +1111,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break;
default:
return ICE_ERR_PARAM;
return -EINVAL;
}
if (input->flex_fltr)
......
......@@ -634,12 +634,12 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
/* Multiple L3 headers */
if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
!is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
return ICE_ERR_PARAM;
return -EINVAL;
/* Multiple L4 headers */
if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
!is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
return ICE_ERR_PARAM;
return -EINVAL;
}
return 0;
......@@ -1035,7 +1035,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
prot_id = ICE_PROT_GRE_OF;
break;
default:
return ICE_ERR_NOT_IMPL;
return -EOPNOTSUPP;
}
/* Each extraction sequence entry is a word in size, and extracts a
......@@ -1073,7 +1073,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
* does not exceed the block's capability
*/
if (params->es_cnt >= fv_words)
return ICE_ERR_MAX_LIMIT;
return -ENOSPC;
/* some blocks require a reversed field vector layout */
if (hw->blk[params->blk].es.reverse)
......@@ -1112,12 +1112,12 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
if (params->prof->segs[seg].raws_cnt >
ARRAY_SIZE(params->prof->segs[seg].raws))
return ICE_ERR_MAX_LIMIT;
return -ENOSPC;
/* Offsets within the segment headers are not supported */
hdrs_sz = ice_flow_calc_seg_sz(params, seg);
if (!hdrs_sz)
return ICE_ERR_PARAM;
return -EINVAL;
fv_words = hw->blk[params->blk].es.fvw;
......@@ -1150,7 +1150,7 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
*/
if (params->es_cnt >= hw->blk[params->blk].es.count ||
params->es_cnt >= ICE_MAX_FV_WORDS)
return ICE_ERR_MAX_LIMIT;
return -ENOSPC;
/* some blocks require a reversed field vector layout */
if (hw->blk[params->blk].es.reverse)
......@@ -1229,7 +1229,7 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
status = 0;
break;
default:
return ICE_ERR_NOT_IMPL;
return -EOPNOTSUPP;
}
return status;
......@@ -1334,7 +1334,7 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
struct ice_flow_entry *entry)
{
if (!entry)
return ICE_ERR_BAD_PTR;
return -EINVAL;
list_del(&entry->l_entry);
......@@ -1366,16 +1366,16 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
u8 i;
if (!prof)
return ICE_ERR_BAD_PTR;
return -EINVAL;
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return ICE_ERR_NO_MEMORY;
return -ENOMEM;
params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof),
GFP_KERNEL);
if (!params->prof) {
status = ICE_ERR_NO_MEMORY;
status = -ENOMEM;
goto free_params;
}
......@@ -1544,13 +1544,13 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
int status;
if (segs_cnt > ICE_FLOW_SEG_MAX)
return ICE_ERR_MAX_LIMIT;
return -ENOSPC;
if (!segs_cnt)
return ICE_ERR_PARAM;
return -EINVAL;
if (!segs)
return ICE_ERR_BAD_PTR;
return -EINVAL;
status = ice_flow_val_hdrs(segs, segs_cnt);
if (status)
......@@ -1584,7 +1584,7 @@ ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
prof = ice_flow_find_prof_id(hw, blk, prof_id);
if (!prof) {
status = ICE_ERR_DOES_NOT_EXIST;
status = -ENOENT;
goto out;
}
......@@ -1619,23 +1619,23 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
/* No flow entry data is expected for RSS */
if (!entry_h || (!data && blk != ICE_BLK_RSS))
return ICE_ERR_BAD_PTR;
return -EINVAL;
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
return -EINVAL;
mutex_lock(&hw->fl_profs_locks[blk]);
prof = ice_flow_find_prof_id(hw, blk, prof_id);
if (!prof) {
status = ICE_ERR_DOES_NOT_EXIST;
status = -ENOENT;
} else {
/* Allocate memory for the entry being added and associate
* the VSI to the found flow profile
*/
e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
if (!e)
status = ICE_ERR_NO_MEMORY;
status = -ENOMEM;
else
status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
}
......@@ -1654,7 +1654,7 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
case ICE_BLK_RSS:
break;
default:
status = ICE_ERR_NOT_IMPL;
status = -EOPNOTSUPP;
goto out;
}
......@@ -1688,7 +1688,7 @@ int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
int status = 0;
if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
return ICE_ERR_PARAM;
return -EINVAL;
entry = ICE_FLOW_ENTRY_PTR(entry_h);
......@@ -1853,15 +1853,15 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
return ICE_ERR_PARAM;
return -EINVAL;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
if (val && !is_power_of_2(val))
return ICE_ERR_CFG;
return -EIO;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
if (val && !is_power_of_2(val))
return ICE_ERR_CFG;
return -EIO;
return 0;
}
......@@ -1906,7 +1906,7 @@ int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
int status = 0;
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
return -EINVAL;
if (list_empty(&hw->fl_profs[blk]))
return 0;
......@@ -1981,7 +1981,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
GFP_KERNEL);
if (!rss_cfg)
return ICE_ERR_NO_MEMORY;
return -ENOMEM;
rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
......@@ -2032,11 +2032,11 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
int status;
if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
return ICE_ERR_PARAM;
return -EINVAL;
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs)
return ICE_ERR_NO_MEMORY;
return -ENOMEM;
/* Construct the packet segment info from the hashed fields */
status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
......@@ -2136,7 +2136,7 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
if (hashed_flds == ICE_HASH_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
return -EINVAL;
mutex_lock(&hw->rss_locks);
status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
......@@ -2170,7 +2170,7 @@ ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs)
return ICE_ERR_NO_MEMORY;
return -ENOMEM;
/* Construct the packet segment info from the hashed fields */
status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
......@@ -2182,7 +2182,7 @@ ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
vsi_handle,
ICE_FLOW_FIND_PROF_CHK_FLDS);
if (!prof) {
status = ICE_ERR_DOES_NOT_EXIST;
status = -ENOENT;
goto out;
}
......@@ -2224,7 +2224,7 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
if (hashed_flds == ICE_HASH_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
return -EINVAL;
mutex_lock(&hw->rss_locks);
status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
......@@ -2287,12 +2287,12 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
return -EINVAL;
/* Make sure no unsupported bits are specified */
if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
return ICE_ERR_CFG;
return -EIO;
hash_flds = avf_hash;
......@@ -2352,7 +2352,7 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
}
if (rss_hash == ICE_HASH_INVALID)
return ICE_ERR_OUT_OF_RANGE;
return -EIO;
status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
ICE_FLOW_SEG_HDR_NONE);
......@@ -2374,7 +2374,7 @@ int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
struct ice_rss_cfg *r;
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
return -EINVAL;
mutex_lock(&hw->rss_locks);
list_for_each_entry(r, &hw->rss_list_head, l_entry) {
......
......@@ -275,7 +275,7 @@ ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac,
if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action)) {
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
return ICE_ERR_NO_MEMORY;
return -ENOMEM;
}
result = mac_action(vsi, &tmp_list);
......@@ -304,7 +304,7 @@ ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action) ||
ice_fltr_add_mac_to_list(vsi, &tmp_list, broadcast, action)) {
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
return ICE_ERR_NO_MEMORY;
return -ENOMEM;
}
result = mac_action(vsi, &tmp_list);
......@@ -328,7 +328,7 @@ ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id,
LIST_HEAD(tmp_list);
if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan_id, action))
return ICE_ERR_NO_MEMORY;
return -ENOMEM;
result = vlan_action(vsi, &tmp_list);
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
......@@ -352,7 +352,7 @@ ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
LIST_HEAD(tmp_list);
if (ice_fltr_add_eth_to_list(vsi, &tmp_list, ethertype, flag, action))
return ICE_ERR_NO_MEMORY;
return -ENOMEM;
result = eth_action(vsi, &tmp_list);
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
......@@ -471,7 +471,7 @@ ice_fltr_update_rule_flags(struct ice_hw *hw, u16 rule_id, u16 recipe_id,
s_rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL);
if (!s_rule)
return ICE_ERR_NO_MEMORY;
return -ENOMEM;
flags_mask = ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
act &= ~flags_mask;
......
......@@ -670,7 +670,7 @@ int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
struct ice_fwu_priv priv;
enum ice_status status;
int status;
int err;
switch (preservation) {
......
......@@ -1758,7 +1758,7 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
status = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI);
if (!status) {
vsi->num_vlan--;
} else if (status == ICE_ERR_DOES_NOT_EXIST) {
} else if (status == -ENOENT) {
dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, error: %d\n",
vid, vsi->vsi_num, status);
} else {
......@@ -3036,7 +3036,7 @@ void ice_napi_del(struct ice_vsi *vsi)
*/
int ice_vsi_release(struct ice_vsi *vsi)
{
enum ice_status err;
int err;
struct ice_pf *pf;
if (!vsi->back)
......@@ -3775,39 +3775,6 @@ void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
u64_stats_update_end(&rx_ring->syncp);
}
/**
* ice_status_to_errno - convert from enum ice_status to Linux errno
* @err: ice_status value to convert
*/
int ice_status_to_errno(enum ice_status err)
{
switch (err) {
case ICE_SUCCESS:
return 0;
case ICE_ERR_DOES_NOT_EXIST:
return -ENOENT;
case ICE_ERR_OUT_OF_RANGE:
case ICE_ERR_AQ_ERROR:
case ICE_ERR_AQ_TIMEOUT:
case ICE_ERR_AQ_EMPTY:
case ICE_ERR_AQ_FW_CRITICAL:
return -EIO;
case ICE_ERR_PARAM:
case ICE_ERR_INVAL_SIZE:
return -EINVAL;
case ICE_ERR_NO_MEMORY:
return -ENOMEM;
case ICE_ERR_MAX_LIMIT:
return -EAGAIN;
case ICE_ERR_RESET_ONGOING:
return -EBUSY;
case ICE_ERR_AQ_FULL:
return -ENOSPC;
default:
return -EINVAL;
}
}
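
With ice_status_to_errno() gone, call sites that previously translated at every boundary collapse to straight assignments or returns, as the surrounding hunks show. The call-site pattern, sketched with an illustrative stand-in helper name:

/* Before: translate the private code before handing it up the stack.
 * ice_example_cmd() is a hypothetical name, not a driver function.
 */
status = ice_example_cmd(hw);		/* returned enum ice_status */
if (status)
	return ice_status_to_errno(status);

/* After: the helper already returns -EIO/-EINVAL/..., so pass it on. */
status = ice_example_cmd(hw);		/* returns int errno */
if (status)
	return status;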
/**
* ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
* @sw: switch to check if its default forwarding VSI is free
......@@ -4119,7 +4086,7 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
* a success code. Return an error if FW returns an error code other
* than ICE_AQ_RC_EMODE
*/
if (status == ICE_ERR_AQ_ERROR) {
if (status == -EIO) {
if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
dev_warn(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
(ena ? "ON" : "OFF"), status,
......
......@@ -103,9 +103,6 @@ void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes);
void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
int ice_status_to_errno(enum ice_status err);
void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl);
void ice_write_itr(struct ice_ring_container *rc, u16 itr);
void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
......
......@@ -333,7 +333,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
if (status) {
netdev_err(netdev, "Failed to delete MAC filters\n");
/* if we failed because of alloc failures, just bail */
if (status == ICE_ERR_NO_MEMORY) {
if (status == -ENOMEM) {
err = -ENOMEM;
goto out;
}
......@@ -346,7 +346,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
* 'if' condition and report it as error. Instead continue processing
* rest of the function.
*/
if (status && status != ICE_ERR_ALREADY_EXISTS) {
if (status && status != -EEXIST) {
netdev_err(netdev, "Failed to add MAC filters\n");
/* If there is no more space for new umac filters, VSI
* should go into promiscuous mode. There should be some
......@@ -1424,7 +1424,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
u16 opcode;
ret = ice_clean_rq_elem(hw, cq, &event, &pending);
if (ret == ICE_ERR_AQ_NO_WORK)
if (ret == -EALREADY)
break;
if (ret) {
dev_err(dev, "%s Receive Queue event error %d\n", qtype,
......@@ -4218,7 +4218,7 @@ static void ice_verify_cacheline_size(struct ice_pf *pf)
* ice_send_version - update firmware with driver version
* @pf: PF struct
*
* Returns ICE_SUCCESS on success, else error code
* Returns 0 on success, else error code
*/
static int ice_send_version(struct ice_pf *pf)
{
......@@ -5394,14 +5394,14 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
/* Clean up old MAC filter. Not an error if old filter doesn't exist */
status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
if (status && status != ICE_ERR_DOES_NOT_EXIST) {
if (status && status != -ENOENT) {
err = -EADDRNOTAVAIL;
goto err_update_filters;
}
/* Add filter for new MAC. If filter exists, return success */
status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
if (status == ICE_ERR_ALREADY_EXISTS)
if (status == -EEXIST)
/* Although this MAC filter is already present in hardware it's
* possible in some cases (e.g. bonding) that dev_addr was
* modified outside of the driver and needs to be restored back
......
......@@ -27,7 +27,7 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
cmd = &desc.params.nvm;
if (offset > ICE_AQC_NVM_MAX_OFFSET)
return ICE_ERR_PARAM;
return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);
......@@ -74,7 +74,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
/* Verify the length of the read if this is for the Shadow RAM */
if (read_shadow_ram && ((offset + inlen) > (hw->flash.sr_words * 2u))) {
ice_debug(hw, ICE_DBG_NVM, "NVM error: requested offset is beyond Shadow RAM limit\n");
return ICE_ERR_PARAM;
return -EINVAL;
}
do {
......@@ -131,7 +131,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000)
return ICE_ERR_PARAM;
return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write);
......@@ -329,7 +329,7 @@ ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module,
if (!start) {
ice_debug(hw, ICE_DBG_NVM, "Unable to calculate flash bank offset for module 0x%04x\n",
module);
return ICE_ERR_PARAM;
return -EINVAL;
}
status = ice_acquire_nvm(hw, ICE_RES_READ);
......@@ -482,7 +482,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
*module_tlv_len = tlv_len;
return 0;
}
return ICE_ERR_INVAL_SIZE;
return -EINVAL;
}
/* Check next TLV, i.e. current TLV pointer + length + 2 words
* (for current TLV's type and length)
......@@ -490,7 +490,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
next_tlv = next_tlv + tlv_len + 2;
}
/* Module does not exist */
return ICE_ERR_DOES_NOT_EXIST;
return -ENOENT;
}
/**
......@@ -525,7 +525,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
if (pba_tlv_len < pba_size) {
ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n");
return ICE_ERR_INVAL_SIZE;
return -EINVAL;
}
/* Subtract one to get PBA word count (PBA Size word is included in
......@@ -534,7 +534,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
pba_size--;
if (pba_num_size < (((u32)pba_size * 2) + 1)) {
ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n");
return ICE_ERR_PARAM;
return -EINVAL;
}
for (i = 0; i < pba_size; i++) {
......@@ -650,14 +650,14 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
if (sum) {
ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n",
sum);
return ICE_ERR_NVM;
return -EIO;
}
*civd = tmp;
return 0;
}
return ICE_ERR_NVM;
return -EIO;
}
/**
......@@ -730,7 +730,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
if (module_id != ICE_NETLIST_LINK_TOPO_MOD_ID) {
ice_debug(hw, ICE_DBG_NVM, "Expected netlist module_id ID of 0x%04x, but got 0x%04x\n",
ICE_NETLIST_LINK_TOPO_MOD_ID, module_id);
return ICE_ERR_NVM;
return -EIO;
}
status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_MODULE_LEN, &length);
......@@ -741,7 +741,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
if (length < ICE_NETLIST_ID_BLK_SIZE) {
ice_debug(hw, ICE_DBG_NVM, "Netlist Link Topology module too small. Expected at least %u words, but got %u words.\n",
ICE_NETLIST_ID_BLK_SIZE, length);
return ICE_ERR_NVM;
return -EIO;
}
status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_NODE_COUNT, &node_count);
......@@ -751,7 +751,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
id_blk = kcalloc(ICE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL);
if (!id_blk)
return ICE_ERR_NO_MEMORY;
return -ENOMEM;
/* Read out the entire Netlist ID Block at once. */
status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR,
......@@ -819,7 +819,7 @@ static int ice_discover_flash_size(struct ice_hw *hw)
u8 data;
status = ice_read_flat_nvm(hw, offset, &len, &data, false);
if (status == ICE_ERR_AQ_ERROR &&
if (status == -EIO &&
hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n",
__func__, offset);
......@@ -933,7 +933,7 @@ ice_determine_active_flash_banks(struct ice_hw *hw)
/* Check that the control word indicates validity */
if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) {
ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n");
return ICE_ERR_CFG;
return -EIO;
}
if (!(ctrl_word & ICE_SR_CTRL_WORD_NVM_BANK))
......@@ -1021,7 +1021,7 @@ int ice_init_nvm(struct ice_hw *hw)
/* Blank programming mode */
flash->blank_nvm_mode = true;
ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n");
return ICE_ERR_NVM_BLANK_MODE;
return -EIO;
}
status = ice_discover_flash_size(hw);
......@@ -1080,7 +1080,7 @@ int ice_nvm_validate_checksum(struct ice_hw *hw)
if (!status)
if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT)
status = ICE_ERR_NVM_CHECKSUM;
status = -EIO;
return status;
}
......@@ -1144,7 +1144,7 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
struct ice_aq_desc desc;
if (length != 0 && !data)
return ICE_ERR_PARAM;
return -EINVAL;
cmd = &desc.params.pkg_data;
......@@ -1183,7 +1183,7 @@ ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
int status;
if (!data || !comp_response || !comp_response_code)
return ICE_ERR_PARAM;
return -EINVAL;
cmd = &desc.params.pass_comp_tbl;
......
......@@ -236,7 +236,7 @@ ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
if (vf_id >= snap->mbx_vf.vfcntr_len)
return ICE_ERR_OUT_OF_RANGE;
return -EIO;
/* increment the message count in the VF array */
snap->mbx_vf.vf_cntr[vf_id]++;
......@@ -309,7 +309,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
int status = 0;
if (!is_malvf || !mbx_data)
return ICE_ERR_BAD_PTR;
return -EINVAL;
/* When entering the mailbox state machine assume that the VF
* is not malicious until detected.
......@@ -320,7 +320,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
* interrupt is not less than the defined AVF message threshold.
*/
if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
return ICE_ERR_INVAL_SIZE;
return -EINVAL;
/* The watermark value should not be lesser than the threshold limit
* set for the number of asynchronous messages a VF can send to mailbox
......@@ -329,7 +329,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
*/
if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
return ICE_ERR_PARAM;
return -EINVAL;
new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
snap_buf = &snap->mbx_buf;
......@@ -383,7 +383,7 @@ ice_mbx_vf_state_handler(struct ice_hw *hw,
default:
new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
status = ICE_ERR_CFG;
status = -EIO;
}
snap_buf->state = new_state;
......@@ -410,15 +410,15 @@ ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
u16 bitmap_len, u16 vf_id, bool *report_malvf)
{
if (!all_malvfs || !report_malvf)
return ICE_ERR_PARAM;
return -EINVAL;
*report_malvf = false;
if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
return ICE_ERR_INVAL_SIZE;
return -EINVAL;
if (vf_id >= bitmap_len)
return ICE_ERR_OUT_OF_RANGE;
return -EIO;
/* If the vf_id is found in the bitmap set bit and boolean to true */
if (!test_and_set_bit(vf_id, all_malvfs))
......@@ -446,14 +446,14 @@ ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
u16 bitmap_len, u16 vf_id)
{
if (!snap || !all_malvfs)
return ICE_ERR_PARAM;
return -EINVAL;
if (bitmap_len < snap->mbx_vf.vfcntr_len)
return ICE_ERR_INVAL_SIZE;
return -EINVAL;
/* Ensure VF ID value is not larger than bitmap or VF counter length */
if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len)
return ICE_ERR_OUT_OF_RANGE;
return -EIO;
/* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */
clear_bit(vf_id, all_malvfs);
......@@ -491,13 +491,13 @@ int ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
* the functional capabilities of the PF.
*/
if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
return ICE_ERR_INVAL_SIZE;
return -EINVAL;
snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count,
sizeof(*snap->mbx_vf.vf_cntr),
GFP_KERNEL);
if (!snap->mbx_vf.vf_cntr)
return ICE_ERR_NO_MEMORY;
return -ENOMEM;
/* Setting the VF counter length to the number of allocated
* VFs for given PF's functional capabilities.
......
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */
#ifndef _ICE_STATUS_H_
#define _ICE_STATUS_H_
/* Error Codes */
enum ice_status {
ICE_SUCCESS = 0,
/* Generic codes : Range -1..-49 */
ICE_ERR_PARAM = -1,
ICE_ERR_NOT_IMPL = -2,
ICE_ERR_NOT_READY = -3,
ICE_ERR_NOT_SUPPORTED = -4,
ICE_ERR_BAD_PTR = -5,
ICE_ERR_INVAL_SIZE = -6,
ICE_ERR_DEVICE_NOT_SUPPORTED = -8,
ICE_ERR_RESET_FAILED = -9,
ICE_ERR_FW_API_VER = -10,
ICE_ERR_NO_MEMORY = -11,
ICE_ERR_CFG = -12,
ICE_ERR_OUT_OF_RANGE = -13,
ICE_ERR_ALREADY_EXISTS = -14,
ICE_ERR_DOES_NOT_EXIST = -15,
ICE_ERR_IN_USE = -16,
ICE_ERR_MAX_LIMIT = -17,
ICE_ERR_RESET_ONGOING = -18,
ICE_ERR_HW_TABLE = -19,
ICE_ERR_FW_DDP_MISMATCH = -20,
ICE_ERR_NVM = -50,
ICE_ERR_NVM_CHECKSUM = -51,
ICE_ERR_BUF_TOO_SHORT = -52,
ICE_ERR_NVM_BLANK_MODE = -53,
ICE_ERR_AQ_ERROR = -100,
ICE_ERR_AQ_TIMEOUT = -101,
ICE_ERR_AQ_FULL = -102,
ICE_ERR_AQ_NO_WORK = -103,
ICE_ERR_AQ_EMPTY = -104,
ICE_ERR_AQ_FW_CRITICAL = -105,
};
#endif /* _ICE_STATUS_H_ */
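
For reference, the substitutions visible in the hunks above map the removed codes onto errnos as follows (compiled from this page only; collapsed files may contain further cases):

/*
 * ICE_ERR_PARAM, ICE_ERR_BAD_PTR, ICE_ERR_INVAL_SIZE     -> -EINVAL
 * ICE_ERR_NO_MEMORY                                      -> -ENOMEM
 * ICE_ERR_DOES_NOT_EXIST                                 -> -ENOENT
 * ICE_ERR_ALREADY_EXISTS                                 -> -EEXIST
 * ICE_ERR_NOT_READY, ICE_ERR_RESET_ONGOING               -> -EBUSY
 * ICE_ERR_NOT_IMPL, ICE_ERR_NOT_SUPPORTED                -> -EOPNOTSUPP
 * ICE_ERR_MAX_LIMIT, ICE_ERR_AQ_FULL                     -> -ENOSPC
 * ICE_ERR_AQ_NO_WORK                                     -> -EALREADY
 * ICE_ERR_CFG, ICE_ERR_OUT_OF_RANGE, ICE_ERR_FW_API_VER,
 * ICE_ERR_NVM, ICE_ERR_NVM_CHECKSUM, ICE_ERR_NVM_BLANK_MODE,
 * ICE_ERR_AQ_ERROR, ICE_ERR_AQ_TIMEOUT, ICE_ERR_AQ_EMPTY,
 * ICE_ERR_AQ_FW_CRITICAL                                 -> -EIO
 */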
......@@ -450,7 +450,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_info.fltr_rule_id = fltr->cookie;
status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
if (status == ICE_ERR_ALREADY_EXISTS) {
if (status == -EEXIST) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist");
ret = -EINVAL;
goto exit;
......@@ -1162,7 +1162,7 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_rem.vsi_handle = fltr->dest_id;
err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
if (err) {
if (err == ICE_ERR_DOES_NOT_EXIST) {
if (err == -ENOENT) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
return -ENOENT;
}
......
......@@ -7,7 +7,6 @@
#define ICE_BYTES_PER_WORD 2
#define ICE_BYTES_PER_DWORD 4
#include "ice_status.h"
#include "ice_hw_autogen.h"
#include "ice_osdep.h"
#include "ice_controlq.h"
......
......@@ -605,7 +605,7 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
tun + 1, &prof);
ret = ice_status_to_errno(status);
ret = status;
if (ret) {
dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
flow, vf->vf_id);
......@@ -615,7 +615,7 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry1_h);
ret = ice_status_to_errno(status);
ret = status;
if (ret) {
dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
flow, vf->vf_id);
......@@ -625,7 +625,7 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry2_h);
ret = ice_status_to_errno(status);
ret = status;
if (ret) {
dev_dbg(dev,
"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
......@@ -1230,7 +1230,7 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
ice_fdir_get_prgm_desc(hw, input, &desc, add);
status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
ret = ice_status_to_errno(status);
ret = status;
if (ret) {
dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
vf->vf_id, input->flow_type);
......
......@@ -862,7 +862,7 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
if (status) {
dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
vf->vf_id, status);
return ice_status_to_errno(status);
return status;
}
vf->num_mac++;
......@@ -874,7 +874,7 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
&vf->hw_lan_addr.addr[0], vf->vf_id,
status);
return ice_status_to_errno(status);
return status;
}
vf->num_mac++;
......@@ -1238,10 +1238,10 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
else
status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
if (status && status != ICE_ERR_ALREADY_EXISTS) {
if (status && status != -EEXIST) {
dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
vf->vf_id, status);
return ice_status_to_errno(status);
return status;
}
return 0;
......@@ -1261,10 +1261,10 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
else
status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
if (status && status != ICE_ERR_DOES_NOT_EXIST) {
if (status && status != -ENOENT) {
dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
vf->vf_id, status);
return ice_status_to_errno(status);
return status;
}
return 0;
......@@ -1758,7 +1758,7 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf)
if (status) {
dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n",
vf->vf_id, status);
err = ice_status_to_errno(status);
err = status;
goto release_vsi;
}
......@@ -2026,7 +2026,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
status = ice_mbx_init_snapshot(&pf->hw, num_vfs);
if (status)
return ice_status_to_errno(status);
return status;
err = ice_pci_sriov_ena(pf, num_vfs);
if (err) {
......@@ -2733,12 +2733,12 @@ static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
addl_hdrs);
/* We just ignore ICE_ERR_DOES_NOT_EXIST, because
/* We just ignore -ENOENT, because
* if two configurations share the same profile remove
* one of them actually removes both, since the
* profile is deleted.
*/
if (status && status != ICE_ERR_DOES_NOT_EXIST) {
if (status && status != -ENOENT) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
vf->vf_id, status);
......@@ -3802,7 +3802,7 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
}
status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
if (status == ICE_ERR_ALREADY_EXISTS) {
if (status == -EEXIST) {
dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
vf->vf_id);
/* don't return since we might need to update
......@@ -3896,7 +3896,7 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
return 0;
status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
if (status == ICE_ERR_DOES_NOT_EXIST) {
if (status == -ENOENT) {
dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
vf->vf_id);
return -ENOENT;
......