Commit 1f99fc7f authored by David S. Miller

Merge branch 'bnxt_en-health-and-error-recovery'

Michael Chan says:

====================
bnxt_en: health and error recovery.

This patchset implements adapter health and error recovery.  The health
status is reported through several devlink health reporters, and the
driver initiates and completes the recovery process using the devlink
health infrastructure.
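
As a rough usage sketch (not part of the patchset itself; the PCI device
address below is only illustrative), the reporters added here can be
inspected from user space with the devlink tool once the driver has
registered them:

    # list all registered health reporters; this driver adds
    # "fw", "fw_reset" and "fw_fatal"
    devlink health show

    # query firmware status through the "fw" reporter's diagnose callback
    devlink health diagnose pci/0000:3b:00.0 reporter fw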

v2: Added 4 patches at the beginning of the patchset to clean up error
    code handling for firmware messages and to convert the returned
    firmware error codes to standard error codes.

    Removed the dropping of rtnl_lock in bnxt_close().

    Broke up the patches some more for better patch organization and
    future bisection.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 19828bd3 acfb50e4
@@ -472,6 +472,19 @@ struct rx_tpa_end_cmp_ext {
((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \
RX_TPA_END_CMP_AGG_BUFS_P5) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5)
#define EVENT_DATA1_RESET_NOTIFY_FATAL(data1) \
(((data1) & \
ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL)
#define EVENT_DATA1_RECOVERY_MASTER_FUNC(data1) \
!!((data1) & \
ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC)
#define EVENT_DATA1_RECOVERY_ENABLED(data1) \
!!((data1) & \
ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED)
struct nqe_cn {
__le16 type;
#define NQ_CN_TYPE_MASK 0x3fUL
@@ -632,6 +645,7 @@ struct nqe_cn {
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
#define DFLT_HWRM_CMD_TIMEOUT 500
#define SHORT_HWRM_CMD_TIMEOUT 20
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
#define HWRM_RESP_ERR_CODE_MASK 0xffff
@@ -1058,6 +1072,7 @@ struct bnxt_pf_info {
u8 mac_addr[ETH_ALEN];
u32 first_vf_id;
u16 active_vfs;
u16 registered_vfs;
u16 max_vfs;
u32 max_encap_records;
u32 max_decap_records;
@@ -1217,6 +1232,9 @@ struct bnxt_test_info {
#define BNXT_GRCPF_REG_KONG_COMM 0xA00
#define BNXT_GRCPF_REG_KONG_COMM_TRIGGER 0xB00
#define BNXT_GRC_BASE_MASK 0xfffff000
#define BNXT_GRC_OFFSET_MASK 0x00000ffc
struct bnxt_tc_flow_stats {
u64 packets;
u64 bytes;
@@ -1333,6 +1351,53 @@ struct bnxt_ctx_mem_info {
struct bnxt_ctx_pg_info *tqm_mem[9];
};
struct bnxt_fw_health {
u32 flags;
u32 polling_dsecs;
u32 master_func_wait_dsecs;
u32 normal_func_wait_dsecs;
u32 post_reset_wait_dsecs;
u32 post_reset_max_wait_dsecs;
u32 regs[4];
u32 mapped_regs[4];
#define BNXT_FW_HEALTH_REG 0
#define BNXT_FW_HEARTBEAT_REG 1
#define BNXT_FW_RESET_CNT_REG 2
#define BNXT_FW_RESET_INPROG_REG 3
u32 fw_reset_inprog_reg_mask;
u32 last_fw_heartbeat;
u32 last_fw_reset_cnt;
u8 enabled:1;
u8 master:1;
u8 tmr_multiplier;
u8 tmr_counter;
u8 fw_reset_seq_cnt;
u32 fw_reset_seq_regs[16];
u32 fw_reset_seq_vals[16];
u32 fw_reset_seq_delay_msec[16];
struct devlink_health_reporter *fw_reporter;
struct devlink_health_reporter *fw_reset_reporter;
struct devlink_health_reporter *fw_fatal_reporter;
};
struct bnxt_fw_reporter_ctx {
unsigned long sp_event;
};
#define BNXT_FW_HEALTH_REG_TYPE_MASK 3
#define BNXT_FW_HEALTH_REG_TYPE_CFG 0
#define BNXT_FW_HEALTH_REG_TYPE_GRC 1
#define BNXT_FW_HEALTH_REG_TYPE_BAR0 2
#define BNXT_FW_HEALTH_REG_TYPE_BAR1 3
#define BNXT_FW_HEALTH_REG_TYPE(reg) ((reg) & BNXT_FW_HEALTH_REG_TYPE_MASK)
#define BNXT_FW_HEALTH_REG_OFF(reg) ((reg) & ~BNXT_FW_HEALTH_REG_TYPE_MASK)
#define BNXT_FW_HEALTH_WIN_BASE 0x3000
#define BNXT_FW_HEALTH_WIN_MAP_OFF 8
#define BNXT_FW_STATUS_HEALTHY 0x8000
struct bnxt {
void __iomem *bar0;
void __iomem *bar1;
@@ -1555,6 +1620,10 @@ struct bnxt {
#define BNXT_STATE_OPEN 0
#define BNXT_STATE_IN_SP_TASK 1
#define BNXT_STATE_READ_STATS 2
#define BNXT_STATE_FW_RESET_DET 3
#define BNXT_STATE_IN_FW_RESET 4
#define BNXT_STATE_ABORT_ERR 5
#define BNXT_STATE_FW_FATAL_COND 6
struct bnxt_irq *irq_tbl;
int total_irqs;
@@ -1579,6 +1648,7 @@ struct bnxt {
#define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080
#define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400
#define BNXT_FW_CAP_TRUSTED_VF 0x00000800
#define BNXT_FW_CAP_ERROR_RECOVERY 0x00002000
#define BNXT_FW_CAP_PKG_VER 0x00004000
#define BNXT_FW_CAP_CFA_ADV_FLOW 0x00008000
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX 0x00010000
@@ -1663,6 +1733,24 @@ struct bnxt {
#define BNXT_FLOW_STATS_SP_EVENT 15
#define BNXT_UPDATE_PHY_SP_EVENT 16
#define BNXT_RING_COAL_NOW_SP_EVENT 17
#define BNXT_FW_RESET_NOTIFY_SP_EVENT 18
#define BNXT_FW_EXCEPTION_SP_EVENT 19
struct delayed_work fw_reset_task;
int fw_reset_state;
#define BNXT_FW_RESET_STATE_POLL_VF 1
#define BNXT_FW_RESET_STATE_RESET_FW 2
#define BNXT_FW_RESET_STATE_ENABLE_DEV 3
#define BNXT_FW_RESET_STATE_POLL_FW 4
#define BNXT_FW_RESET_STATE_OPENING 5
u16 fw_reset_min_dsecs;
#define BNXT_DFLT_FW_RST_MIN_DSECS 20
u16 fw_reset_max_dsecs;
#define BNXT_DFLT_FW_RST_MAX_DSECS 60
unsigned long fw_reset_timestamp;
struct bnxt_fw_health *fw_health;
struct bnxt_hw_resc hw_resc;
struct bnxt_pf_info pf;
@@ -1868,6 +1956,7 @@ extern const u16 bnxt_lhint_arr[];
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 prod, gfp_t gfp);
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data);
u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
@@ -1900,6 +1989,8 @@ int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool);
void bnxt_fw_exception(struct bnxt *bp);
void bnxt_fw_reset(struct bnxt *bp);
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
......
@@ -377,8 +377,6 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
set.hdr_cnt = 1;
rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT);
if (rc)
rc = -EIO;
set_app_exit:
dma_free_coherent(&bp->pdev->dev, data_len, data, mapping);
@@ -391,6 +389,7 @@ static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
struct hwrm_queue_dscp_qcaps_input req = {0};
int rc;
bp->max_dscp_value = 0;
if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp))
return 0;
@@ -433,8 +432,6 @@ static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
dscp2pri->pri = app->priority;
req.entry_cnt = cpu_to_le16(1);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
rc = -EIO;
dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri,
mapping);
return rc;
@@ -722,6 +719,7 @@ static const struct dcbnl_rtnl_ops dcbnl_ops = {
void bnxt_dcb_init(struct bnxt *bp)
{
bp->dcbx_cap = 0;
if (bp->hwrm_spec_code < 0x10501)
return;
......
@@ -15,6 +15,192 @@
#include "bnxt_vfr.h"
#include "bnxt_devlink.h"
static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
struct bnxt_fw_health *health = bp->fw_health;
u32 val, health_status;
int rc;
if (!health || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return 0;
val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
health_status = val & 0xffff;
if (health_status == BNXT_FW_STATUS_HEALTHY) {
rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
"Healthy;");
if (rc)
return rc;
} else if (health_status < BNXT_FW_STATUS_HEALTHY) {
rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
"Not yet completed initialization;");
if (rc)
return rc;
} else if (health_status > BNXT_FW_STATUS_HEALTHY) {
rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
"Encountered fatal error and cannot recover;");
if (rc)
return rc;
}
if (val >> 16) {
rc = devlink_fmsg_u32_pair_put(fmsg, "Error", val >> 16);
if (rc)
return rc;
}
val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
rc = devlink_fmsg_u32_pair_put(fmsg, "Reset count", val);
if (rc)
return rc;
return 0;
}
static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
.name = "fw",
.diagnose = bnxt_fw_reporter_diagnose,
};
static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter,
void *priv_ctx)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
if (!priv_ctx)
return -EOPNOTSUPP;
bnxt_fw_reset(bp);
return 0;
}
static const
struct devlink_health_reporter_ops bnxt_dl_fw_reset_reporter_ops = {
.name = "fw_reset",
.recover = bnxt_fw_reset_recover,
};
static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter,
void *priv_ctx)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
struct bnxt_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
unsigned long event;
if (!priv_ctx)
return -EOPNOTSUPP;
event = fw_reporter_ctx->sp_event;
if (event == BNXT_FW_RESET_NOTIFY_SP_EVENT)
bnxt_fw_reset(bp);
else if (event == BNXT_FW_EXCEPTION_SP_EVENT)
bnxt_fw_exception(bp);
return 0;
}
static const
struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = {
.name = "fw_fatal",
.recover = bnxt_fw_fatal_recover,
};
static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
{
struct bnxt_fw_health *health = bp->fw_health;
if (!health)
return;
health->fw_reporter =
devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops,
0, false, bp);
if (IS_ERR(health->fw_reporter)) {
netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
PTR_ERR(health->fw_reporter));
health->fw_reporter = NULL;
}
health->fw_reset_reporter =
devlink_health_reporter_create(bp->dl,
&bnxt_dl_fw_reset_reporter_ops,
0, true, bp);
if (IS_ERR(health->fw_reset_reporter)) {
netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
PTR_ERR(health->fw_reset_reporter));
health->fw_reset_reporter = NULL;
}
health->fw_fatal_reporter =
devlink_health_reporter_create(bp->dl,
&bnxt_dl_fw_fatal_reporter_ops,
0, true, bp);
if (IS_ERR(health->fw_fatal_reporter)) {
netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
PTR_ERR(health->fw_fatal_reporter));
health->fw_fatal_reporter = NULL;
}
}
static void bnxt_dl_fw_reporters_destroy(struct bnxt *bp)
{
struct bnxt_fw_health *health = bp->fw_health;
if (!health)
return;
if (health->fw_reporter)
devlink_health_reporter_destroy(health->fw_reporter);
if (health->fw_reset_reporter)
devlink_health_reporter_destroy(health->fw_reset_reporter);
if (health->fw_fatal_reporter)
devlink_health_reporter_destroy(health->fw_fatal_reporter);
}
void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
struct bnxt_fw_reporter_ctx fw_reporter_ctx;
if (!fw_health)
return;
fw_reporter_ctx.sp_event = event;
switch (event) {
case BNXT_FW_RESET_NOTIFY_SP_EVENT:
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
if (!fw_health->fw_fatal_reporter)
return;
devlink_health_report(fw_health->fw_fatal_reporter,
"FW fatal async event received",
&fw_reporter_ctx);
return;
}
if (!fw_health->fw_reset_reporter)
return;
devlink_health_report(fw_health->fw_reset_reporter,
"FW non-fatal reset event received",
&fw_reporter_ctx);
return;
case BNXT_FW_EXCEPTION_SP_EVENT:
if (!fw_health->fw_fatal_reporter)
return;
devlink_health_report(fw_health->fw_fatal_reporter,
"FW fatal error reported",
&fw_reporter_ctx);
return;
}
}
static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
@@ -109,13 +295,9 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
memcpy(buf, data_addr, bytesize);
dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
if (rc == -EACCES)
netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
return -EACCES;
} else if (rc) {
return -EIO;
}
return 0;
return rc;
}
static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
@@ -251,6 +433,8 @@ int bnxt_dl_register(struct bnxt *bp)
devlink_params_publish(dl);
bnxt_dl_fw_reporters_create(bp);
return 0;
err_dl_port_unreg:
@@ -273,6 +457,7 @@ void bnxt_dl_unregister(struct bnxt *bp)
if (!dl)
return;
bnxt_dl_fw_reporters_destroy(bp);
devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params,
ARRAY_SIZE(bnxt_dl_port_params));
devlink_port_unregister(&bp->dl_port);
......
@@ -55,6 +55,7 @@ struct bnxt_dl_nvm_param {
u16 num_bits;
};
void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
int bnxt_dl_register(struct bnxt *bp);
void bnxt_dl_unregister(struct bnxt *bp);
......
@@ -1699,6 +1699,11 @@ static u32 bnxt_get_link(struct net_device *dev)
return bp->link_info.link_up;
}
static void bnxt_print_admin_err(struct bnxt *bp)
{
netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
}
static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
u16 ext, u16 *index, u32 *item_length,
u32 *data_length);
@@ -1738,13 +1743,8 @@ static int bnxt_flash_nvram(struct net_device *dev,
rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
netdev_info(dev,
"PF does not have admin privileges to flash the device\n");
rc = -EACCES;
} else if (rc) {
rc = -EIO;
}
if (rc == -EACCES)
bnxt_print_admin_err(bp);
return rc;
}
@@ -1794,13 +1794,8 @@ static int bnxt_firmware_reset(struct net_device *dev,
}
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
netdev_info(dev,
"PF does not have admin privileges to reset the device\n");
rc = -EACCES;
} else if (rc) {
rc = -EIO;
}
if (rc == -EACCES)
bnxt_print_admin_err(bp);
return rc;
}
@@ -2095,13 +2090,8 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
flash_pkg_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
err_exit:
if (hwrm_err == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
netdev_info(dev,
"PF does not have admin privileges to flash the device\n");
rc = -EACCES;
} else if (hwrm_err) {
rc = -EOPNOTSUPP;
}
if (hwrm_err == -EACCES)
bnxt_print_admin_err(bp);
return rc;
}
@@ -2640,8 +2630,6 @@ static int bnxt_set_phys_id(struct net_device *dev,
led_cfg->led_group_id = bp->leds[i].led_group_id;
}
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
rc = -EIO;
return rc;
}
@@ -3362,6 +3350,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
bnxt_get_pkgver(dev);
bp->num_tests = 0;
if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
return;
@@ -3371,6 +3360,8 @@ void bnxt_ethtool_init(struct bnxt *bp)
if (rc)
goto ethtool_init_exit;
test_info = bp->test_info;
if (!test_info)
test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
if (!test_info)
goto ethtool_init_exit;
......
@@ -25,7 +25,6 @@
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
struct bnxt_vf_info *vf, u16 event_id)
{
struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_fwd_async_event_cmpl_input req = {0};
struct hwrm_async_event_cmpl *async_cmpl;
int rc = 0;
@@ -40,23 +39,10 @@ static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
async_cmpl->event_id = cpu_to_le16(event_id);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc) {
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
rc);
goto fwd_async_event_cmpl_exit;
}
if (resp->error_code) {
netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
resp->error_code);
rc = -1;
}
fwd_async_event_cmpl_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -133,7 +119,7 @@ static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc) {
mutex_unlock(&bp->hwrm_cmd_lock);
return -EIO;
return rc;
}
vf->func_qcfg_flags = le16_to_cpu(resp->flags);
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -164,9 +150,7 @@ static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
else
req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return -EIO;
return 0;
return rc;
}
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
@@ -486,10 +470,43 @@ static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
/* Caller holds bp->hwrm_cmd_lock mutex lock */
static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
{
struct hwrm_func_cfg_input req = {0};
struct bnxt_vf_info *vf;
vf = &bp->pf.vf[vf_id];
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(vf->fw_fid);
req.flags = cpu_to_le32(vf->func_flags);
if (is_valid_ether_addr(vf->mac_addr)) {
req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN);
}
if (vf->vlan) {
req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
req.dflt_vlan = cpu_to_le16(vf->vlan);
}
if (vf->max_tx_rate) {
req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
req.max_bw = cpu_to_le32(vf->max_tx_rate);
#ifdef HAVE_IFLA_TX_RATE
req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
req.min_bw = cpu_to_le32(vf->min_tx_rate);
#endif
}
if (vf->flags & BNXT_VF_TRUST)
req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
/* Only called by PF to reserve resources for VFs, returns actual number of
* VFs configured, or < 0 on error.
*/
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
struct hwrm_func_vf_resource_cfg_input req = {0};
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
@@ -561,13 +578,14 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < num_vfs; i++) {
if (reset)
__bnxt_set_vf_params(bp, i);
req.vf_id = cpu_to_le16(pf->first_vf_id + i);
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
if (rc) {
rc = -ENOMEM;
if (rc)
break;
}
pf->active_vfs = i + 1;
pf->vf[i].fw_fid = pf->first_vf_id + i;
}
@@ -664,8 +682,6 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
total_vf_tx_rings += vf_tx_rsvd;
}
mutex_unlock(&bp->hwrm_cmd_lock);
if (rc)
rc = -ENOMEM;
if (pf->active_vfs) {
hw_resc->max_tx_rings -= total_vf_tx_rings;
hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
@@ -679,14 +695,40 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
return rc;
}
static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
if (BNXT_NEW_RM(bp))
return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
else
return bnxt_hwrm_func_cfg(bp, num_vfs);
}
int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
int rc;
/* Register buffers for VFs */
rc = bnxt_hwrm_func_buf_rgtr(bp);
if (rc)
return rc;
/* Reserve resources for VFs */
rc = bnxt_func_cfg(bp, *num_vfs, reset);
if (rc != *num_vfs) {
if (rc <= 0) {
netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
*num_vfs = 0;
return rc;
}
netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
rc);
*num_vfs = rc;
}
bnxt_ulp_sriov_cfg(bp, *num_vfs);
return 0;
}
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
int rc = 0, vfs_supported;
@@ -752,25 +794,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
if (rc)
goto err_out1;
/* Reserve resources for VFs */
rc = bnxt_func_cfg(bp, *num_vfs);
if (rc != *num_vfs) {
if (rc <= 0) {
netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
*num_vfs = 0;
goto err_out2;
}
netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc);
*num_vfs = rc;
}
/* Register buffers for VFs */
rc = bnxt_hwrm_func_buf_rgtr(bp);
rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
if (rc)
goto err_out2;
bnxt_ulp_sriov_cfg(bp, *num_vfs);
rc = pci_enable_sriov(bp->pdev, *num_vfs);
if (rc)
goto err_out2;
@@ -837,6 +864,11 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
rtnl_unlock();
return 0;
}
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
rtnl_unlock();
return 0;
}
bp->sriov_cfg = true;
rtnl_unlock();
@@ -870,7 +902,6 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
{
int rc = 0;
struct hwrm_fwd_resp_input req = {0};
struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
return -EINVAL;
@@ -885,22 +916,9 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
req.encap_resp_cmpl_ring = encap_resp_cpr;
memcpy(req.encap_resp, encap_resp, msg_size);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc) {
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
goto fwd_resp_exit;
}
if (resp->error_code) {
netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
resp->error_code);
rc = -1;
}
fwd_resp_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -909,7 +927,6 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
{
int rc = 0;
struct hwrm_reject_fwd_resp_input req = {0};
struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
return -EINVAL;
@@ -920,22 +937,9 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc) {
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
goto fwd_err_resp_exit;
}
if (resp->error_code) {
netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
resp->error_code);
rc = -1;
}
fwd_err_resp_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -944,7 +948,6 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
{
int rc = 0;
struct hwrm_exec_fwd_resp_input req = {0};
struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
return -EINVAL;
@@ -955,22 +958,9 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc) {
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
goto exec_fwd_resp_exit;
}
if (resp->error_code) {
netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n",
resp->error_code);
rc = -1;
}
exec_fwd_resp_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -1190,6 +1180,13 @@ int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
}
#else
int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
if (*num_vfs)
return -EOPNOTSUPP;
return 0;
}
void bnxt_sriov_disable(struct bnxt *bp)
{
}
......
@@ -36,6 +36,7 @@ int bnxt_set_vf_link_state(struct net_device *, int, int);
int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust);
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset);
void bnxt_sriov_disable(struct bnxt *);
void bnxt_hwrm_exec_fwd_req(struct bnxt *);
void bnxt_update_vf_mac(struct bnxt *);
......
@@ -319,8 +319,6 @@ static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
if (rc)
rc = -EIO;
return rc;
}
@@ -515,11 +513,6 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
}
}
mutex_unlock(&bp->hwrm_cmd_lock);
if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
rc = -ENOSPC;
else if (rc)
rc = -EIO;
return rc;
}
@@ -591,8 +584,6 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
}
mutex_unlock(&bp->hwrm_cmd_lock);
if (rc)
rc = -EIO;
return rc;
}
@@ -609,8 +600,6 @@ static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
if (rc)
rc = -EIO;
return rc;
}
@@ -660,8 +649,6 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
}
mutex_unlock(&bp->hwrm_cmd_lock);
if (rc)
rc = -EIO;
return rc;
}
@@ -678,8 +665,6 @@ static int hwrm_cfa_encap_record_free(struct bnxt *bp,
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
if (rc)
rc = -EIO;
return rc;
}
@@ -1457,8 +1442,6 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
}
mutex_unlock(&bp->hwrm_cmd_lock);
if (rc)
rc = -EIO;
return rc;
}
......
@@ -226,6 +226,9 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
struct input *req;
int rc;
if (ulp_id != BNXT_ROCE_ULP && bp->fw_reset_state)
return -EBUSY;
mutex_lock(&bp->hwrm_cmd_lock);
req = fw_msg->msg;
req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
......