Commit cec88ea3 authored by David S. Miller

Merge branch 'bnxt_en-next'

Michael Chan says:

====================
bnxt_en updates for net-next.

Mostly small miscellaneous changes.

Please review for net-next.  Thanks.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents efb15c39 51f30785
@@ -3414,7 +3414,8 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
 	/* Only RSS support for now TBD: COS & LB */
 	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
-				  VNIC_CFG_REQ_ENABLES_RSS_RULE);
+				  VNIC_CFG_REQ_ENABLES_RSS_RULE |
+				  VNIC_CFG_REQ_ENABLES_MRU);
 	req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
 	req.cos_rule = cpu_to_le16(0xffff);
 	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
@@ -3951,7 +3952,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
-	req.update_period_ms = cpu_to_le32(1000);
+	req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
 	mutex_lock(&bp->hwrm_cmd_lock);
 	for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -4025,6 +4026,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		pf->fw_fid = le16_to_cpu(resp->fid);
 		pf->port_id = le16_to_cpu(resp->port_id);
+		bp->dev->dev_port = pf->port_id;
 		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
 		memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
 		pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
@@ -4315,6 +4317,16 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
 #endif
 }

+/* Allow PF and VF with default VLAN to be in promiscuous mode */
+static bool bnxt_promisc_ok(struct bnxt *bp)
+{
+#ifdef CONFIG_BNXT_SRIOV
+	if (BNXT_VF(bp) && !bp->vf.vlan)
+		return false;
+#endif
+	return true;
+}
+
 static int bnxt_cfg_rx_mode(struct bnxt *);
 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
@@ -4380,7 +4392,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
 	vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
-	if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+	if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
 	if (bp->dev->flags & IFF_ALLMULTI) {
@@ -5295,12 +5307,19 @@ static int bnxt_open(struct net_device *dev)
 	struct bnxt *bp = netdev_priv(dev);
 	int rc = 0;

-	rc = bnxt_hwrm_func_reset(bp);
-	if (rc) {
-		netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
-			   rc);
-		rc = -1;
-		return rc;
+	if (!test_bit(BNXT_STATE_FN_RST_DONE, &bp->state)) {
+		rc = bnxt_hwrm_func_reset(bp);
+		if (rc) {
+			netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
+				   rc);
+			rc = -EBUSY;
+			return rc;
+		}
+		/* Do func_reset during the 1st PF open only to prevent killing
+		 * the VFs when the PF is brought down and up.
+		 */
+		if (BNXT_PF(bp))
+			set_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
 	}
 	return __bnxt_open_nic(bp, true, true);
 }
@@ -5520,8 +5539,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
 		  CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
 		  CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);

-	/* Only allow PF to be in promiscuous mode */
-	if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+	if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;

 	uc_update = bnxt_uc_list_updated(bp);
@@ -5976,6 +5994,8 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->tx_coal_ticks_irq = 2;
 	bp->tx_coal_bufs_irq = 2;

+	bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
+
 	init_timer(&bp->timer);
 	bp->timer.data = (unsigned long)bp;
 	bp->timer.function = bnxt_timer;
...@@ -6041,7 +6061,7 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu) ...@@ -6041,7 +6061,7 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{ {
struct bnxt *bp = netdev_priv(dev); struct bnxt *bp = netdev_priv(dev);
if (new_mtu < 60 || new_mtu > 9000) if (new_mtu < 60 || new_mtu > 9500)
return -EINVAL; return -EINVAL;
if (netif_running(dev)) if (netif_running(dev))
@@ -6676,6 +6696,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
 					       pci_channel_state_t state)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct bnxt *bp = netdev_priv(netdev);

 	netdev_info(netdev, "PCI I/O error detected\n");
@@ -6690,6 +6711,8 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
 	if (netif_running(netdev))
 		bnxt_close(netdev);

+	/* So that func_reset will be done during slot_reset */
+	clear_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
 	pci_disable_device(pdev);
 	rtnl_unlock();
...
@@ -11,10 +11,10 @@
 #define BNXT_H

 #define DRV_MODULE_NAME		"bnxt_en"
-#define DRV_MODULE_VERSION	"1.2.0"
+#define DRV_MODULE_VERSION	"1.3.0"

 #define DRV_VER_MAJ	1
-#define DRV_VER_MIN	0
+#define DRV_VER_MIN	3
 #define DRV_VER_UPD	0

 struct tx_bd {
@@ -359,7 +359,8 @@ struct rx_tpa_end_cmp {
	 RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO)

 #define TPA_END_GRO_TS(rx_tpa_end)					\
-	((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & cpu_to_le32(RX_TPA_END_GRO_TS))
+	(!!((rx_tpa_end)->rx_tpa_end_cmp_tsdelta &			\
+	    cpu_to_le32(RX_TPA_END_GRO_TS)))

 struct rx_tpa_end_cmp_ext {
	__le32 rx_tpa_end_cmp_dup_acks;
@@ -753,8 +754,8 @@ struct bnxt_vf_info {
 struct bnxt_pf_info {
 #define BNXT_FIRST_PF_FID	1
 #define BNXT_FIRST_VF_FID	128
-	u32	fw_fid;
-	u8	port_id;
+	u16	fw_fid;
+	u16	port_id;
 	u8	mac_addr[ETH_ALEN];
 	u16	max_rsscos_ctxs;
 	u16	max_cp_rings;
@@ -1017,6 +1018,7 @@ struct bnxt {
 	unsigned long		state;
 #define BNXT_STATE_OPEN		0
 #define BNXT_STATE_IN_SP_TASK	1
+#define BNXT_STATE_FN_RST_DONE	2

 	struct bnxt_irq	*irq_tbl;
 	u8			mac_addr[ETH_ALEN];
@@ -1065,6 +1067,11 @@ struct bnxt {
 #define BNXT_USEC_TO_COAL_TIMER(x)	((x) * 25 / 2)

+	u32			stats_coal_ticks;
+#define BNXT_DEF_STATS_COAL_TICKS	1000000
+#define BNXT_MIN_STATS_COAL_TICKS	250000
+#define BNXT_MAX_STATS_COAL_TICKS	1000000
+
 	struct work_struct	sp_task;
 	unsigned long		sp_event;
 #define BNXT_RX_MASK_SP_EVENT		0
...
@@ -56,6 +56,8 @@ static int bnxt_get_coalesce(struct net_device *dev,
 	coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq;
 	coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq;

+	coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
+
 	return 0;
 }
@@ -63,6 +65,7 @@ static int bnxt_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal)
 {
 	struct bnxt *bp = netdev_priv(dev);
+	bool update_stats = false;
 	int rc = 0;

 	bp->rx_coal_ticks = coal->rx_coalesce_usecs;
@@ -76,8 +79,26 @@ static int bnxt_set_coalesce(struct net_device *dev,
 	bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq;
 	bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq;

-	if (netif_running(dev))
-		rc = bnxt_hwrm_set_coal(bp);
+	if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
+		u32 stats_ticks = coal->stats_block_coalesce_usecs;
+
+		stats_ticks = clamp_t(u32, stats_ticks,
+				      BNXT_MIN_STATS_COAL_TICKS,
+				      BNXT_MAX_STATS_COAL_TICKS);
+		stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
+		bp->stats_coal_ticks = stats_ticks;
+		update_stats = true;
+	}
+
+	if (netif_running(dev)) {
+		if (update_stats) {
+			rc = bnxt_close_nic(bp, true, false);
+			if (!rc)
+				rc = bnxt_open_nic(bp, true, false);
+		} else {
+			rc = bnxt_hwrm_set_coal(bp);
+		}
+	}

 	return rc;
 }
@@ -961,7 +982,7 @@ static int bnxt_set_pauseparam(struct net_device *dev,
 	struct bnxt_link_info *link_info = &bp->link_info;

 	if (!BNXT_SINGLE_PF(bp))
-		return rc;
+		return -EOPNOTSUPP;

 	if (epause->autoneg) {
 		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
@@ -1059,6 +1080,8 @@ static int bnxt_firmware_reset(struct net_device *dev,
 	case BNX_DIR_TYPE_APE_FW:
 	case BNX_DIR_TYPE_APE_PATCH:
 		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
+		/* Self-reset APE upon next PCIe reset: */
+		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
 		break;
 	case BNX_DIR_TYPE_KONG_FW:
 	case BNX_DIR_TYPE_KONG_PATCH:
@@ -1092,9 +1115,27 @@ static int bnxt_flash_firmware(struct net_device *dev,
 	case BNX_DIR_TYPE_BOOTCODE_2:
 		code_type = CODE_BOOT;
 		break;
+	case BNX_DIR_TYPE_CHIMP_PATCH:
+		code_type = CODE_CHIMP_PATCH;
+		break;
 	case BNX_DIR_TYPE_APE_FW:
 		code_type = CODE_MCTP_PASSTHRU;
 		break;
+	case BNX_DIR_TYPE_APE_PATCH:
+		code_type = CODE_APE_PATCH;
+		break;
+	case BNX_DIR_TYPE_KONG_FW:
+		code_type = CODE_KONG_FW;
+		break;
+	case BNX_DIR_TYPE_KONG_PATCH:
+		code_type = CODE_KONG_PATCH;
+		break;
+	case BNX_DIR_TYPE_BONO_FW:
+		code_type = CODE_BONO_FW;
+		break;
+	case BNX_DIR_TYPE_BONO_PATCH:
+		code_type = CODE_BONO_PATCH;
+		break;
 	default:
 		netdev_err(dev, "Unsupported directory entry type: %u\n",
 			   dir_type);
@@ -1149,6 +1190,8 @@ static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
 	case BNX_DIR_TYPE_APE_PATCH:
 	case BNX_DIR_TYPE_KONG_FW:
 	case BNX_DIR_TYPE_KONG_PATCH:
+	case BNX_DIR_TYPE_BONO_FW:
+	case BNX_DIR_TYPE_BONO_PATCH:
 		return true;
 	}
@@ -1186,7 +1229,8 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
 	const struct firmware *fw;
 	int rc;

-	if (bnxt_dir_type_is_executable(dir_type) == false)
+	if (dir_type != BNX_DIR_TYPE_UPDATE &&
+	    bnxt_dir_type_is_executable(dir_type) == false)
 		return -EINVAL;

 	rc = request_firmware(&fw, filename, &dev->dev);
@@ -1483,7 +1527,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
 	int rc = 0;

 	if (!BNXT_SINGLE_PF(bp))
-		return 0;
+		return -EOPNOTSUPP;

 	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
 		return -EOPNOTSUPP;
...
@@ -70,6 +70,7 @@ enum SUPPORTED_CODE {
	CODE_KONG_PATCH,	/* 18 - KONG Patch firmware */
	CODE_BONO_FW,		/* 19 - BONO firmware */
	CODE_BONO_PATCH,	/* 20 - BONO Patch firmware */
+	CODE_CHIMP_PATCH,	/* 21 - ChiMP Patch firmware */

	MAX_CODE_TYPE,
 };
...
@@ -105,6 +105,7 @@ struct hwrm_async_event_cmpl {
	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE (0x7UL << 0)
	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
@@ -484,12 +485,12 @@ struct hwrm_async_event_cmpl_hwrm_error {
	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
 };

-/* HW Resource Manager Specification 1.2.2 */
+/* HW Resource Manager Specification 1.3.0 */
 #define HWRM_VERSION_MAJOR	1
-#define HWRM_VERSION_MINOR	2
-#define HWRM_VERSION_UPDATE	2
+#define HWRM_VERSION_MINOR	3
+#define HWRM_VERSION_UPDATE	0

-#define HWRM_VERSION_STR	"1.2.2"
+#define HWRM_VERSION_STR	"1.3.0"
 /*
  * Following is the signature for HWRM message field that indicates not
  * applicable (All F's). Need to cast it the size of the field if needed.
@@ -611,6 +612,9 @@ struct cmd_nums {
	#define HWRM_FWD_RESP				(0xd2UL)
	#define HWRM_FWD_ASYNC_EVENT_CMPL		(0xd3UL)
	#define HWRM_TEMP_MONITOR_QUERY			(0xe0UL)
+	#define HWRM_WOL_FILTER_ALLOC			(0xf0UL)
+	#define HWRM_WOL_FILTER_FREE			(0xf1UL)
+	#define HWRM_WOL_FILTER_QCFG			(0xf2UL)
	#define HWRM_DBG_READ_DIRECT			(0xff10UL)
	#define HWRM_DBG_READ_INDIRECT			(0xff11UL)
	#define HWRM_DBG_WRITE_DIRECT			(0xff12UL)
@@ -1020,6 +1024,10 @@ struct hwrm_func_qcaps_output {
	#define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED	0x1UL
	#define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING	0x2UL
	#define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED		0x4UL
+	#define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED		0x8UL
+	#define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED		0x10UL
+	#define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED	0x20UL
+	#define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED		0x40UL
	u8 mac_address[6];
	__le16 max_rsscos_ctx;
	__le16 max_cmpl_rings;
@@ -1066,8 +1074,9 @@ struct hwrm_func_qcfg_output {
	__le16 fid;
	__le16 port_id;
	__le16 vlan;
-	u8 unused_0;
-	u8 unused_1;
+	__le16 flags;
+	#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED	0x1UL
+	#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED	0x2UL
	u8 mac_address[6];
	__le16 pci_id;
	__le16 alloc_rsscos_ctx;
@@ -1086,23 +1095,23 @@ struct hwrm_func_qcfg_output {
	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 (0x3UL << 0)
	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 (0x4UL << 0)
	#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN (0xffUL << 0)
-	u8 unused_2;
+	u8 unused_0;
	__le16 dflt_vnic_id;
-	u8 unused_3;
-	u8 unused_4;
+	u8 unused_1;
+	u8 unused_2;
	__le32 min_bw;
	__le32 max_bw;
	u8 evb_mode;
	#define FUNC_QCFG_RESP_EVB_MODE_NO_EVB	(0x0UL << 0)
	#define FUNC_QCFG_RESP_EVB_MODE_VEB	(0x1UL << 0)
	#define FUNC_QCFG_RESP_EVB_MODE_VEPA	(0x2UL << 0)
-	u8 unused_5;
-	__le16 unused_6;
+	u8 unused_3;
+	__le16 unused_4;
	__le32 alloc_mcast_filters;
	__le32 alloc_hw_ring_grps;
+	u8 unused_5;
+	u8 unused_6;
	u8 unused_7;
-	u8 unused_8;
-	u8 unused_9;
	u8 valid;
 };
@@ -1410,8 +1419,8 @@ struct hwrm_func_buf_rgtr_input {
	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K	(0xcUL << 0)
	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K	(0xdUL << 0)
	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K	(0x10UL << 0)
-	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M	(0x16UL << 0)
-	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M	(0x17UL << 0)
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M	(0x15UL << 0)
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M	(0x16UL << 0)
	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G	(0x1eUL << 0)
	__le16 req_buf_len;
	__le16 resp_buf_len;
@@ -1499,6 +1508,12 @@ struct hwrm_port_phy_cfg_input {
	#define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE		0x20UL
	#define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE	0x40UL
	#define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE	0x80UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE	0x100UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE	0x200UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE	0x400UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE	0x800UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE	0x1000UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE	0x2000UL
	__le32 enables;
	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE		0x1UL
	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX		0x2UL
@@ -1815,13 +1830,22 @@ struct hwrm_port_phy_qcfg_output {
	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
	#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
-	__le32 unused_1;
+	__le16 fec_cfg;
+	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED	0x1UL
+	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
+	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED	0x4UL
+	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
+	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED	0x10UL
+	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
+	#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED	0x40UL
+	u8 unused_1;
+	u8 unused_2;
	char phy_vendor_name[16];
	char phy_vendor_partnumber[16];
-	__le32 unused_2;
-	u8 unused_3;
+	__le32 unused_3;
	u8 unused_4;
	u8 unused_5;
+	u8 unused_6;
	u8 valid;
 };
@@ -1842,6 +1866,8 @@ struct hwrm_port_mac_cfg_input {
	#define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE	0x20UL
	#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE	0x40UL
	#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE	0x80UL
+	#define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE		0x100UL
+	#define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE		0x200UL
	__le32 enables;
	#define PORT_MAC_CFG_REQ_ENABLES_IPG			0x1UL
	#define PORT_MAC_CFG_REQ_ENABLES_LPBK			0x2UL
@@ -2127,6 +2153,7 @@ struct hwrm_port_phy_i2c_read_output {
	u8 valid;
 };

+/* hwrm_queue_qportcfg */
 /* Input (24 bytes) */
 struct hwrm_queue_qportcfg_input {
	__le16 req_type;
@@ -2382,7 +2409,7 @@ struct hwrm_queue_cos2bw_cfg_input {
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP (0x0UL << 0)
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS (0x1UL << 0)
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
	u8 queue_id0_pri_lvl;
	u8 queue_id0_bw_weight;
	u8 queue_id1;
@@ -2392,7 +2419,7 @@ struct hwrm_queue_cos2bw_cfg_input {
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP (0x0UL << 0)
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS (0x1UL << 0)
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
	u8 queue_id1_pri_lvl;
	u8 queue_id1_bw_weight;
	u8 queue_id2;
@@ -2402,7 +2429,7 @@ struct hwrm_queue_cos2bw_cfg_input {
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP (0x0UL << 0)
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS (0x1UL << 0)
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
	u8 queue_id2_pri_lvl;
	u8 queue_id2_bw_weight;
	u8 queue_id3;
@@ -2412,7 +2439,7 @@ struct hwrm_queue_cos2bw_cfg_input {
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP (0x0UL << 0)
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS (0x1UL << 0)
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
	u8 queue_id3_pri_lvl;
	u8 queue_id3_bw_weight;
	u8 queue_id4;
@@ -2422,7 +2449,7 @@ struct hwrm_queue_cos2bw_cfg_input {
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP (0x0UL << 0)
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS (0x1UL << 0)
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
	u8 queue_id4_pri_lvl;
	u8 queue_id4_bw_weight;
	u8 queue_id5;
@@ -2432,7 +2459,7 @@ struct hwrm_queue_cos2bw_cfg_input {
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP (0x0UL << 0)
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS (0x1UL << 0)
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
	u8 queue_id5_pri_lvl;
	u8 queue_id5_bw_weight;
	u8 queue_id6;
@@ -2442,7 +2469,7 @@ struct hwrm_queue_cos2bw_cfg_input {
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP (0x0UL << 0)
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS (0x1UL << 0)
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
	u8 queue_id6_pri_lvl;
	u8 queue_id6_bw_weight;
	u8 queue_id7;
@@ -2452,7 +2479,7 @@ struct hwrm_queue_cos2bw_cfg_input {
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP (0x0UL << 0)
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS (0x1UL << 0)
	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
-	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
	u8 queue_id7_pri_lvl;
	u8 queue_id7_bw_weight;
	u8 unused_1[5];
@@ -3150,7 +3177,7 @@ struct hwrm_cfa_l2_filter_cfg_output {
 };

 /* hwrm_cfa_l2_set_rx_mask */
-/* Input (40 bytes) */
+/* Input (56 bytes) */
 struct hwrm_cfa_l2_set_rx_mask_input {
	__le16 req_type;
	__le16 cmpl_ring;
@@ -3165,9 +3192,15 @@ struct hwrm_cfa_l2_set_rx_mask_input {
	#define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST		0x8UL
	#define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS		0x10UL
	#define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST		0x20UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY		0x40UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN	0x80UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN	0x100UL
	__le64 mc_tbl_addr;
	__le32 num_mc_entries;
	__le32 unused_0;
+	__le64 vlan_tag_tbl_addr;
+	__le32 num_vlan_tags;
+	__le32 unused_1;
 };

 /* Output (16 bytes) */
...
@@ -13,6 +13,7 @@
 enum bnxt_nvm_directory_type {
	BNX_DIR_TYPE_UNUSED = 0,
	BNX_DIR_TYPE_PKG_LOG = 1,
+	BNX_DIR_TYPE_UPDATE = 2,
	BNX_DIR_TYPE_CHIMP_PATCH = 3,
	BNX_DIR_TYPE_BOOTCODE = 4,
	BNX_DIR_TYPE_VPD = 5,
...