Commit 59b8d277 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2020-05-21

This series contains updates to the ice driver only.  Several of the changes
are fixes that could be backported to stable; of those, only one was
marked for stable, because of its memory leak potential.

Jake exposes the information in the flash memory used for link
management, which is called the netlist module.

Henry and Tony add support for tunnel offloads.

Brett adds promiscuous support for VFs, based on VF trust and the new
vf-true-promisc flag.

Avinash fixes an issue where a transmit timeout on a queue that belongs
to a PFC-enabled TC is not a true transmit timeout, but rather the
result of PFC being in action.

Dave fixes the check for contiguous TCs to allow for various UP2TC
mapping configurations.  Also fixed an issue where changing the pause
parameters could cause multiple link drops/downs in succession, which in
turn caused the firmware to not generate a link interrupt for the driver
to respond to.

Anirudh (Ani) fixed a potential race condition in probe/open due to a
bit being cleared too early.

Lihong updates an error message to make it more meaningful instead of
just printing out the numerical value of the status/error code.  Also
fixed an incorrect return value if deleting a filter does not find a
match to delete or when adding a filter that already exists.

Karol fixes casting issues and precision loss in the driver.

Jesse makes the sign usage more consistent in the driver by making sure
all instances of vf_id are unsigned, since it can never be negative.

Eric fixes a potential memory leak in ice_add_prof_id_vsig(), which was
not cleaning up resources properly when an error occurs.

Michal helps organize the filtering code in the driver by refactoring it
into a separate file and adding functions to prepare the filter
information.

Bruce cleaned up a conditional statement that always resulted in true
and provided a comment to make it more obvious.  Also cleaned up
redundant code checks.

Tony helps with potential namespace issues by renaming an 'ice'-specific
function to have the driver name prepended.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4001f1f0 5757cc7c
@@ -69,6 +69,17 @@ The ``ice`` driver reports the following versions
       - The version of the DDP package that is active in the device. Note
         that both the name (as reported by ``fw.app.name``) and version are
         required to uniquely identify the package.
+    * - ``fw.netlist``
+      - running
+      - 1.1.2000-6.7.0
+      - The version of the netlist module. This module defines the device's
+        Ethernet capabilities and default settings, and is used by the
+        management firmware as part of managing link and device
+        connectivity.
+    * - ``fw.netlist.build``
+      - running
+      - 0xee16ced7
+      - The first 4 bytes of the hash of the netlist module contents.
 
 Regions
 =======
...
@@ -17,6 +17,7 @@ ice-y := ice_main.o \
 	 ice_lib.o \
 	 ice_txrx_lib.o \
 	 ice_txrx.o \
+	 ice_fltr.o \
 	 ice_flex_pipe.o \
 	 ice_flow.o \
 	 ice_devlink.o \
...
@@ -37,6 +37,10 @@
 #include <net/devlink.h>
 #include <net/ipv6.h>
 #include <net/xdp_sock.h>
+#include <net/geneve.h>
+#include <net/gre.h>
+#include <net/udp_tunnel.h>
+#include <net/vxlan.h>
 #include "ice_devids.h"
 #include "ice_type.h"
 #include "ice_txrx.h"
@@ -244,8 +248,8 @@ struct ice_vsi {
 	u32 tx_busy;
 	u32 rx_buf_failed;
 	u32 rx_page_failed;
-	int num_q_vectors;
-	int base_vector;	/* IRQ base for OS reserved vectors */
+	u16 num_q_vectors;
+	u16 base_vector;	/* IRQ base for OS reserved vectors */
 	enum ice_vsi_type type;
 	u16 vsi_num;		/* HW (absolute) index of this VSI */
 	u16 idx;		/* software index in pf->vsi[] */
@@ -341,6 +345,7 @@ enum ice_pf_flags {
 	ICE_FLAG_FW_LLDP_AGENT,
 	ICE_FLAG_ETHTOOL_CTXT,		/* set when ethtool holds RTNL lock */
 	ICE_FLAG_LEGACY_RX,
+	ICE_FLAG_VF_TRUE_PROMISC_ENA,
 	ICE_FLAG_MDD_AUTO_RESET_VF,
 	ICE_PF_FLAGS_NBITS		/* must be last */
 };
@@ -366,7 +371,7 @@ struct ice_pf {
 	struct ice_sw *first_sw;	/* first switch created by firmware */
 	/* Virtchnl/SR-IOV config info */
 	struct ice_vf *vf;
-	int num_alloc_vfs;		/* actual number of VFs allocated */
+	u16 num_alloc_vfs;		/* actual number of VFs allocated */
 	u16 num_vfs_supported;		/* num VFs supported for this PF */
 	u16 num_qps_per_vf;
 	u16 num_msix_per_vf;
@@ -385,11 +390,11 @@ struct ice_pf {
 	struct mutex tc_mutex;	/* lock to protect TC changes */
 	u32 msg_enable;
 	u32 hw_csum_rx_error;
-	u32 oicr_idx;		/* Other interrupt cause MSIX vector index */
-	u32 num_avail_sw_msix;	/* remaining MSIX SW vectors left unclaimed */
+	u16 oicr_idx;		/* Other interrupt cause MSIX vector index */
+	u16 num_avail_sw_msix;	/* remaining MSIX SW vectors left unclaimed */
 	u16 max_pf_txqs;	/* Total Tx queues PF wide */
 	u16 max_pf_rxqs;	/* Total Rx queues PF wide */
-	u32 num_lan_msix;	/* Total MSIX vectors for base driver */
+	u16 num_lan_msix;	/* Total MSIX vectors for base driver */
 	u16 num_lan_tx;		/* num LAN Tx queues setup */
 	u16 num_lan_rx;		/* num LAN Rx queues setup */
 	u16 next_vsi;		/* Next free slot in pf->vsi[] - 0-based! */
@@ -523,6 +528,8 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
 void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
+const char *ice_stat_str(enum ice_status stat_err);
+const char *ice_aq_str(enum ice_aq_err aq_err);
 int ice_open(struct net_device *netdev);
 int ice_stop(struct net_device *netdev);
...
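[Editor's note] The two string helpers declared in the hunk above are what the later base and ethtool hunks switch their log messages to. A minimal illustrative sketch of the intended pattern follows; the wrapper name ice_log_aq_failure() is hypothetical, while dev_err(), the adminq sq_last_status field, and both helper prototypes are taken from this series:

/* Illustrative only: decode driver and AdminQ status codes into strings
 * instead of printing raw numbers, as the hunks below do.
 */
static void ice_log_aq_failure(struct ice_hw *hw, struct device *dev,
			       enum ice_status status)
{
	dev_err(dev, "operation failed, err %s aq_err %s\n",
		ice_stat_str(status),
		ice_aq_str(hw->adminq.sq_last_status));
}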
@@ -541,7 +541,7 @@ struct ice_sw_rule_lkup_rx_tx {
 #define ICE_SINGLE_ACT_OTHER_ACTS		0x3
 #define ICE_SINGLE_OTHER_ACT_IDENTIFIER_S	17
 #define ICE_SINGLE_OTHER_ACT_IDENTIFIER_M	\
-				(0x3 << \ ICE_SINGLE_OTHER_ACT_IDENTIFIER_S)
+				(0x3 << ICE_SINGLE_OTHER_ACT_IDENTIFIER_S)
 
 /* Bit 17:18 - Defines other actions */
 /* Other action = 0 - Mirror VSI */
@@ -1264,6 +1264,33 @@ struct ice_aqc_nvm_checksum {
 	u8 rsvd2[12];
 };
 
+/* The result of netlist NVM read comes in a TLV format. The actual data
+ * (netlist header) starts from word offset 1 (byte 2). The FW strips
+ * out the type field from the TLV header so all the netlist fields
+ * should adjust their offset value by 1 word (2 bytes) in order to map
+ * their correct location.
+ */
+#define ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID		0x11B
+#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET	1
+#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN		2 /* In bytes */
+#define ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET		2
+#define ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN		2 /* In bytes */
+#define ICE_AQC_NVM_NETLIST_NODE_COUNT_M		ICE_M(0x3FF, 0)
+#define ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET		5
+#define ICE_AQC_NVM_NETLIST_ID_BLK_LEN			0x30 /* In words */
+
+/* netlist ID block field offsets (word offsets) */
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW	2
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH	3
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW	4
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH	5
+#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW		6
+#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH		7
+#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW		8
+#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH		9
+#define ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH		0xA
+#define ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER		0x2F
+
 /**
  * Send to PF command (indirect 0x0801) ID is only used by PF
  *
...
@@ -24,7 +24,7 @@ static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
 	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
 	for (i = 0; i < qs_cfg->q_count; i++)
-		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
+		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
 	mutex_unlock(qs_cfg->qs_mutex);
 
 	return 0;
@@ -47,7 +47,7 @@ static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
 		if (index >= qs_cfg->pf_map_size)
 			goto err_scatter;
 		set_bit(index, qs_cfg->pf_map);
-		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
+		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
 	}
 	mutex_unlock(qs_cfg->qs_mutex);
 
@@ -96,7 +96,7 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
  * We allocate one q_vector and set default value for ITR setting associated
  * with this q_vector. If allocation fails we return -ENOMEM.
  */
-static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
+static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
 {
 	struct ice_pf *pf = vsi->back;
 	struct ice_q_vector *q_vector;
@@ -376,7 +376,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 	/* Max packet size for this queue - must not be set to a larger value
 	 * than 5 x DBUF
 	 */
-	rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
+	rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
 			       chain_len * ring->rx_buf_len);
 
 	/* Rx queue threshold in units of 64 */
@@ -453,7 +453,7 @@ int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
 	if (ret) {
 		/* contig failed, so try with scatter approach */
 		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
-		qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
+		qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
 					qs_cfg->scatter_count);
 		ret = __ice_vsi_get_qs_sc(qs_cfg);
 	}
@@ -526,7 +526,8 @@ int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
 int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
 {
 	struct device *dev = ice_pf_to_dev(vsi->back);
-	int v_idx, err;
+	u16 v_idx;
+	int err;
 
 	if (vsi->q_vectors[0]) {
 		dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
@@ -562,7 +563,7 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
 void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
 {
 	int q_vectors = vsi->num_q_vectors;
-	int tx_rings_rem, rx_rings_rem;
+	u16 tx_rings_rem, rx_rings_rem;
 	int v_id;
 
 	/* initially assigning remaining rings count to VSIs num queue value */
@@ -571,10 +572,12 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
 	for (v_id = 0; v_id < q_vectors; v_id++) {
 		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
-		int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
+		u8 tx_rings_per_v, rx_rings_per_v;
+		u16 q_id, q_base;
 
 		/* Tx rings mapping to vector */
-		tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
+		tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
+						  q_vectors - v_id);
 		q_vector->num_ring_tx = tx_rings_per_v;
 		q_vector->tx.ring = NULL;
 		q_vector->tx.itr_idx = ICE_TX_ITR;
@@ -590,7 +593,8 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
 		tx_rings_rem -= tx_rings_per_v;
 
 		/* Rx rings mapping to vector */
-		rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
+		rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
+						  q_vectors - v_id);
 		q_vector->num_ring_rx = rx_rings_per_v;
 		q_vector->rx.ring = NULL;
 		q_vector->rx.itr_idx = ICE_RX_ITR;
@@ -662,8 +666,8 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
 	status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
 				 1, qg_buf, buf_len, NULL);
 	if (status) {
-		dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
-			status);
+		dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n",
+			ice_stat_str(status));
 		return -ENODEV;
 	}
 
@@ -832,8 +836,8 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
 	} else if (status == ICE_ERR_DOES_NOT_EXIST) {
 		dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
 	} else if (status) {
-		dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
-			status);
+		dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n",
+			ice_stat_str(status));
 		return -ENODEV;
 	}
...
@@ -746,6 +746,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
 	status = ice_init_hw_tbls(hw);
 	if (status)
 		goto err_unroll_fltr_mgmt_struct;
+	mutex_init(&hw->tnl_lock);
 	return 0;
 
 err_unroll_fltr_mgmt_struct:
@@ -775,6 +776,7 @@ void ice_deinit_hw(struct ice_hw *hw)
 	ice_sched_clear_agg(hw);
 	ice_free_seg(hw);
 	ice_free_hw_tbls(hw);
+	mutex_destroy(&hw->tnl_lock);
 
 	if (hw->port_info) {
 		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
...
@@ -1128,7 +1128,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 	}
 	memcpy(&e->desc, desc, sizeof(e->desc));
 	datalen = le16_to_cpu(desc->datalen);
-	e->msg_len = min(datalen, e->buf_len);
+	e->msg_len = min_t(u16, datalen, e->buf_len);
 	if (e->msg_buf && e->msg_len)
 		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);
...
@@ -62,6 +62,64 @@ u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
 	return ena_tc;
 }
 
+/**
+ * ice_is_pfc_causing_hung_q
+ * @pf: pointer to PF structure
+ * @txqueue: Tx queue which is supposedly hung queue
+ *
+ * find if PFC is causing the hung queue, if yes return true else false
+ */
+bool ice_is_pfc_causing_hung_q(struct ice_pf *pf, unsigned int txqueue)
+{
+	u8 num_tcs = 0, i, tc, up_mapped_tc, up_in_tc = 0;
+	u64 ref_prio_xoff[ICE_MAX_UP];
+	struct ice_vsi *vsi;
+	u32 up2tc;
+
+	vsi = ice_get_main_vsi(pf);
+	if (!vsi)
+		return false;
+
+	ice_for_each_traffic_class(i)
+		if (vsi->tc_cfg.ena_tc & BIT(i))
+			num_tcs++;
+
+	/* first find out the TC to which the hung queue belongs to */
+	for (tc = 0; tc < num_tcs - 1; tc++)
+		if (ice_find_q_in_range(vsi->tc_cfg.tc_info[tc].qoffset,
+					vsi->tc_cfg.tc_info[tc + 1].qoffset,
+					txqueue))
+			break;
+
+	/* Build a bit map of all UPs associated to the suspect hung queue TC,
+	 * so that we check for its counter increment.
+	 */
+	up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
+	for (i = 0; i < ICE_MAX_UP; i++) {
+		up_mapped_tc = (up2tc >> (i * 3)) & 0x7;
+		if (up_mapped_tc == tc)
+			up_in_tc |= BIT(i);
+	}
+
+	/* Now that we figured out that hung queue is PFC enabled, still the
+	 * Tx timeout can be legitimate. So to make sure Tx timeout is
+	 * absolutely caused by PFC storm, check if the counters are
+	 * incrementing.
+	 */
+	for (i = 0; i < ICE_MAX_UP; i++)
+		if (up_in_tc & BIT(i))
+			ref_prio_xoff[i] = pf->stats.priority_xoff_rx[i];
+
+	ice_update_dcb_stats(pf);
+
+	for (i = 0; i < ICE_MAX_UP; i++)
+		if (up_in_tc & BIT(i))
+			if (pf->stats.priority_xoff_rx[i] > ref_prio_xoff[i])
+				return true;
+
+	return false;
+}
+
 /**
  * ice_dcb_get_mode - gets the DCB mode
  * @port_info: pointer to port info structure
@@ -526,16 +584,21 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
  */
 static bool ice_dcb_tc_contig(u8 *prio_table)
 {
-	u8 max_tc = 0;
+	bool found_empty = false;
+	u8 used_tc = 0;
 	int i;
 
-	for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) {
-		u8 cur_tc = prio_table[i];
+	/* Create a bitmap of used TCs */
+	for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
+		used_tc |= BIT(prio_table[i]);
 
-		if (cur_tc > max_tc)
+	for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) {
+		if (used_tc & BIT(i)) {
+			if (found_empty)
 				return false;
-		else if (cur_tc == max_tc)
-			max_tc++;
+		} else {
+			found_empty = true;
+		}
 	}
 
 	return true;
...
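[Editor's note] In the timeout path, ice_is_pfc_causing_hung_q() (added above) lets the driver skip a reset when a stalled queue is only being back-pressured by PFC. A hedged sketch of how a Tx timeout handler could consult it; the handler body and name are illustrative, not the exact hunk from this series:

/* Illustrative sketch only: consult ice_is_pfc_causing_hung_q() before
 * treating a stalled queue as a real hang. The ice_netdev_priv/np->vsi
 * access pattern follows what is used elsewhere in this patch set.
 */
static void ice_tx_timeout_sketch(struct net_device *netdev, unsigned int txqueue)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
		/* XOFF counters are still incrementing: the queue is
		 * paused by PFC, not hung, so do not schedule a reset.
		 */
		netdev_info(netdev, "Queue %u paused by PFC storm, skipping reset\n",
			    txqueue);
		return;
	}

	/* ...otherwise fall through to the normal recovery/reset path */
}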
@@ -17,6 +17,8 @@
 void ice_dcb_rebuild(struct ice_pf *pf);
 u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
 u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
+void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi);
+bool ice_is_pfc_causing_hung_q(struct ice_pf *pf, unsigned int txqueue);
 u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
 int
 ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
@@ -32,6 +34,20 @@ void
 ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 				    struct ice_rq_event_info *event);
 void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
+
+/**
+ * ice_find_q_in_range
+ * @low: start of queue range for a TC i.e. offset of TC
+ * @high: start of queue for next TC
+ * @tx_q: hung_queue/tx_queue
+ *
+ * finds if queue 'tx_q' falls between the two offsets of any given TC
+ */
+static inline bool ice_find_q_in_range(u16 low, u16 high, unsigned int tx_q)
+{
+	return (tx_q >= low) && (tx_q < high);
+}
+
 static inline void
 ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
 {
@@ -79,6 +95,13 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
 	return 0;
 }
 
+static inline bool
+ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf,
+			  unsigned int __always_unused txqueue)
+{
+	return false;
+}
+
 #define ice_update_dcb_stats(pf) do {} while (0)
 #define ice_pf_dcb_recfg(pf) do {} while (0)
 #define ice_vsi_cfg_dcb_rings(vsi) do {} while (0)
...
@@ -105,6 +105,27 @@ static int ice_info_ddp_pkg_version(struct ice_pf *pf, char *buf, size_t len)
 	return 0;
 }
 
+static int ice_info_netlist_ver(struct ice_pf *pf, char *buf, size_t len)
+{
+	struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;
+
+	/* The netlist version fields are BCD formatted */
+	snprintf(buf, len, "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor,
+		 netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev,
+		 netlist->cust_ver);
+
+	return 0;
+}
+
+static int ice_info_netlist_build(struct ice_pf *pf, char *buf, size_t len)
+{
+	struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;
+
+	snprintf(buf, len, "0x%08x", netlist->hash);
+
+	return 0;
+}
+
 #define fixed(key, getter) { ICE_VERSION_FIXED, key, getter }
 #define running(key, getter) { ICE_VERSION_RUNNING, key, getter }
 
@@ -128,6 +149,8 @@ static const struct ice_devlink_version {
 	running(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack),
 	running("fw.app.name", ice_info_ddp_pkg_name),
 	running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
+	running("fw.netlist", ice_info_netlist_ver),
+	running("fw.netlist.build", ice_info_netlist_build),
 };
 
 /**
...
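[Editor's note] Tying the getter above to the sample value in the documentation hunk: a hypothetical field breakdown that would produce the documented string. Only the format string and field order are from this diff; the concrete register contents are illustrative:

/* Illustrative decode: with the "%x.%x.%x-%x.%x.%x" format above, the
 * documented sample "1.1.2000-6.7.0" would correspond to:
 *   major    = 0x1        -> "1"
 *   minor    = 0x1        -> "1"
 *   type     = 0x20000006 -> high word "2000", low word "6"
 *   rev      = 0x7        -> "7"
 *   cust_ver = 0x0        -> "0"
 */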
@@ -5,6 +5,7 @@
 
 #include "ice.h"
 #include "ice_flow.h"
+#include "ice_fltr.h"
 #include "ice_lib.h"
 #include "ice_dcb_lib.h"
 
@@ -157,6 +158,8 @@ struct ice_priv_flag {
 static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
 	ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
 	ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
+	ICE_PRIV_FLAG("vf-true-promisc-support",
+		      ICE_FLAG_VF_TRUE_PROMISC_ENA),
 	ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
 	ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
 };
@@ -273,8 +276,9 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
 
 	status = ice_acquire_nvm(hw, ICE_RES_READ);
 	if (status) {
-		dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
-			status, hw->adminq.sq_last_status);
+		dev_err(dev, "ice_acquire_nvm failed, err %s aq_err %s\n",
+			ice_stat_str(status),
+			ice_aq_str(hw->adminq.sq_last_status));
 		ret = -EIO;
 		goto out;
 	}
@@ -282,8 +286,9 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
 	status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf,
 				   false);
 	if (status) {
-		dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %d\n",
-			status, hw->adminq.sq_last_status);
+		dev_err(dev, "ice_read_flat_nvm failed, err %s aq_err %s\n",
+			ice_stat_str(status),
+			ice_aq_str(hw->adminq.sq_last_status));
 		ret = -EIO;
 		goto release;
 	}
@@ -332,7 +337,8 @@ static u64 ice_link_test(struct net_device *netdev)
 	netdev_info(netdev, "link test\n");
 	status = ice_get_link_status(np->vsi->port_info, &link_up);
 	if (status) {
-		netdev_err(netdev, "link query error, status = %d\n", status);
+		netdev_err(netdev, "link query error, status = %s\n",
+			   ice_stat_str(status));
 		return 1;
 	}
 
@@ -671,7 +677,6 @@ static u64 ice_loopback_test(struct net_device *netdev)
 	struct ice_ring *tx_ring, *rx_ring;
 	u8 broadcast[ETH_ALEN], ret = 0;
 	int num_frames, valid_frames;
-	LIST_HEAD(tmp_list);
 	struct device *dev;
 	u8 *tx_frame;
 	int i;
@@ -707,16 +712,11 @@ static u64 ice_loopback_test(struct net_device *netdev)
 
 	/* Test VSI needs to receive broadcast packets */
 	eth_broadcast_addr(broadcast);
-	if (ice_add_mac_to_list(test_vsi, &tmp_list, broadcast)) {
+	if (ice_fltr_add_mac(test_vsi, broadcast, ICE_FWD_TO_VSI)) {
 		ret = 5;
 		goto lbtest_mac_dis;
 	}
 
-	if (ice_add_mac(&pf->hw, &tmp_list)) {
-		ret = 6;
-		goto free_mac_list;
-	}
-
 	if (ice_lbtest_create_frame(pf, &tx_frame, ICE_LB_FRAME_SIZE)) {
 		ret = 7;
 		goto remove_mac_filters;
@@ -739,10 +739,8 @@ static u64 ice_loopback_test(struct net_device *netdev)
 lbtest_free_frame:
 	devm_kfree(dev, tx_frame);
 remove_mac_filters:
-	if (ice_remove_mac(&pf->hw, &tmp_list))
+	if (ice_fltr_remove_mac(test_vsi, broadcast, ICE_FWD_TO_VSI))
 		netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
-free_mac_list:
-	ice_free_fltr_list(dev, &tmp_list);
 lbtest_mac_dis:
 	/* Disable MAC loopback after the test is completed. */
 	if (ice_aq_set_mac_loopback(&pf->hw, false, NULL))
@@ -1158,8 +1156,9 @@ static int ice_nway_reset(struct net_device *netdev)
 
 	status = ice_aq_set_link_restart_an(pi, false, NULL);
 	if (status) {
-		netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
-			    status, pi->hw->adminq.sq_last_status);
+		netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
+			    ice_stat_str(status),
+			    ice_aq_str(pi->hw->adminq.sq_last_status));
 		return -EIO;
 	}
 
@@ -1308,6 +1307,16 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 			ice_down(vsi);
 			ice_up(vsi);
 		}
 	}
+	/* don't allow modification of this flag when a single VF is in
+	 * promiscuous mode because it's not supported
+	 */
+	if (test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, change_flags) &&
+	    ice_is_any_vf_in_promisc(pf)) {
+		dev_err(dev, "Changing vf-true-promisc-support flag while VF(s) are in promiscuous mode not supported\n");
+		/* toggle bit back to previous state */
+		change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags);
+		ret = -EAGAIN;
+	}
 	clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
 	return ret;
 }
@@ -2450,8 +2459,8 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
 
 	status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs);
 	if (status) {
-		dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
-			vsi->vsi_num, status);
+		dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %s\n",
+			vsi->vsi_num, ice_stat_str(status));
 		return -EINVAL;
 	}
 
@@ -2593,7 +2602,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 	struct ice_vsi *vsi = np->vsi;
 	struct ice_pf *pf = vsi->back;
 	int i, timeout = 50, err = 0;
-	u32 new_rx_cnt, new_tx_cnt;
+	u16 new_rx_cnt, new_tx_cnt;
 
 	if (ring->tx_pending > ICE_MAX_NUM_DESC ||
 	    ring->tx_pending < ICE_MIN_NUM_DESC ||
@@ -2645,8 +2654,8 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 		if (ice_is_xdp_ena_vsi(vsi))
 			for (i = 0; i < vsi->num_xdp_txq; i++)
 				vsi->xdp_rings[i]->count = new_tx_cnt;
-		vsi->num_tx_desc = new_tx_cnt;
-		vsi->num_rx_desc = new_rx_cnt;
+		vsi->num_tx_desc = (u16)new_tx_cnt;
+		vsi->num_rx_desc = (u16)new_rx_cnt;
 		netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
 		goto done;
 	}
@@ -2952,31 +2961,22 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
 
 	status = ice_set_fc(pi, &aq_failures, link_up);
 
 	if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
-		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %d\n",
-			    status, hw->adminq.sq_last_status);
+		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
+			    ice_stat_str(status),
+			    ice_aq_str(hw->adminq.sq_last_status));
 		err = -EAGAIN;
 	} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
-		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %d\n",
-			    status, hw->adminq.sq_last_status);
+		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
+			    ice_stat_str(status),
+			    ice_aq_str(hw->adminq.sq_last_status));
 		err = -EAGAIN;
 	} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
-		netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %d\n",
-			    status, hw->adminq.sq_last_status);
+		netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
+			    ice_stat_str(status),
+			    ice_aq_str(hw->adminq.sq_last_status));
 		err = -EAGAIN;
 	}
 
-	if (!test_bit(__ICE_DOWN, pf->state)) {
-		/* Give it a little more time to try to come back. If still
-		 * down, restart autoneg link or reinitialize the interface.
-		 */
-		msleep(75);
-		if (!test_bit(__ICE_DOWN, pf->state))
-			return ice_nway_reset(netdev);
-
-		ice_down(vsi);
-		ice_up(vsi);
-	}
-
 	return err;
 }
@@ -3227,8 +3227,9 @@ static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
 	status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, lut,
 				    vsi->rss_table_size);
 	if (status) {
-		dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n",
-			status, hw->adminq.rq_last_status);
+		dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n",
+			ice_stat_str(status),
+			ice_aq_str(hw->adminq.rq_last_status));
 		err = -EIO;
 	}
...
@@ -18,6 +18,11 @@
 
 #define ICE_PKG_CNT 4
 
+enum ice_status
+ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port);
+enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);
+bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index);
+
 enum ice_status
 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
 	     struct ice_fv_word *es);
...
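[Editor's note] These three prototypes are the entry points the UDP tunnel plumbing in this series calls into. A hedged sketch of a plausible call sequence when the stack reports a new VXLAN port; the wrapper functions are hypothetical, while the three prototypes, TNL_VXLAN, and the non-zero-on-error ice_status convention are taken from this diff:

/* Illustrative only: program a VXLAN UDP port into the boost TCAM if it
 * is not already tracked, and drop it again on teardown.
 */
static void ice_udp_tunnel_add_sketch(struct ice_hw *hw, u16 port)
{
	u16 index;

	if (ice_tunnel_port_in_use(hw, port, &index))
		return;	/* already programmed; the entry tracks a refcount */

	if (ice_create_tunnel(hw, TNL_VXLAN, port))
		return;	/* non-zero ice_status: log/handle the failure */
}

static void ice_udp_tunnel_del_sketch(struct ice_hw *hw, u16 port)
{
	/* 'all' = false: remove only this port's entry */
	ice_destroy_tunnel(hw, port, false);
}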
@@ -149,6 +149,7 @@ struct ice_buf_hdr {
 #define ICE_SID_CDID_REDIR_RSS		48
 
 #define ICE_SID_RXPARSER_BOOST_TCAM	56
+#define ICE_SID_TXPARSER_BOOST_TCAM	66
 
 #define ICE_SID_XLT0_PE			80
 #define ICE_SID_XLT_KEY_BUILDER_PE	81
@@ -291,6 +292,38 @@ struct ice_pkg_enum {
 	void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
 };
 
+/* Tunnel enabling */
+
+enum ice_tunnel_type {
+	TNL_VXLAN = 0,
+	TNL_GENEVE,
+	TNL_LAST = 0xFF,
+	TNL_ALL = 0xFF,
+};
+
+struct ice_tunnel_type_scan {
+	enum ice_tunnel_type type;
+	const char *label_prefix;
+};
+
+struct ice_tunnel_entry {
+	enum ice_tunnel_type type;
+	u16 boost_addr;
+	u16 port;
+	u16 ref;
+	struct ice_boost_tcam_entry *boost_entry;
+	u8 valid;
+	u8 in_use;
+	u8 marked;
+};
+
+#define ICE_TUNNEL_MAX_ENTRIES	16
+
+struct ice_tunnel_table {
+	struct ice_tunnel_entry tbl[ICE_TUNNEL_MAX_ENTRIES];
+	u16 count;
+};
+
 struct ice_pkg_es {
 	__le16 count;
 	__le16 offset;
...
@@ -42,7 +42,10 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
 	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
 	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
+	/* GRE */
+	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
+	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
+			  sizeof_field(struct gre_full_hdr, key)),
 };
 
 /* Bitmaps indicating relevant packet types for a particular protocol header
@@ -134,6 +137,18 @@ static const u32 ice_ptypes_sctp_il[] = {
 	0x00000000, 0x00000000, 0x00000000, 0x00000000,
 };
 
+/* Packet types for packets with an Outermost/First GRE header */
+static const u32 ice_ptypes_gre_of[] = {
+	0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
+	0x0000017E, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
 /* Manage parameters and info. used during the creation of a flow profile */
 struct ice_flow_prof_params {
 	enum ice_block blk;
@@ -225,6 +240,12 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
 			src = (const unsigned long *)ice_ptypes_sctp_il;
 			bitmap_and(params->ptypes, params->ptypes, src,
 				   ICE_FLOW_PTYPE_MAX);
+		} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
+			if (!i) {
+				src = (const unsigned long *)ice_ptypes_gre_of;
+				bitmap_and(params->ptypes, params->ptypes,
+					   src, ICE_FLOW_PTYPE_MAX);
+			}
 		}
 	}
 
@@ -275,6 +296,9 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
 	case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
 		prot_id = ICE_PROT_SCTP_IL;
 		break;
+	case ICE_FLOW_FIELD_IDX_GRE_KEYID:
+		prot_id = ICE_PROT_GRE_OF;
+		break;
 	default:
 		return ICE_ERR_NOT_IMPL;
 	}
@@ -945,6 +969,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
 #define ICE_FLOW_PROF_ENCAP_M	(BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
 
 #define ICE_RSS_OUTER_HEADERS	1
+#define ICE_RSS_INNER_HEADERS	2
 
 /* Flow profile ID format:
  * [0:31] - Packet match fields
@@ -1085,6 +1110,9 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
 	mutex_lock(&hw->rss_locks);
 	status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
 				      ICE_RSS_OUTER_HEADERS);
+	if (!status)
+		status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
+					      addl_hdrs, ICE_RSS_INNER_HEADERS);
 	mutex_unlock(&hw->rss_locks);
 
 	return status;
@@ -1238,6 +1266,12 @@ enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
 						      ICE_RSS_OUTER_HEADERS);
 			if (status)
 				break;
+			status = ice_add_rss_cfg_sync(hw, vsi_handle,
+						      r->hashed_flds,
+						      r->packet_hdr,
+						      ICE_RSS_INNER_HEADERS);
+			if (status)
+				break;
 		}
 	}
 	mutex_unlock(&hw->rss_locks);
...
@@ -43,6 +43,7 @@ enum ice_flow_seg_hdr {
 	ICE_FLOW_SEG_HDR_TCP		= 0x00000040,
 	ICE_FLOW_SEG_HDR_UDP		= 0x00000080,
 	ICE_FLOW_SEG_HDR_SCTP		= 0x00000100,
+	ICE_FLOW_SEG_HDR_GRE		= 0x00000200,
 };
 
 enum ice_flow_field {
@@ -58,6 +59,8 @@ enum ice_flow_field {
 	ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
 	ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
 	ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
+	/* GRE */
+	ICE_FLOW_FIELD_IDX_GRE_KEYID,
 	/* The total number of enums must not exceed 64 */
 	ICE_FLOW_FIELD_IDX_MAX
 };
...
...
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2020, Intel Corporation. */
#ifndef _ICE_FLTR_H_
#define _ICE_FLTR_H_
void ice_fltr_free_list(struct device *dev, struct list_head *h);
enum ice_status
ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list,
const u8 *mac, enum ice_sw_fwd_act_type action);
enum ice_status
ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action);
enum ice_status
ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action);
enum ice_status
ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list);
enum ice_status
ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action);
enum ice_status
ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list);
enum ice_status
ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vid,
enum ice_sw_fwd_act_type action);
enum ice_status
ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vid,
enum ice_sw_fwd_act_type action);
enum ice_status
ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action);
enum ice_status
ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action);
void ice_fltr_remove_all(struct ice_vsi *vsi);
#endif
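
[Editor's note] A hedged usage sketch of this new API, mirroring how the ethtool loopback test above switches over to it. Only the prototypes in this header, eth_broadcast_addr()/ETH_ALEN, and the ICE_FWD_TO_VSI action seen in this series are assumed; the wrapper function is hypothetical:

/* Illustrative only: install and tear down a broadcast MAC filter on a
 * VSI through the new ice_fltr_* layer.
 */
static int ice_fltr_bcast_sketch(struct ice_vsi *vsi)
{
	u8 broadcast[ETH_ALEN];

	eth_broadcast_addr(broadcast);

	/* forward broadcast frames to this VSI; non-zero means failure */
	if (ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI))
		return -EIO;

	/* ... use the VSI ... */

	/* remove the filter with the same address/action pair */
	if (ice_fltr_remove_mac(vsi, broadcast, ICE_FWD_TO_VSI))
		return -EIO;

	return 0;
}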
@@ -58,6 +58,7 @@
 #define PRTDCB_GENS			0x00083020
 #define PRTDCB_GENS_DCBX_STATUS_S	0
 #define PRTDCB_GENS_DCBX_STATUS_M	ICE_M(0x7, 0)
+#define PRTDCB_TUP2TC			0x001D26C0 /* Reset Source: CORER */
 #define GL_PREEXT_L2_PMASK0(_i)		(0x0020F0FC + ((_i) * 4))
 #define GL_PREEXT_L2_PMASK1(_i)		(0x0020F108 + ((_i) * 4))
 #define GLFLXP_RXDID_FLX_WRD_0(_i)	(0x0045c800 + ((_i) * 4))
...
@@ -262,6 +262,12 @@ enum ice_rx_flex_desc_status_error_0_bits {
 	ICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
 };
 
+enum ice_rx_flex_desc_status_error_1_bits {
+	/* Note: These are predefined bit offsets */
+	ICE_RX_FLEX_DESC_STATUS1_NAT_S = 4,
+	ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
+};
+
 #define ICE_RXQ_CTX_SIZE_DWORDS		8
 #define ICE_RXQ_CTX_SZ			(ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
 #define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS	22
@@ -413,6 +419,25 @@ enum ice_tx_ctx_desc_cmd_bits {
 	ICE_TX_CTX_DESC_RESERVED	= 0x40
 };
 
+enum ice_tx_ctx_desc_eipt_offload {
+	ICE_TX_CTX_EIPT_NONE		= 0x0,
+	ICE_TX_CTX_EIPT_IPV6		= 0x1,
+	ICE_TX_CTX_EIPT_IPV4_NO_CSUM	= 0x2,
+	ICE_TX_CTX_EIPT_IPV4		= 0x3
+};
+
+#define ICE_TXD_CTX_QW0_EIPLEN_S	2
+
+#define ICE_TXD_CTX_QW0_L4TUNT_S	9
+
+#define ICE_TXD_CTX_UDP_TUNNELING	BIT_ULL(ICE_TXD_CTX_QW0_L4TUNT_S)
+#define ICE_TXD_CTX_GRE_TUNNELING	(0x2ULL << ICE_TXD_CTX_QW0_L4TUNT_S)
+
+#define ICE_TXD_CTX_QW0_NATLEN_S	12
+
+#define ICE_TXD_CTX_QW0_L4T_CS_S	23
+#define ICE_TXD_CTX_QW0_L4T_CS_M	BIT_ULL(ICE_TXD_CTX_QW0_L4T_CS_S)
+
 #define ICE_LAN_TXQ_MAX_QGRPS	127
 #define ICE_LAN_TXQ_MAX_QDIS	1023
...
...
@@ -8,12 +8,6 @@
 
 const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
 
-int
-ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
-		    const u8 *macaddr);
-
-void ice_free_fltr_list(struct device *dev, struct list_head *h);
-
 void ice_update_eth_stats(struct ice_vsi *vsi);
 
 int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
@@ -22,7 +16,8 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
 
 void ice_vsi_cfg_msix(struct ice_vsi *vsi);
 
-int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid);
+int
+ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action);
 
 int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);
...
...
@@ -366,6 +366,87 @@ static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
 	return 0;
 }
 
+/**
+ * ice_get_netlist_ver_info
+ * @hw: pointer to the HW struct
+ *
+ * Get the netlist version information
+ */
+static enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw)
+{
+	struct ice_netlist_ver_info *ver = &hw->netlist_ver;
+	enum ice_status ret;
+	u32 id_blk_start;
+	__le16 raw_data;
+	u16 data, i;
+	u16 *buff;
+
+	ret = ice_acquire_nvm(hw, ICE_RES_READ);
+	if (ret)
+		return ret;
+	buff = kcalloc(ICE_AQC_NVM_NETLIST_ID_BLK_LEN, sizeof(*buff),
+		       GFP_KERNEL);
+	if (!buff) {
+		ret = ICE_ERR_NO_MEMORY;
+		goto exit_no_mem;
+	}
+
+	/* read module length */
+	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
+			      ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2,
+			      ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data,
+			      false, false, NULL);
+	if (ret)
+		goto exit_error;
+
+	data = le16_to_cpu(raw_data);
+	/* exit if length is = 0 */
+	if (!data)
+		goto exit_error;
+
+	/* read node count */
+	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
+			      ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2,
+			      ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data,
+			      false, false, NULL);
+	if (ret)
+		goto exit_error;
+	data = le16_to_cpu(raw_data) & ICE_AQC_NVM_NETLIST_NODE_COUNT_M;
+
+	/* netlist ID block starts from offset 4 + node count * 2 */
+	id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2;
+
+	/* read the entire netlist ID block */
+	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
+			      id_blk_start * 2,
+			      ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false,
+			      false, NULL);
+	if (ret)
+		goto exit_error;
+
+	for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++)
+		buff[i] = le16_to_cpu(((__force __le16 *)buff)[i]);
+
+	ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
+		     buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW];
+	ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
+		     buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW];
+	ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) |
+		    buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW];
+	ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) |
+		   buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW];
+	ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER];
+	/* Read the left most 4 bytes of SHA */
+	ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 |
+		    buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14];
+
+exit_error:
+	kfree(buff);
+exit_no_mem:
+	ice_release_nvm(hw);
+	return ret;
+}
+
 /**
  * ice_discover_flash_size - Discover the available flash size.
  * @hw: pointer to the HW struct
@@ -515,6 +596,11 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
 		return status;
 	}
 
+	/* read the netlist version information */
+	status = ice_get_netlist_ver_info(hw);
+	if (status)
+		ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");
+
 	return 0;
 }
...
@@ -18,6 +18,7 @@ enum ice_prot_id {
 	ICE_PROT_IPV6_IL	= 41,
 	ICE_PROT_TCP_IL		= 49,
 	ICE_PROT_UDP_IL_OR_S	= 53,
+	ICE_PROT_GRE_OF		= 64,
 	ICE_PROT_SCTP_IL	= 96,
 	ICE_PROT_META_ID	= 255, /* when offset == metadata */
 	ICE_PROT_INVALID	= 255  /* when offset == ICE_FV_OFFSET_INVAL */
...
@@ -1917,7 +1917,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
  */
 static enum ice_status
 ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
-			    enum ice_rl_type rl_type, u8 bw_alloc)
+			    enum ice_rl_type rl_type, u16 bw_alloc)
 {
 	struct ice_aqc_txsched_elem_data buf;
 	struct ice_aqc_txsched_elem *data;
...
@@ -593,8 +593,8 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
 			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
 				is_vf = true;
 
-			res_type = le16_to_cpu(ele->vsi_port_num) >>
-					ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
+			res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
+					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
 
 			if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
 				/* FW VSI is not needed. Just continue. */
@@ -1618,12 +1618,12 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
 	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
 	struct ice_fltr_list_entry *m_list_itr;
 	struct list_head *rule_head;
-	u16 elem_sent, total_elem_left;
+	u16 total_elem_left, s_rule_size;
 	struct ice_switch_info *sw;
 	struct mutex *rule_lock; /* Lock to protect filter rule list */
 	enum ice_status status = 0;
 	u16 num_unicast = 0;
-	u16 s_rule_size;
+	u8 elem_sent;
 
 	if (!m_list || !hw)
 		return ICE_ERR_PARAM;
@@ -1707,8 +1707,8 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
 	     total_elem_left -= elem_sent) {
 		struct ice_aqc_sw_rules_elem *entry = r_iter;
 
-		elem_sent = min(total_elem_left,
-				(u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
+		elem_sent = min_t(u8, total_elem_left,
+				  ICE_AQ_MAX_BUF_LEN / s_rule_size);
 		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
 					 elem_sent, ice_aqc_opc_add_sw_rules,
 					 NULL);
...
...@@ -819,7 +819,7 @@ static struct sk_buff * ...@@ -819,7 +819,7 @@ static struct sk_buff *
ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct xdp_buff *xdp) struct xdp_buff *xdp)
{ {
unsigned int metasize = xdp->data - xdp->data_meta; u8 metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192) #if (PAGE_SIZE < 8192)
unsigned int truesize = ice_rx_pg_size(rx_ring) / 2; unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else #else
...@@ -934,7 +934,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf, ...@@ -934,7 +934,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
*/ */
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{ {
u32 ntc = rx_ring->next_to_clean + 1; u16 ntc = rx_ring->next_to_clean + 1;
/* fetch, update, and store next to clean */ /* fetch, update, and store next to clean */
ntc = (ntc < rx_ring->count) ? ntc : 0; ntc = (ntc < rx_ring->count) ? ntc : 0;
...@@ -1544,7 +1544,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget) ...@@ -1544,7 +1544,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* don't allow the budget to go below 1 because that would exit * don't allow the budget to go below 1 because that would exit
* polling early. * polling early.
*/ */
budget_per_ring = max(budget / q_vector->num_ring_rx, 1); budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
else else
/* Max of 1 Rx ring in this q_vector so give it the budget */ /* Max of 1 Rx ring in this q_vector so give it the budget */
budget_per_ring = budget; budget_per_ring = budget;
...@@ -1680,7 +1680,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, ...@@ -1680,7 +1680,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
*/ */
while (unlikely(size > ICE_MAX_DATA_PER_TXD)) { while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz = tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, max_data, td_tag); ice_build_ctob(td_cmd, td_offset, max_data,
td_tag);
tx_desc++; tx_desc++;
i++; i++;
...@@ -1700,7 +1701,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, ...@@ -1700,7 +1701,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
if (likely(!data_len)) if (likely(!data_len))
break; break;
tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
size, td_tag); size, td_tag);
tx_desc++; tx_desc++;
...@@ -1732,8 +1733,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, ...@@ -1732,8 +1733,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
/* write last descriptor with RS and EOP bits */ /* write last descriptor with RS and EOP bits */
td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD; td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size, tx_desc->cmd_type_offset_bsz =
td_tag); ice_build_ctob(td_cmd, td_offset, size, td_tag);
/* Force memory writes to complete before letting h/w know there /* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. * are new descriptors to fetch.
@@ -1807,12 +1808,94 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
 	l2_len = ip.hdr - skb->data;
 	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
 
-	if (skb->encapsulation)
-		return -1;
+	protocol = vlan_get_protocol(skb);
+
+	if (protocol == htons(ETH_P_IP))
+		first->tx_flags |= ICE_TX_FLAGS_IPV4;
+	else if (protocol == htons(ETH_P_IPV6))
+		first->tx_flags |= ICE_TX_FLAGS_IPV6;
+
+	if (skb->encapsulation) {
+		bool gso_ena = false;
+		u32 tunnel = 0;
+
+		/* define outer network header type */
+		if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
+			tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
+				  ICE_TX_CTX_EIPT_IPV4 :
+				  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
+			l4_proto = ip.v4->protocol;
+		} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
+			tunnel |= ICE_TX_CTX_EIPT_IPV6;
+			exthdr = ip.hdr + sizeof(*ip.v6);
+			l4_proto = ip.v6->nexthdr;
+			if (l4.hdr != exthdr)
+				ipv6_skip_exthdr(skb, exthdr - skb->data,
+						 &l4_proto, &frag_off);
+		}
+
+		/* define outer transport */
+		switch (l4_proto) {
+		case IPPROTO_UDP:
+			tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
+			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
+			break;
+		case IPPROTO_GRE:
+			tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
+			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
+			break;
+		case IPPROTO_IPIP:
+		case IPPROTO_IPV6:
+			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
+			l4.hdr = skb_inner_network_header(skb);
+			break;
+		default:
+			if (first->tx_flags & ICE_TX_FLAGS_TSO)
+				return -1;
+
+			skb_checksum_help(skb);
+			return 0;
+		}
+
+		/* compute outer L3 header size */
+		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+			  ICE_TXD_CTX_QW0_EIPLEN_S;
+
+		/* switch IP header pointer from outer to inner header */
+		ip.hdr = skb_inner_network_header(skb);
+
+		/* compute tunnel header size */
+		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
+			  ICE_TXD_CTX_QW0_NATLEN_S;
+
+		gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
+		/* indicate if we need to offload outer UDP header */
+		if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
+		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+			tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
+
+		/* record tunnel offload values */
+		off->cd_tunnel_params |= tunnel;
+
+		/* set DTYP=1 to indicate that it's an Tx context descriptor
+		 * in IPsec tunnel mode with Tx offloads in Quad word 1
+		 */
+		off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
+
+		/* switch L4 header pointer from outer to inner */
+		l4.hdr = skb_inner_transport_header(skb);
+		l4_proto = 0;
+
+		/* reset type as we transition from outer to inner headers */
+		first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
+		if (ip.v4->version == 4)
+			first->tx_flags |= ICE_TX_FLAGS_IPV4;
+		if (ip.v6->version == 6)
+			first->tx_flags |= ICE_TX_FLAGS_IPV6;
+	}
 
 	/* Enable IP checksum offloads */
-	protocol = vlan_get_protocol(skb);
-	if (protocol == htons(ETH_P_IP)) {
+	if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
 		l4_proto = ip.v4->protocol;
 		/* the stack computes the IP header already, the only time we
 		 * need the hardware to recompute it is in the case of TSO.
@@ -1822,7 +1905,7 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
 	else
 		cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
 
-	} else if (protocol == htons(ETH_P_IPV6)) {
+	} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
 		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
 		exthdr = ip.hdr + sizeof(*ip.v6);
 		l4_proto = ip.v6->nexthdr;
@@ -1944,7 +2027,8 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
 		unsigned char *hdr;
 	} l4;
 	u64 cd_mss, cd_tso_len;
-	u32 paylen, l4_start;
+	u32 paylen;
+	u8 l4_start;
 	int err;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1969,8 +2053,42 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
 		ip.v6->payload_len = 0;
 	}
 
+	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+					 SKB_GSO_GRE_CSUM |
+					 SKB_GSO_IPXIP4 |
+					 SKB_GSO_IPXIP6 |
+					 SKB_GSO_UDP_TUNNEL |
+					 SKB_GSO_UDP_TUNNEL_CSUM)) {
+		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+			l4.udp->len = 0;
+
+			/* determine offset of outer transport header */
+			l4_start = (u8)(l4.hdr - skb->data);
+
+			/* remove payload length from outer checksum */
+			paylen = skb->len - l4_start;
+			csum_replace_by_diff(&l4.udp->check,
+					     (__force __wsum)htonl(paylen));
+		}
+
+		/* reset pointers to inner headers */
+		/* cppcheck-suppress unreadVariable */
+		ip.hdr = skb_inner_network_header(skb);
+		l4.hdr = skb_inner_transport_header(skb);
+
+		/* initialize inner IP header fields */
+		if (ip.v4->version == 4) {
+			ip.v4->tot_len = 0;
+			ip.v4->check = 0;
+		} else {
+			ip.v6->payload_len = 0;
+		}
+	}
+
 	/* determine offset of transport header */
-	l4_start = l4.hdr - skb->data;
+	l4_start = (u8)(l4.hdr - skb->data);
 
 	/* remove payload length from checksum */
 	paylen = skb->len - l4_start;
@@ -1979,12 +2097,12 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
 		csum_replace_by_diff(&l4.udp->check,
 				     (__force __wsum)htonl(paylen));
 		/* compute length of UDP segmentation header */
-		off->header_len = sizeof(l4.udp) + l4_start;
+		off->header_len = (u8)sizeof(l4.udp) + l4_start;
 	} else {
 		csum_replace_by_diff(&l4.tcp->check,
 				     (__force __wsum)htonl(paylen));
 		/* compute length of TCP segmentation header */
-		off->header_len = (l4.tcp->doff * 4) + l4_start;
+		off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
 	}
 
 	/* update gso_segs and bytecount */
@@ -2215,7 +2333,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
 	if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
 		struct ice_tx_ctx_desc *cdesc;
-		int i = tx_ring->next_to_use;
+		u16 i = tx_ring->next_to_use;
 
 		/* grab the next descriptor */
 		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
......
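Note on the new encapsulation path in ice_tx_csum() above: the outer L3 header length is programmed in 4-byte words (a 20-byte outer IPv4 header gives an EIPLEN of 5) and the tunnel header length in 2-byte words (for VXLAN, 8 bytes of outer UDP plus 8 bytes of VXLAN header plus 14 bytes of inner Ethernet gives a NATLEN of 15). The values accumulated in off->cd_tunnel_params and off->cd_qw1 are later flushed into the context descriptor that ice_xmit_frame_ring() grabs in the last hunk. A minimal sketch of that write-out, assuming the usual ice_tx_ctx_desc field names (tunneling_params, l2tag2, rsvd, qw1) and an offload.cd_l2tag2 member, none of which are quoted from this diff:

	/* sketch: copy the gathered offload params into the Tx context
	 * descriptor; 'offload' is the ice_tx_offload_params built by
	 * ice_tx_csum()/ice_tso(), 'cdesc' the descriptor grabbed above */
	cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
	cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
	cdesc->rsvd = cpu_to_le16(0);
	cdesc->qw1 = cpu_to_le64(offload.cd_qw1);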
@@ -113,6 +113,9 @@ static inline int ice_skb_pad(void)
 #define ICE_TX_FLAGS_TSO	BIT(0)
 #define ICE_TX_FLAGS_HW_VLAN	BIT(1)
 #define ICE_TX_FLAGS_SW_VLAN	BIT(2)
+#define ICE_TX_FLAGS_IPV4	BIT(5)
+#define ICE_TX_FLAGS_IPV6	BIT(6)
+#define ICE_TX_FLAGS_TUNNEL	BIT(7)
 #define ICE_TX_FLAGS_VLAN_M	0xffff0000
 #define ICE_TX_FLAGS_VLAN_PR_M	0xe0000000
 #define ICE_TX_FLAGS_VLAN_PR_S	29
......
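The three new bits extend the boolean flag space in the low half of tx_flags, while the upper half stays reserved for the VLAN tag per ICE_TX_FLAGS_VLAN_M. For illustration, using only macros shown in this hunk:

	/* illustration: the new bits are simple booleans */
	bool is_tunneled = !!(first->tx_flags & ICE_TX_FLAGS_TUNNEL);

	/* while the VLAN priority still comes out of the masked
	 * upper bits via mask-and-shift */
	u8 vlan_prio = (first->tx_flags & ICE_TX_FLAGS_VLAN_PR_M) >>
		       ICE_TX_FLAGS_VLAN_PR_S;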
@@ -8,7 +8,7 @@
  * @rx_ring: ring to bump
  * @val: new head index
  */
-void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
+void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val)
 {
 	u16 prev_ntu = rx_ring->next_to_use & ~0x7;
@@ -84,12 +84,17 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
 	    union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
 {
 	struct ice_rx_ptype_decoded decoded;
-	u32 rx_error, rx_status;
+	u16 rx_error, rx_status;
+	u16 rx_stat_err1;
 	bool ipv4, ipv6;
 
 	rx_status = le16_to_cpu(rx_desc->wb.status_error0);
-	rx_error = rx_status;
+	rx_error = rx_status & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
+				BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |
+				BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) |
+				BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S));
+
+	rx_stat_err1 = le16_to_cpu(rx_desc->wb.status_error1);
 
 	decoded = ice_decode_rx_desc_ptype(ptype);
 
 	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
@@ -125,6 +130,18 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
 	if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
 		goto checksum_fail;
 
+	/* check for outer UDP checksum error in tunneled packets */
+	if ((rx_stat_err1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
+	    (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
+		goto checksum_fail;
+
+	/* If there is an outer header present that might contain a checksum
+	 * we need to bump the checksum level by 1 to reflect the fact that
+	 * we are indicating we validated the inner checksum.
+	 */
+	if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
+		skb->csum_level = 1;
+
 	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
 	switch (decoded.inner_prot) {
 	case ICE_RX_PTYPE_INNER_PROT_TCP:
@@ -215,7 +232,7 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
 	tx_desc = ICE_TX_DESC(xdp_ring, i);
 	tx_desc->buf_addr = cpu_to_le64(dma);
-	tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
-						  size, 0);
+	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
+						      size, 0);
 
 	/* Make certain all of the status bits have been updated
......
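The csum_level bump deserves a note: with CHECKSUM_UNNECESSARY, skb->csum_level tells the stack how many additional stacked checksums the hardware vouched for beyond the outer-most one. Since the hardware validates the inner-most checksum on tunneled packets, the net effect of the hunks above for, say, a VXLAN frame whose inner TCP checksum passed is:

	/* sketch of the resulting skb state, per the logic above */
	skb->ip_summed = CHECKSUM_UNNECESSARY;	/* inner L4 checksum verified */
	skb->csum_level = 1;			/* one encapsulation level deep */

Without the bump, the stack would credit only the outer checksum as verified and fall back to software verification of the inner one.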
@@ -22,7 +22,7 @@ ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
 }
 
 static inline __le64
-build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
+ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
 {
 	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
 			   (td_cmd << ICE_TXD_QW1_CMD_S) |
@@ -49,7 +49,7 @@ static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)
 void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res);
 int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring);
 int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring);
-void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val);
+void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val);
 void
 ice_process_skb_fields(struct ice_ring *rx_ring,
 		       union ice_32b_rx_flex_desc *rx_desc,
......
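The now driver-prefixed ice_build_ctob() packs the command, header offsets, buffer size, and L2 tag into the data descriptor's quad word; the hunk shows the DTYPE and CMD shifts, with the offset, size, and tag shifts following in the elided context lines. Callers elsewhere in this diff use it directly, for example in ice_xmit_xdp_ring():

	/* final data descriptor: last-descriptor command bits, no header
	 * offsets, 'size' buffer bytes, no L2 tag */
	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
						      size, 0);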
@@ -259,6 +259,16 @@ struct ice_nvm_info {
 
 #define ICE_NVM_VER_LEN	32
 
+/* netlist version information */
+struct ice_netlist_ver_info {
+	u32 major;	/* major high/low */
+	u32 minor;	/* minor high/low */
+	u32 type;	/* type high/low */
+	u32 rev;	/* revision high/low */
+	u32 hash;	/* SHA-1 hash word */
+	u16 cust_ver;	/* customer version */
+};
+
 /* Max number of port to queue branches w.r.t topology */
 #define ICE_MAX_TRAFFIC_CLASS 8
 #define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
@@ -491,8 +501,8 @@ struct ice_hw {
 	u16 max_burst_size;	/* driver sets this value */
 
 	/* Tx Scheduler values */
-	u16 num_tx_sched_layers;
-	u16 num_tx_sched_phys_layers;
+	u8 num_tx_sched_layers;
+	u8 num_tx_sched_phys_layers;
 	u8 flattened_layers;
 	u8 max_cgds;
 	u8 sw_entry_point_layer;
@@ -506,6 +516,7 @@ struct ice_hw {
 	struct ice_nvm_info nvm;
 	struct ice_hw_dev_caps dev_caps;	/* device capabilities */
 	struct ice_hw_func_caps func_caps;	/* function capabilities */
+	struct ice_netlist_ver_info netlist_ver; /* netlist version info */
 
 	struct ice_switch_info *switch_info;	/* switch filter lists */
@@ -568,6 +579,10 @@ struct ice_hw {
 	u8 *pkg_copy;
 	u32 pkg_size;
 
+	/* tunneling info */
+	struct mutex tnl_lock;
+	struct ice_tunnel_table tnl;
+
 	/* HW block tables */
 	struct ice_blk_info blk[ICE_BLK_COUNT];
 	struct mutex fl_profs_locks[ICE_BLK_COUNT];	/* lock fltr profiles */
@@ -592,6 +607,8 @@ struct ice_eth_stats {
 	u64 tx_errors;	/* tepc */
 };
 
+#define ICE_MAX_UP	8
+
 /* Statistics collected by the MAC */
 struct ice_hw_port_stats {
 	/* eth stats collected by the port */
......
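The high/low comments on ice_netlist_ver_info suggest each u32 packs two 16-bit halves read from the flash. As a hypothetical illustration only (the helper name, the field split, and the separator layout are assumptions, not taken from this commit), a formatter for the structure might look like:

	/* hypothetical sketch: render the netlist version as a string;
	 * splitting 'type' into high/low halves is an assumption based
	 * on the struct comments above */
	static void ice_fmt_netlist_ver(struct ice_netlist_ver_info *netlist,
					char *buf, size_t len)
	{
		snprintf(buf, len, "%x.%x.%x-%x.%x.%x",
			 netlist->major, netlist->minor,
			 netlist->type >> 16, netlist->type & 0xFFFF,
			 netlist->rev, netlist->cust_ver);
	}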
@@ -64,7 +64,7 @@ struct ice_mdd_vf_events {
 struct ice_vf {
 	struct ice_pf *pf;
-	s16 vf_id;			/* VF ID in the PF space */
+	u16 vf_id;			/* VF ID in the PF space */
 	u16 lan_vsi_idx;		/* index into PF struct */
 	/* first vector index of this VF in the PF space */
 	int first_vector_idx;
@@ -128,6 +128,7 @@ void ice_set_vf_state_qs_dis(struct ice_vf *vf);
 int
 ice_get_vf_stats(struct net_device *netdev, int vf_id,
 		 struct ifla_vf_stats *vf_stats);
+bool ice_is_any_vf_in_promisc(struct ice_pf *pf);
 void
 ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
 void ice_print_vfs_mdd_events(struct ice_pf *pf);
@@ -219,5 +220,10 @@ ice_get_vf_stats(struct net_device __always_unused *netdev,
 {
 	return -EOPNOTSUPP;
 }
+
+static inline bool ice_is_any_vf_in_promisc(struct ice_pf __always_unused *pf)
+{
+	return false;
+}
 #endif /* CONFIG_PCI_IOV */
 #endif /* _ICE_VIRTCHNL_PF_H_ */
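The CONFIG_PCI_IOV implementation of ice_is_any_vf_in_promisc() is not part of this excerpt; a plausible shape, with the VF state bit names (ICE_VF_STATE_UC_PROMISC, ICE_VF_STATE_MC_PROMISC) assumed rather than quoted from the commit:

	/* sketch: report whether any allocated VF currently has unicast
	 * or multicast promiscuous mode configured */
	bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
	{
		int vf_idx;

		for (vf_idx = 0; vf_idx < pf->num_alloc_vfs; vf_idx++) {
			struct ice_vf *vf = &pf->vf[vf_idx];

			if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
			    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
				return true;
		}

		return false;
	}

The inline stub above keeps !CONFIG_PCI_IOV builds linking by always reporting false.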
@@ -988,8 +988,8 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
 		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
 		tx_desc->buf_addr = cpu_to_le64(dma);
-		tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD,
-							  0, desc.len, 0);
+		tx_desc->cmd_type_offset_bsz =
+			ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, desc.len, 0);
 
 		xdp_ring->next_to_use++;
 		if (xdp_ring->next_to_use == xdp_ring->count)
......