Commit 59b8d277 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2020-05-21

This series contains updates to the ice driver only.  Several of the
changes are fixes that could be backported to stable; of these, only one
was marked for stable because of its memory leak potential.

Jake exposes the information in the flash memory used for link
management, which is called the netlist module.

Henry and Tony add support for tunnel offloads.

Brett adds promiscuous support for VFs, which is based on VF trust and
the new vf-true-promisc flag.

Avinash fixes an issue where a transmit timeout for a queue that belongs
to a PFC-enabled TC is not a true transmit timeout, but rather the
result of PFC being in action.

Dave fixes the check for contiguous TCs to allow for various UP2TC
mapping configurations.  Also fixed an issue where changing the pause
parameters could cause multiple link drops/downs in succession, which in
turn caused the firmware to not generate a link interrupt for the driver
to respond to.

Anirudh (Ani) fixed a potential race condition in probe/open due to a
bit being cleared too early.

Lihong updates an error message to make it more meaningful instead of
just printing out the numerical value of the status/error code.  Also
fixed an incorrect return value when deleting a filter that has no
match, or when adding a filter that already exists.

Karol fixes casting issues and precision loss in the driver.

Jesse makes the sign usage more consistent in the driver by making sure
all instances of vf_id are unsigned, since it can never be negative.

Eric fixes a potential memory leak in ice_add_prof_id_vsig(), which was
not cleaning up resources properly when an error occurred.

Michal, to help organize the filtering code in the driver, refactors the
code into a separate file and adds functions to prepare the filter
information.

Bruce cleaned up a conditional statement that always evaluated to true
and added a comment to make the intent more obvious.  Also cleaned up
redundant code checks.

Tony helps avoid potential namespace issues by renaming an 'ice'
specific function to have the driver name prepended.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4001f1f0 5757cc7c
......@@ -69,6 +69,17 @@ The ``ice`` driver reports the following versions
- The version of the DDP package that is active in the device. Note
that both the name (as reported by ``fw.app.name``) and version are
required to uniquely identify the package.
* - ``fw.netlist``
- running
- 1.1.2000-6.7.0
- The version of the netlist module. This module defines the device's
Ethernet capabilities and default settings, and is used by the
management firmware as part of managing link and device
connectivity.
* - ``fw.netlist.build``
- running
- 0xee16ced7
- The first 4 bytes of the hash of the netlist module contents.
Regions
=======
......
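For context, these new fields surface through the standard devlink info
command alongside the existing version entries; a hypothetical query
(device address made up, values matching the table above) might look
like:

    $ devlink dev info pci/0000:3b:00.0
    ...
      versions:
          running:
            fw.netlist 1.1.2000-6.7.0
            fw.netlist.build 0xee16ced7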
......@@ -17,6 +17,7 @@ ice-y := ice_main.o \
ice_lib.o \
ice_txrx_lib.o \
ice_txrx.o \
ice_fltr.o \
ice_flex_pipe.o \
ice_flow.o \
ice_devlink.o \
......
......@@ -37,6 +37,10 @@
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
#include <net/geneve.h>
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
......@@ -244,8 +248,8 @@ struct ice_vsi {
u32 tx_busy;
u32 rx_buf_failed;
u32 rx_page_failed;
int num_q_vectors;
int base_vector; /* IRQ base for OS reserved vectors */
u16 num_q_vectors;
u16 base_vector; /* IRQ base for OS reserved vectors */
enum ice_vsi_type type;
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
......@@ -341,6 +345,7 @@ enum ice_pf_flags {
ICE_FLAG_FW_LLDP_AGENT,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_FLAG_LEGACY_RX,
ICE_FLAG_VF_TRUE_PROMISC_ENA,
ICE_FLAG_MDD_AUTO_RESET_VF,
ICE_PF_FLAGS_NBITS /* must be last */
};
......@@ -366,7 +371,7 @@ struct ice_pf {
struct ice_sw *first_sw; /* first switch created by firmware */
/* Virtchnl/SR-IOV config info */
struct ice_vf *vf;
int num_alloc_vfs; /* actual number of VFs allocated */
u16 num_alloc_vfs; /* actual number of VFs allocated */
u16 num_vfs_supported; /* num VFs supported for this PF */
u16 num_qps_per_vf;
u16 num_msix_per_vf;
......@@ -385,11 +390,11 @@ struct ice_pf {
struct mutex tc_mutex; /* lock to protect TC changes */
u32 msg_enable;
u32 hw_csum_rx_error;
u32 oicr_idx; /* Other interrupt cause MSIX vector index */
u32 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
u16 oicr_idx; /* Other interrupt cause MSIX vector index */
u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
u16 max_pf_txqs; /* Total Tx queues PF wide */
u16 max_pf_rxqs; /* Total Rx queues PF wide */
u32 num_lan_msix; /* Total MSIX vectors for base driver */
u16 num_lan_msix; /* Total MSIX vectors for base driver */
u16 num_lan_tx; /* num LAN Tx queues setup */
u16 num_lan_rx; /* num LAN Rx queues setup */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
......@@ -523,6 +528,8 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
const char *ice_stat_str(enum ice_status stat_err);
const char *ice_aq_str(enum ice_aq_err aq_err);
int ice_open(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
......
......@@ -541,7 +541,7 @@ struct ice_sw_rule_lkup_rx_tx {
#define ICE_SINGLE_ACT_OTHER_ACTS 0x3
#define ICE_SINGLE_OTHER_ACT_IDENTIFIER_S 17
#define ICE_SINGLE_OTHER_ACT_IDENTIFIER_M \
(0x3 << \ ICE_SINGLE_OTHER_ACT_IDENTIFIER_S)
(0x3 << ICE_SINGLE_OTHER_ACT_IDENTIFIER_S)
/* Bit 17:18 - Defines other actions */
/* Other action = 0 - Mirror VSI */
......@@ -1264,6 +1264,33 @@ struct ice_aqc_nvm_checksum {
u8 rsvd2[12];
};
/* The result of netlist NVM read comes in a TLV format. The actual data
* (netlist header) starts from word offset 1 (byte 2). The FW strips
* out the type field from the TLV header so all the netlist fields
* should adjust their offset value by 1 word (2 bytes) in order to map
* their correct location.
*/
#define ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID 0x11B
#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET 1
#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN 2 /* In bytes */
#define ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET 2
#define ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN 2 /* In bytes */
#define ICE_AQC_NVM_NETLIST_NODE_COUNT_M ICE_M(0x3FF, 0)
#define ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET 5
#define ICE_AQC_NVM_NETLIST_ID_BLK_LEN 0x30 /* In words */
/* netlist ID block field offsets (word offsets) */
#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW 2
#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH 3
#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW 4
#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH 5
#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW 6
#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH 7
#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW 8
#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH 9
#define ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH 0xA
#define ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER 0x2F
/**
* Send to PF command (indirect 0x0801) ID is only used by PF
*
......
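As a small sketch of how the node-count mask above composes (helper name
hypothetical; the real parsing lives in the driver's NVM read path):

static u16 example_netlist_node_count(__le16 raw)
{
	/* the node count occupies the low 10 bits of the word read at
	 * ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET
	 */
	return le16_to_cpu(raw) & ICE_AQC_NVM_NETLIST_NODE_COUNT_M;
}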
......@@ -24,7 +24,7 @@ static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
for (i = 0; i < qs_cfg->q_count; i++)
qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
mutex_unlock(qs_cfg->qs_mutex);
return 0;
......@@ -47,7 +47,7 @@ static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
if (index >= qs_cfg->pf_map_size)
goto err_scatter;
set_bit(index, qs_cfg->pf_map);
qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
}
mutex_unlock(qs_cfg->qs_mutex);
......@@ -96,7 +96,7 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
* We allocate one q_vector and set default value for ITR setting associated
* with this q_vector. If allocation fails we return -ENOMEM.
*/
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
{
struct ice_pf *pf = vsi->back;
struct ice_q_vector *q_vector;
......@@ -376,7 +376,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
/* Max packet size for this queue - must not be set to a larger value
* than 5 x DBUF
*/
rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
chain_len * ring->rx_buf_len);
/* Rx queue threshold in units of 64 */
......@@ -453,7 +453,7 @@ int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
if (ret) {
/* contig failed, so try with scatter approach */
qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
qs_cfg->scatter_count);
ret = __ice_vsi_get_qs_sc(qs_cfg);
}
......@@ -526,7 +526,8 @@ int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
struct device *dev = ice_pf_to_dev(vsi->back);
int v_idx, err;
u16 v_idx;
int err;
if (vsi->q_vectors[0]) {
dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
......@@ -562,7 +563,7 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
{
int q_vectors = vsi->num_q_vectors;
int tx_rings_rem, rx_rings_rem;
u16 tx_rings_rem, rx_rings_rem;
int v_id;
/* initially assigning remaining rings count to VSIs num queue value */
......@@ -571,10 +572,12 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
for (v_id = 0; v_id < q_vectors; v_id++) {
struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
u8 tx_rings_per_v, rx_rings_per_v;
u16 q_id, q_base;
/* Tx rings mapping to vector */
tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
q_vectors - v_id);
q_vector->num_ring_tx = tx_rings_per_v;
q_vector->tx.ring = NULL;
q_vector->tx.itr_idx = ICE_TX_ITR;
......@@ -590,7 +593,8 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
tx_rings_rem -= tx_rings_per_v;
/* Rx rings mapping to vector */
rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
q_vectors - v_id);
q_vector->num_ring_rx = rx_rings_per_v;
q_vector->rx.ring = NULL;
q_vector->rx.itr_idx = ICE_RX_ITR;
......@@ -662,8 +666,8 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
1, qg_buf, buf_len, NULL);
if (status) {
dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
status);
dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n",
ice_stat_str(status));
return -ENODEV;
}
......@@ -832,8 +836,8 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
} else if (status == ICE_ERR_DOES_NOT_EXIST) {
dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
} else if (status) {
dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
status);
dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n",
ice_stat_str(status));
return -ENODEV;
}
......
......@@ -746,6 +746,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
status = ice_init_hw_tbls(hw);
if (status)
goto err_unroll_fltr_mgmt_struct;
mutex_init(&hw->tnl_lock);
return 0;
err_unroll_fltr_mgmt_struct:
......@@ -775,6 +776,7 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_sched_clear_agg(hw);
ice_free_seg(hw);
ice_free_hw_tbls(hw);
mutex_destroy(&hw->tnl_lock);
if (hw->port_info) {
devm_kfree(ice_hw_to_dev(hw), hw->port_info);
......
......@@ -1128,7 +1128,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
}
memcpy(&e->desc, desc, sizeof(e->desc));
datalen = le16_to_cpu(desc->datalen);
e->msg_len = min(datalen, e->buf_len);
e->msg_len = min_t(u16, datalen, e->buf_len);
if (e->msg_buf && e->msg_len)
memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);
......
......@@ -62,6 +62,64 @@ u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
return ena_tc;
}
/**
* ice_is_pfc_causing_hung_q
* @pf: pointer to PF structure
* @txqueue: Tx queue which is supposedly hung queue
*
* Find out if PFC is causing the hung queue; if so, return true, else false
*/
bool ice_is_pfc_causing_hung_q(struct ice_pf *pf, unsigned int txqueue)
{
u8 num_tcs = 0, i, tc, up_mapped_tc, up_in_tc = 0;
u64 ref_prio_xoff[ICE_MAX_UP];
struct ice_vsi *vsi;
u32 up2tc;
vsi = ice_get_main_vsi(pf);
if (!vsi)
return false;
ice_for_each_traffic_class(i)
if (vsi->tc_cfg.ena_tc & BIT(i))
num_tcs++;
/* first find out the TC to which the hung queue belongs */
for (tc = 0; tc < num_tcs - 1; tc++)
if (ice_find_q_in_range(vsi->tc_cfg.tc_info[tc].qoffset,
vsi->tc_cfg.tc_info[tc + 1].qoffset,
txqueue))
break;
/* Build a bitmap of all UPs associated with the suspect hung queue's TC,
* so that we can check their counters for an increment.
*/
up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
for (i = 0; i < ICE_MAX_UP; i++) {
up_mapped_tc = (up2tc >> (i * 3)) & 0x7;
if (up_mapped_tc == tc)
up_in_tc |= BIT(i);
}
/* Even though the hung queue's TC is PFC enabled, the Tx timeout can
* still be legitimate. To make sure it is absolutely caused by a PFC
* storm, check whether the priority XOFF counters are incrementing.
*/
for (i = 0; i < ICE_MAX_UP; i++)
if (up_in_tc & BIT(i))
ref_prio_xoff[i] = pf->stats.priority_xoff_rx[i];
ice_update_dcb_stats(pf);
for (i = 0; i < ICE_MAX_UP; i++)
if (up_in_tc & BIT(i))
if (pf->stats.priority_xoff_rx[i] > ref_prio_xoff[i])
return true;
return false;
}
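As a quick worked decode of the PRTDCB_TUP2TC arithmetic above, which
packs one 3-bit TC value per user priority (register value
hypothetical):

/* up2tc = 0x88 = 0b010_001_000:
 *   UP0 = 0x88 & 0x7        = 0
 *   UP1 = (0x88 >> 3) & 0x7 = 1
 *   UP2 = (0x88 >> 6) & 0x7 = 2
 */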
/**
* ice_dcb_get_mode - gets the DCB mode
* @port_info: pointer to port info structure
......@@ -526,16 +584,21 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
*/
static bool ice_dcb_tc_contig(u8 *prio_table)
{
u8 max_tc = 0;
bool found_empty = false;
u8 used_tc = 0;
int i;
for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) {
u8 cur_tc = prio_table[i];
/* Create a bitmap of used TCs */
for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
used_tc |= BIT(prio_table[i]);
if (cur_tc > max_tc)
return false;
else if (cur_tc == max_tc)
max_tc++;
for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) {
if (used_tc & BIT(i)) {
if (found_empty)
return false;
} else {
found_empty = true;
}
}
return true;
......
......@@ -17,6 +17,8 @@
void ice_dcb_rebuild(struct ice_pf *pf);
u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi);
bool ice_is_pfc_causing_hung_q(struct ice_pf *pf, unsigned int txqueue);
u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
int
ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
......@@ -32,6 +34,20 @@ void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event);
void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
/**
* ice_find_q_in_range
* @low: start of queue range for a TC i.e. offset of TC
* @high: start of queue for next TC
* @tx_q: hung_queue/tx_queue
*
* finds if queue 'tx_q' falls between the two offsets of any given TC
*/
static inline bool ice_find_q_in_range(u16 low, u16 high, unsigned int tx_q)
{
return (tx_q >= low) && (tx_q < high);
}
static inline void
ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
{
......@@ -79,6 +95,13 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
return 0;
}
static inline bool
ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf,
unsigned int __always_unused txqueue)
{
return false;
}
#define ice_update_dcb_stats(pf) do {} while (0)
#define ice_pf_dcb_recfg(pf) do {} while (0)
#define ice_vsi_cfg_dcb_rings(vsi) do {} while (0)
......
......@@ -105,6 +105,27 @@ static int ice_info_ddp_pkg_version(struct ice_pf *pf, char *buf, size_t len)
return 0;
}
static int ice_info_netlist_ver(struct ice_pf *pf, char *buf, size_t len)
{
struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;
/* The netlist version fields are BCD formatted */
snprintf(buf, len, "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor,
netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev,
netlist->cust_ver);
return 0;
}
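A worked example of the BCD formatting above, matching the documented
string "1.1.2000-6.7.0" (field values inferred from the format string):

/* major = 0x1, minor = 0x1, type = 0x20000006
 *   -> type >> 16 = 0x2000, type & 0xFFFF = 0x6
 * rev = 0x7, cust_ver = 0x0
 * "%x.%x.%x-%x.%x.%x" -> "1.1.2000-6.7.0"
 */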
static int ice_info_netlist_build(struct ice_pf *pf, char *buf, size_t len)
{
struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;
snprintf(buf, len, "0x%08x", netlist->hash);
return 0;
}
#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter }
#define running(key, getter) { ICE_VERSION_RUNNING, key, getter }
......@@ -128,6 +149,8 @@ static const struct ice_devlink_version {
running(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack),
running("fw.app.name", ice_info_ddp_pkg_name),
running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
running("fw.netlist", ice_info_netlist_ver),
running("fw.netlist.build", ice_info_netlist_build),
};
/**
......
......@@ -5,6 +5,7 @@
#include "ice.h"
#include "ice_flow.h"
#include "ice_fltr.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
......@@ -157,6 +158,8 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
ICE_PRIV_FLAG("vf-true-promisc-support",
ICE_FLAG_VF_TRUE_PROMISC_ENA),
ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
};
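For reference, the new flag is toggled like any other private flag via
ethtool (interface name hypothetical):

    # ethtool --set-priv-flags eth0 vf-true-promisc-support on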
......@@ -273,8 +276,9 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status) {
dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
dev_err(dev, "ice_acquire_nvm failed, err %s aq_err %s\n",
ice_stat_str(status),
ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto out;
}
......@@ -282,8 +286,9 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf,
false);
if (status) {
dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
dev_err(dev, "ice_read_flat_nvm failed, err %s aq_err %s\n",
ice_stat_str(status),
ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto release;
}
......@@ -332,7 +337,8 @@ static u64 ice_link_test(struct net_device *netdev)
netdev_info(netdev, "link test\n");
status = ice_get_link_status(np->vsi->port_info, &link_up);
if (status) {
netdev_err(netdev, "link query error, status = %d\n", status);
netdev_err(netdev, "link query error, status = %s\n",
ice_stat_str(status));
return 1;
}
......@@ -671,7 +677,6 @@ static u64 ice_loopback_test(struct net_device *netdev)
struct ice_ring *tx_ring, *rx_ring;
u8 broadcast[ETH_ALEN], ret = 0;
int num_frames, valid_frames;
LIST_HEAD(tmp_list);
struct device *dev;
u8 *tx_frame;
int i;
......@@ -707,16 +712,11 @@ static u64 ice_loopback_test(struct net_device *netdev)
/* Test VSI needs to receive broadcast packets */
eth_broadcast_addr(broadcast);
if (ice_add_mac_to_list(test_vsi, &tmp_list, broadcast)) {
if (ice_fltr_add_mac(test_vsi, broadcast, ICE_FWD_TO_VSI)) {
ret = 5;
goto lbtest_mac_dis;
}
if (ice_add_mac(&pf->hw, &tmp_list)) {
ret = 6;
goto free_mac_list;
}
if (ice_lbtest_create_frame(pf, &tx_frame, ICE_LB_FRAME_SIZE)) {
ret = 7;
goto remove_mac_filters;
......@@ -739,10 +739,8 @@ static u64 ice_loopback_test(struct net_device *netdev)
lbtest_free_frame:
devm_kfree(dev, tx_frame);
remove_mac_filters:
if (ice_remove_mac(&pf->hw, &tmp_list))
if (ice_fltr_remove_mac(test_vsi, broadcast, ICE_FWD_TO_VSI))
netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
free_mac_list:
ice_free_fltr_list(dev, &tmp_list);
lbtest_mac_dis:
/* Disable MAC loopback after the test is completed. */
if (ice_aq_set_mac_loopback(&pf->hw, false, NULL))
......@@ -1158,8 +1156,9 @@ static int ice_nway_reset(struct net_device *netdev)
status = ice_aq_set_link_restart_an(pi, false, NULL);
if (status) {
netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
status, pi->hw->adminq.sq_last_status);
netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
ice_stat_str(status),
ice_aq_str(pi->hw->adminq.sq_last_status));
return -EIO;
}
......@@ -1308,6 +1307,16 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
ice_down(vsi);
ice_up(vsi);
}
/* don't allow modification of this flag when a single VF is in
* promiscuous mode because it's not supported
*/
if (test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, change_flags) &&
ice_is_any_vf_in_promisc(pf)) {
dev_err(dev, "Changing vf-true-promisc-support flag while VF(s) are in promiscuous mode not supported\n");
/* toggle bit back to previous state */
change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags);
ret = -EAGAIN;
}
clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
return ret;
}
......@@ -2450,8 +2459,8 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs);
if (status) {
dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
vsi->vsi_num, status);
dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %s\n",
vsi->vsi_num, ice_stat_str(status));
return -EINVAL;
}
......@@ -2593,7 +2602,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int i, timeout = 50, err = 0;
u32 new_rx_cnt, new_tx_cnt;
u16 new_rx_cnt, new_tx_cnt;
if (ring->tx_pending > ICE_MAX_NUM_DESC ||
ring->tx_pending < ICE_MIN_NUM_DESC ||
......@@ -2645,8 +2654,8 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
if (ice_is_xdp_ena_vsi(vsi))
for (i = 0; i < vsi->num_xdp_txq; i++)
vsi->xdp_rings[i]->count = new_tx_cnt;
vsi->num_tx_desc = new_tx_cnt;
vsi->num_rx_desc = new_rx_cnt;
vsi->num_tx_desc = (u16)new_tx_cnt;
vsi->num_rx_desc = (u16)new_rx_cnt;
netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
goto done;
}
......@@ -2952,31 +2961,22 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
status = ice_set_fc(pi, &aq_failures, link_up);
if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
ice_stat_str(status),
ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
ice_stat_str(status),
ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
ice_stat_str(status),
ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
}
if (!test_bit(__ICE_DOWN, pf->state)) {
/* Give it a little more time to try to come back. If still
* down, restart autoneg link or reinitialize the interface.
*/
msleep(75);
if (!test_bit(__ICE_DOWN, pf->state))
return ice_nway_reset(netdev);
ice_down(vsi);
ice_up(vsi);
}
return err;
}
......@@ -3227,8 +3227,9 @@ static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type, lut,
vsi->rss_table_size);
if (status) {
dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n",
status, hw->adminq.rq_last_status);
dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n",
ice_stat_str(status),
ice_aq_str(hw->adminq.rq_last_status));
err = -EIO;
}
......
......@@ -5,6 +5,15 @@
#include "ice_flex_pipe.h"
#include "ice_flow.h"
/* To support tunneling entries by PF, the package will append the PF number to
* the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
*/
static const struct ice_tunnel_type_scan tnls[] = {
{ TNL_VXLAN, "TNL_VXLAN_PF" },
{ TNL_GENEVE, "TNL_GENEVE_PF" },
{ TNL_LAST, "" }
};
static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
/* SWITCH */
{
......@@ -239,6 +248,268 @@ ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
return state->sect;
}
/**
* ice_pkg_enum_entry
* @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
* @state: pointer to the enum state
* @sect_type: section type to enumerate
* @offset: pointer to variable that receives the offset in the table (optional)
* @handler: function that handles access to the entries in the section type
*
* This function will enumerate all the entries in a particular section type in
* the ice segment. The first call is made with the ice_seg parameter non-NULL;
* on subsequent calls, ice_seg is set to NULL, which continues the enumeration.
* When the function returns a NULL pointer, the end of the entries has been
* reached.
*
* Since each section may have a different header and entry size, the handler
* function is needed to determine the number and location of entries in each
* section.
*
* The offset parameter is optional, but should be used for sections that
* contain an offset for each section table. For such cases, the section handler
* function must return the appropriate offset + index to give the absolute
* offset for each entry. For example, if the base for a section's header
* indicates a base offset of 10, and the index for the entry is 2, then
* section handler function should set the offset to 10 + 2 = 12.
*/
static void *
ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
u32 sect_type, u32 *offset,
void *(*handler)(u32 sect_type, void *section,
u32 index, u32 *offset))
{
void *entry;
if (ice_seg) {
if (!handler)
return NULL;
if (!ice_pkg_enum_section(ice_seg, state, sect_type))
return NULL;
state->entry_idx = 0;
state->handler = handler;
} else {
state->entry_idx++;
}
if (!state->handler)
return NULL;
/* get entry */
entry = state->handler(state->sect_type, state->sect, state->entry_idx,
offset);
if (!entry) {
/* end of a section, look for another section of this type */
if (!ice_pkg_enum_section(NULL, state, 0))
return NULL;
state->entry_idx = 0;
entry = state->handler(state->sect_type, state->sect,
state->entry_idx, offset);
}
return entry;
}
/**
* ice_boost_tcam_handler
* @sect_type: section type
* @section: pointer to section
* @index: index of the boost TCAM entry to be returned
* @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
*
* This is a callback function that can be passed to ice_pkg_enum_entry.
* Handles enumeration of individual boost TCAM entries.
*/
static void *
ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
{
struct ice_boost_tcam_section *boost;
if (!section)
return NULL;
if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
return NULL;
if (index > ICE_MAX_BST_TCAMS_IN_BUF)
return NULL;
if (offset)
*offset = 0;
boost = section;
if (index >= le16_to_cpu(boost->count))
return NULL;
return boost->tcam + index;
}
/**
* ice_find_boost_entry
* @ice_seg: pointer to the ice segment (non-NULL)
* @addr: Boost TCAM address of entry to search for
* @entry: returns pointer to the entry
*
* Finds a particular Boost TCAM entry and returns a pointer to that entry
* if it is found. The ice_seg parameter must not be NULL since the first call
* to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
*/
static enum ice_status
ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
struct ice_boost_tcam_entry **entry)
{
struct ice_boost_tcam_entry *tcam;
struct ice_pkg_enum state;
memset(&state, 0, sizeof(state));
if (!ice_seg)
return ICE_ERR_PARAM;
do {
tcam = ice_pkg_enum_entry(ice_seg, &state,
ICE_SID_RXPARSER_BOOST_TCAM, NULL,
ice_boost_tcam_handler);
if (tcam && le16_to_cpu(tcam->addr) == addr) {
*entry = tcam;
return 0;
}
ice_seg = NULL;
} while (tcam);
*entry = NULL;
return ICE_ERR_CFG;
}
/**
* ice_label_enum_handler
* @sect_type: section type
* @section: pointer to section
* @index: index of the label entry to be returned
* @offset: pointer to receive absolute offset, always zero for label sections
*
* This is a callback function that can be passed to ice_pkg_enum_entry.
* Handles enumeration of individual label entries.
*/
static void *
ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
u32 *offset)
{
struct ice_label_section *labels;
if (!section)
return NULL;
if (index > ICE_MAX_LABELS_IN_BUF)
return NULL;
if (offset)
*offset = 0;
labels = section;
if (index >= le16_to_cpu(labels->count))
return NULL;
return labels->label + index;
}
/**
* ice_enum_labels
* @ice_seg: pointer to the ice segment (NULL on subsequent calls)
* @type: the section type that will contain the label (0 on subsequent calls)
* @state: ice_pkg_enum structure that will hold the state of the enumeration
* @value: pointer to a value that will return the label's value if found
*
* Enumerates a list of labels in the package. The caller will call
* ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
* ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL
* the end of the list has been reached.
*/
static char *
ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
u16 *value)
{
struct ice_label *label;
/* Check for valid label section on first call */
if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
return NULL;
label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
ice_label_enum_handler);
if (!label)
return NULL;
*value = le16_to_cpu(label->value);
return label->name;
}
/**
* ice_init_pkg_hints
* @hw: pointer to the HW structure
* @ice_seg: pointer to the segment of the package scan (non-NULL)
*
* This function will scan the package and save off relevant information
* (hints or metadata) for driver use. The ice_seg parameter must not be NULL
* since the first call to ice_enum_labels requires a pointer to an actual
* ice_seg structure.
*/
static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
{
struct ice_pkg_enum state;
char *label_name;
u16 val;
int i;
memset(&hw->tnl, 0, sizeof(hw->tnl));
memset(&state, 0, sizeof(state));
if (!ice_seg)
return;
label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
&val);
while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
for (i = 0; tnls[i].type != TNL_LAST; i++) {
size_t len = strlen(tnls[i].label_prefix);
/* Look for matching label start, before continuing */
if (strncmp(label_name, tnls[i].label_prefix, len))
continue;
/* Make sure this label matches our PF. Note that the PF
* character ('0' - '7') will be located where our
* prefix string's null terminator is located.
*/
if ((label_name[len] - '0') == hw->pf_id) {
hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
hw->tnl.tbl[hw->tnl.count].valid = false;
hw->tnl.tbl[hw->tnl.count].in_use = false;
hw->tnl.tbl[hw->tnl.count].marked = false;
hw->tnl.tbl[hw->tnl.count].boost_addr = val;
hw->tnl.tbl[hw->tnl.count].port = 0;
hw->tnl.count++;
break;
}
}
label_name = ice_enum_labels(NULL, 0, &state, &val);
}
/* Cache the appropriate boost TCAM entry pointers */
for (i = 0; i < hw->tnl.count; i++) {
ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
&hw->tnl.tbl[i].boost_entry);
if (hw->tnl.tbl[i].boost_entry)
hw->tnl.tbl[i].valid = true;
}
}
/* Key creation */
#define ICE_DC_KEY 0x1 /* don't care */
......@@ -1050,7 +1321,8 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
return ICE_ERR_CFG;
}
/* download package */
/* initialize package hints and then download package */
ice_init_pkg_hints(hw, seg);
status = ice_download_pkg(hw, seg);
if (status == ICE_ERR_AQ_NO_WORK) {
ice_debug(hw, ICE_DBG_INIT,
......@@ -1292,6 +1564,256 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
return &bld->buf;
}
/**
* ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
* @hw: pointer to the HW structure
* @port: port to search for
* @index: optionally returns index
*
* Returns whether a port is already in use as a tunnel, and optionally its
* index
*/
static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
{
u16 i;
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
if (index)
*index = i;
return true;
}
return false;
}
/**
* ice_tunnel_port_in_use
* @hw: pointer to the HW structure
* @port: port to search for
* @index: optionally returns index
*
* Returns whether a port is already in use as a tunnel, and optionally its
* index
*/
bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
{
bool res;
mutex_lock(&hw->tnl_lock);
res = ice_tunnel_port_in_use_hlpr(hw, port, index);
mutex_unlock(&hw->tnl_lock);
return res;
}
/**
* ice_find_free_tunnel_entry
* @hw: pointer to the HW structure
* @type: tunnel type
* @index: optionally returns index
*
* Returns whether there is a free tunnel entry, and optionally its index
*/
static bool
ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
u16 *index)
{
u16 i;
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
hw->tnl.tbl[i].type == type) {
if (index)
*index = i;
return true;
}
return false;
}
/**
* ice_create_tunnel
* @hw: pointer to the HW structure
* @type: type of tunnel
* @port: port of tunnel to create
*
* Create a tunnel by updating the parse graph in the parser. We do that by
* creating a package buffer with the tunnel info and issuing an update package
* command.
*/
enum ice_status
ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
u16 index;
mutex_lock(&hw->tnl_lock);
if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
hw->tnl.tbl[index].ref++;
status = 0;
goto ice_create_tunnel_end;
}
if (!ice_find_free_tunnel_entry(hw, type, &index)) {
status = ICE_ERR_OUT_OF_RANGE;
goto ice_create_tunnel_end;
}
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
status = ICE_ERR_NO_MEMORY;
goto ice_create_tunnel_end;
}
/* allocate 2 sections, one for Rx parser, one for Tx parser */
if (ice_pkg_buf_reserve_section(bld, 2))
goto ice_create_tunnel_err;
sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
sizeof(*sect_rx));
if (!sect_rx)
goto ice_create_tunnel_err;
sect_rx->count = cpu_to_le16(1);
sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
sizeof(*sect_tx));
if (!sect_tx)
goto ice_create_tunnel_err;
sect_tx->count = cpu_to_le16(1);
/* copy original boost entry to update package buffer */
memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
sizeof(*sect_rx->tcam));
/* over-write the never-match dest port key bits with the encoded port
* bits
*/
ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
(u8 *)&port, NULL, NULL, NULL,
(u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
/* exact copy of entry to Tx section entry */
memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
if (!status) {
hw->tnl.tbl[index].port = port;
hw->tnl.tbl[index].in_use = true;
hw->tnl.tbl[index].ref = 1;
}
ice_create_tunnel_err:
ice_pkg_buf_free(hw, bld);
ice_create_tunnel_end:
mutex_unlock(&hw->tnl_lock);
return status;
}
/**
* ice_destroy_tunnel
* @hw: pointer to the HW structure
* @port: port of tunnel to destroy (ignored if the all parameter is true)
* @all: flag that states to destroy all tunnels
*
* Destroys a tunnel or all tunnels by creating an update package buffer
* targeting the specific updates requested and then performing an update
* package.
*/
enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
u16 count = 0;
u16 index;
u16 size;
u16 i;
mutex_lock(&hw->tnl_lock);
if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
if (hw->tnl.tbl[index].ref > 1) {
hw->tnl.tbl[index].ref--;
status = 0;
goto ice_destroy_tunnel_end;
}
/* determine count */
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
(all || hw->tnl.tbl[i].port == port))
count++;
if (!count) {
status = ICE_ERR_PARAM;
goto ice_destroy_tunnel_end;
}
/* size of section - there is at least one entry */
size = struct_size(sect_rx, tcam, count - 1);
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
status = ICE_ERR_NO_MEMORY;
goto ice_destroy_tunnel_end;
}
/* allocate 2 sections, one for Rx parser, one for Tx parser */
if (ice_pkg_buf_reserve_section(bld, 2))
goto ice_destroy_tunnel_err;
sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
size);
if (!sect_rx)
goto ice_destroy_tunnel_err;
sect_rx->count = cpu_to_le16(1);
sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
size);
if (!sect_tx)
goto ice_destroy_tunnel_err;
sect_tx->count = cpu_to_le16(1);
/* copy original boost entry to update package buffer, one copy to Rx
* section, another copy to the Tx section
*/
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
(all || hw->tnl.tbl[i].port == port)) {
memcpy(sect_rx->tcam + i, hw->tnl.tbl[i].boost_entry,
sizeof(*sect_rx->tcam));
memcpy(sect_tx->tcam + i, hw->tnl.tbl[i].boost_entry,
sizeof(*sect_tx->tcam));
hw->tnl.tbl[i].marked = true;
}
status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
if (!status)
for (i = 0; i < hw->tnl.count &&
i < ICE_TUNNEL_MAX_ENTRIES; i++)
if (hw->tnl.tbl[i].marked) {
hw->tnl.tbl[i].ref = 0;
hw->tnl.tbl[i].port = 0;
hw->tnl.tbl[i].in_use = false;
hw->tnl.tbl[i].marked = false;
}
ice_destroy_tunnel_err:
ice_pkg_buf_free(hw, bld);
ice_destroy_tunnel_end:
mutex_unlock(&hw->tnl_lock);
return status;
}
/* PTG Management */
/**
......@@ -1807,9 +2329,10 @@ ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
struct ice_fv_word *fv, u8 *prof_id)
{
struct ice_es *es = &hw->blk[blk].es;
u16 off, i;
u16 off;
u8 i;
for (i = 0; i < es->count; i++) {
for (i = 0; i < (u8)es->count; i++) {
off = i * es->fvw;
if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
......@@ -2939,7 +3462,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
struct ice_prof_map *prof;
enum ice_status status;
u32 byte = 0;
u8 byte = 0;
u8 prof_id;
bitmap_zero(ptgs_used, ICE_XLT1_CNT);
......@@ -2974,7 +3497,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
/* build list of ptgs */
while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
u32 bit;
u8 bit;
if (!ptypes[byte]) {
bytes--;
......@@ -3008,7 +3531,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
break;
/* nothing left in byte, then exit */
m = ~((1 << (bit + 1)) - 1);
m = ~(u8)((1 << (bit + 1)) - 1);
if (!(ptypes[byte] & m))
break;
}
......@@ -3705,8 +4228,10 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
t->tcam[i].prof_id,
t->tcam[i].ptg, vsig, 0, 0,
vl_msk, dc_msk, nm_msk);
if (status)
if (status) {
devm_kfree(ice_hw_to_dev(hw), p);
goto err_ice_add_prof_id_vsig;
}
/* log change */
list_add(&p->list_entry, chg);
......
......@@ -18,6 +18,11 @@
#define ICE_PKG_CNT 4
enum ice_status
ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port);
enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);
bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index);
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
struct ice_fv_word *es);
......
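A hedged sketch of how a UDP tunnel port notification could funnel into
the helpers declared above (function name and wiring are illustrative,
not the exact driver call sites):

static enum ice_status
example_udp_tunnel_port(struct ice_hw *hw, u16 port, bool add)
{
	if (add)
		/* refcounts the entry if this port is already programmed */
		return ice_create_tunnel(hw, TNL_VXLAN, port);

	/* drops one reference; clears the entry when none remain */
	return ice_destroy_tunnel(hw, port, false);
}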
......@@ -149,6 +149,7 @@ struct ice_buf_hdr {
#define ICE_SID_CDID_REDIR_RSS 48
#define ICE_SID_RXPARSER_BOOST_TCAM 56
#define ICE_SID_TXPARSER_BOOST_TCAM 66
#define ICE_SID_XLT0_PE 80
#define ICE_SID_XLT_KEY_BUILDER_PE 81
......@@ -291,6 +292,38 @@ struct ice_pkg_enum {
void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
};
/* Tunnel enabling */
enum ice_tunnel_type {
TNL_VXLAN = 0,
TNL_GENEVE,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
};
struct ice_tunnel_type_scan {
enum ice_tunnel_type type;
const char *label_prefix;
};
struct ice_tunnel_entry {
enum ice_tunnel_type type;
u16 boost_addr;
u16 port;
u16 ref;
struct ice_boost_tcam_entry *boost_entry;
u8 valid;
u8 in_use;
u8 marked;
};
#define ICE_TUNNEL_MAX_ENTRIES 16
struct ice_tunnel_table {
struct ice_tunnel_entry tbl[ICE_TUNNEL_MAX_ENTRIES];
u16 count;
};
struct ice_pkg_es {
__le16 count;
__le16 offset;
......
......@@ -42,7 +42,10 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
/* GRE */
/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
sizeof_field(struct gre_full_hdr, key)),
};
/* Bitmaps indicating relevant packet types for a particular protocol header
......@@ -134,6 +137,18 @@ static const u32 ice_ptypes_sctp_il[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
0x0000017E, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Manage parameters and info. used during the creation of a flow profile */
struct ice_flow_prof_params {
enum ice_block blk;
......@@ -225,6 +240,12 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
src = (const unsigned long *)ice_ptypes_sctp_il;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
if (!i) {
src = (const unsigned long *)ice_ptypes_gre_of;
bitmap_and(params->ptypes, params->ptypes,
src, ICE_FLOW_PTYPE_MAX);
}
}
}
......@@ -275,6 +296,9 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
prot_id = ICE_PROT_SCTP_IL;
break;
case ICE_FLOW_FIELD_IDX_GRE_KEYID:
prot_id = ICE_PROT_GRE_OF;
break;
default:
return ICE_ERR_NOT_IMPL;
}
......@@ -945,6 +969,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
#define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
#define ICE_RSS_OUTER_HEADERS 1
#define ICE_RSS_INNER_HEADERS 2
/* Flow profile ID format:
* [0:31] - Packet match fields
......@@ -1085,6 +1110,9 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
mutex_lock(&hw->rss_locks);
status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
ICE_RSS_OUTER_HEADERS);
if (!status)
status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
addl_hdrs, ICE_RSS_INNER_HEADERS);
mutex_unlock(&hw->rss_locks);
return status;
......@@ -1238,6 +1266,12 @@ enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
ICE_RSS_OUTER_HEADERS);
if (status)
break;
status = ice_add_rss_cfg_sync(hw, vsi_handle,
r->hashed_flds,
r->packet_hdr,
ICE_RSS_INNER_HEADERS);
if (status)
break;
}
}
mutex_unlock(&hw->rss_locks);
......
......@@ -43,6 +43,7 @@ enum ice_flow_seg_hdr {
ICE_FLOW_SEG_HDR_TCP = 0x00000040,
ICE_FLOW_SEG_HDR_UDP = 0x00000080,
ICE_FLOW_SEG_HDR_SCTP = 0x00000100,
ICE_FLOW_SEG_HDR_GRE = 0x00000200,
};
enum ice_flow_field {
......@@ -58,6 +59,8 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
/* GRE */
ICE_FLOW_FIELD_IDX_GRE_KEYID,
/* The total number of enums must not exceed 64 */
ICE_FLOW_FIELD_IDX_MAX
};
......
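As a sketch of feeding the new GRE segment header and field into the
existing RSS configuration path (call shape taken from the
ice_add_rss_cfg() use earlier in this series; the exact call site is not
part of this diff):

static enum ice_status
example_cfg_gre_rss(struct ice_pf *pf, struct ice_vsi *vsi)
{
	/* hash encapsulated flows on the 32-bit GRE key ID */
	return ice_add_rss_cfg(&pf->hw, vsi->idx,
			       BIT_ULL(ICE_FLOW_FIELD_IDX_GRE_KEYID),
			       ICE_FLOW_SEG_HDR_GRE);
}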
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2020, Intel Corporation. */
#include "ice.h"
#include "ice_fltr.h"
/**
* ice_fltr_free_list - free filter lists helper
* @dev: pointer to the device struct
* @h: pointer to the list head to be freed
*
* Helper function to free filter lists previously created using
* ice_fltr_add_mac_to_list
*/
void ice_fltr_free_list(struct device *dev, struct list_head *h)
{
struct ice_fltr_list_entry *e, *tmp;
list_for_each_entry_safe(e, tmp, h, list_entry) {
list_del(&e->list_entry);
devm_kfree(dev, e);
}
}
/**
* ice_fltr_add_entry_to_list - allocate and add filter entry to list
* @dev: pointer to device needed by alloc function
* @info: filter info struct that gets added to the passed in list
* @list: pointer to the list that contains the MAC filter entries
*/
static int
ice_fltr_add_entry_to_list(struct device *dev, struct ice_fltr_info *info,
struct list_head *list)
{
struct ice_fltr_list_entry *entry;
entry = devm_kzalloc(dev, sizeof(*entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
entry->fltr_info = *info;
INIT_LIST_HEAD(&entry->list_entry);
list_add(&entry->list_entry, list);
return 0;
}
/**
* ice_fltr_add_mac_list - add list of MAC filters
* @vsi: pointer to VSI struct
* @list: list of filters
*/
enum ice_status
ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_add_mac(&vsi->back->hw, list);
}
/**
* ice_fltr_remove_mac_list - remove list of MAC filters
* @vsi: pointer to VSI struct
* @list: list of filters
*/
enum ice_status
ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_remove_mac(&vsi->back->hw, list);
}
/**
* ice_fltr_add_vlan_list - add list of VLAN filters
* @vsi: pointer to VSI struct
* @list: list of filters
*/
static enum ice_status
ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_add_vlan(&vsi->back->hw, list);
}
/**
* ice_fltr_remove_vlan_list - remove list of VLAN filters
* @vsi: pointer to VSI struct
* @list: list of filters
*/
static enum ice_status
ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_remove_vlan(&vsi->back->hw, list);
}
/**
* ice_fltr_add_eth_list - add list of ethertype filters
* @vsi: pointer to VSI struct
* @list: list of filters
*/
static enum ice_status
ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_add_eth_mac(&vsi->back->hw, list);
}
/**
* ice_fltr_remove_eth_list - remove list of ethertype filters
* @vsi: pointer to VSI struct
* @list: list of filters
*/
static enum ice_status
ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_remove_eth_mac(&vsi->back->hw, list);
}
/**
* ice_fltr_remove_all - remove all filters associated with VSI
* @vsi: pointer to VSI struct
*/
void ice_fltr_remove_all(struct ice_vsi *vsi)
{
ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx);
}
/**
* ice_fltr_add_mac_to_list - add MAC filter info to existing list
* @vsi: pointer to VSI struct
* @list: list to add filter info to
* @mac: MAC address to add
* @action: filter action
*/
int
ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list,
const u8 *mac, enum ice_sw_fwd_act_type action)
{
struct ice_fltr_info info = { 0 };
info.flag = ICE_FLTR_TX;
info.src_id = ICE_SRC_ID_VSI;
info.lkup_type = ICE_SW_LKUP_MAC;
info.fltr_act = action;
info.vsi_handle = vsi->idx;
ether_addr_copy(info.l_data.mac.mac_addr, mac);
return ice_fltr_add_entry_to_list(ice_pf_to_dev(vsi->back), &info,
list);
}
/**
* ice_fltr_add_vlan_to_list - add VLAN filter info to existing list
* @vsi: pointer to VSI struct
* @list: list to add filter info to
* @vlan_id: VLAN ID to add
* @action: filter action
*/
static int
ice_fltr_add_vlan_to_list(struct ice_vsi *vsi, struct list_head *list,
u16 vlan_id, enum ice_sw_fwd_act_type action)
{
struct ice_fltr_info info = { 0 };
info.flag = ICE_FLTR_TX;
info.src_id = ICE_SRC_ID_VSI;
info.lkup_type = ICE_SW_LKUP_VLAN;
info.fltr_act = action;
info.vsi_handle = vsi->idx;
info.l_data.vlan.vlan_id = vlan_id;
return ice_fltr_add_entry_to_list(ice_pf_to_dev(vsi->back), &info,
list);
}
/**
* ice_fltr_add_eth_to_list - add ethertype filter info to existing list
* @vsi: pointer to VSI struct
* @list: list to add filter info to
* @ethertype: ethertype of packet that matches filter
* @flag: filter direction, Tx or Rx
* @action: filter action
*/
static int
ice_fltr_add_eth_to_list(struct ice_vsi *vsi, struct list_head *list,
u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action)
{
struct ice_fltr_info info = { 0 };
info.flag = flag;
info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
info.fltr_act = action;
info.vsi_handle = vsi->idx;
info.l_data.ethertype_mac.ethertype = ethertype;
if (flag == ICE_FLTR_TX)
info.src_id = ICE_SRC_ID_VSI;
else
info.src_id = ICE_SRC_ID_LPORT;
return ice_fltr_add_entry_to_list(ice_pf_to_dev(vsi->back), &info,
list);
}
/**
* ice_fltr_prepare_mac - add or remove MAC rule
* @vsi: pointer to VSI struct
* @mac: MAC address to add
* @action: action to be performed on filter match
* @mac_action: pointer to add or remove MAC function
*/
static enum ice_status
ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action,
enum ice_status (*mac_action)(struct ice_vsi *,
struct list_head *))
{
enum ice_status result;
LIST_HEAD(tmp_list);
if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action)) {
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
return ICE_ERR_NO_MEMORY;
}
result = mac_action(vsi, &tmp_list);
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
return result;
}
/**
* ice_fltr_prepare_mac_and_broadcast - add or remove MAC and broadcast filter
* @vsi: pointer to VSI struct
* @mac: MAC address to add
* @action: action to be performed on filter match
* @mac_action: pointer to add or remove MAC function
*/
static enum ice_status
ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action,
enum ice_status(*mac_action)
(struct ice_vsi *, struct list_head *))
{
u8 broadcast[ETH_ALEN];
enum ice_status result;
LIST_HEAD(tmp_list);
eth_broadcast_addr(broadcast);
if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action) ||
ice_fltr_add_mac_to_list(vsi, &tmp_list, broadcast, action)) {
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
return ICE_ERR_NO_MEMORY;
}
result = mac_action(vsi, &tmp_list);
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
return result;
}
/**
* ice_fltr_prepare_vlan - add or remove VLAN filter
* @vsi: pointer to VSI struct
* @vlan_id: VLAN ID to add
* @action: action to be performed on filter match
* @vlan_action: pointer to add or remove VLAN function
*/
static enum ice_status
ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id,
enum ice_sw_fwd_act_type action,
enum ice_status (*vlan_action)(struct ice_vsi *,
struct list_head *))
{
enum ice_status result;
LIST_HEAD(tmp_list);
if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan_id, action))
return ICE_ERR_NO_MEMORY;
result = vlan_action(vsi, &tmp_list);
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
return result;
}
/**
* ice_fltr_prepare_eth - add or remove ethertype filter
* @vsi: pointer to VSI struct
* @ethertype: ethertype of packet to be filtered
* @flag: direction of packet, Tx or Rx
* @action: action to be performed on filter match
* @eth_action: pointer to add or remove ethertype function
*/
static enum ice_status
ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action,
enum ice_status (*eth_action)(struct ice_vsi *,
struct list_head *))
{
enum ice_status result;
LIST_HEAD(tmp_list);
if (ice_fltr_add_eth_to_list(vsi, &tmp_list, ethertype, flag, action))
return ICE_ERR_NO_MEMORY;
result = eth_action(vsi, &tmp_list);
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
return result;
}
/**
* ice_fltr_add_mac - add single MAC filter
* @vsi: pointer to VSI struct
* @mac: MAC to add
* @action: action to be performed on filter match
*/
enum ice_status ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_add_mac_list);
}
/**
* ice_fltr_add_mac_and_broadcast - add single MAC and broadcast
* @vsi: pointer to VSI struct
* @mac: MAC to add
* @action: action to be performed on filter match
*/
enum ice_status
ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_mac_and_broadcast(vsi, mac, action,
ice_fltr_add_mac_list);
}
/**
* ice_fltr_remove_mac - remove MAC filter
* @vsi: pointer to VSI struct
* @mac: filter MAC to remove
* @action: action to remove
*/
enum ice_status ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_remove_mac_list);
}
/**
* ice_fltr_add_vlan - add single VLAN filter
* @vsi: pointer to VSI struct
* @vlan_id: VLAN ID to add
* @action: action to be performed on filter match
*/
enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id,
enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_vlan(vsi, vlan_id, action,
ice_fltr_add_vlan_list);
}
/**
* ice_fltr_remove_vlan - remove VLAN filter
* @vsi: pointer to VSI struct
* @vlan_id: filter VLAN to remove
* @action: action to remove
*/
enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id,
enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_vlan(vsi, vlan_id, action,
ice_fltr_remove_vlan_list);
}
/**
* ice_fltr_add_eth - add specific ethertype filter
* @vsi: pointer to VSI struct
* @ethertype: ethertype of filter
* @flag: direction of packet to be filtered, Tx or Rx
* @action: action to be performed on filter match
*/
enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
ice_fltr_add_eth_list);
}
/**
* ice_fltr_remove_eth - remove ethertype filter
* @vsi: pointer to VSI struct
* @ethertype: ethertype of filter
* @flag: direction of filter
* @action: action to remove
*/
enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype,
u16 flag, enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
ice_fltr_remove_eth_list);
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018-2020, Intel Corporation. */
#ifndef _ICE_FLTR_H_
#define _ICE_FLTR_H_
void ice_fltr_free_list(struct device *dev, struct list_head *h);
int
ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list,
const u8 *mac, enum ice_sw_fwd_act_type action);
enum ice_status
ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action);
enum ice_status
ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action);
enum ice_status
ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list);
enum ice_status
ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action);
enum ice_status
ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list);
enum ice_status
ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vid,
enum ice_sw_fwd_act_type action);
enum ice_status
ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vid,
enum ice_sw_fwd_act_type action);
enum ice_status
ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action);
enum ice_status
ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action);
void ice_fltr_remove_all(struct ice_vsi *vsi);
#endif
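As a usage sketch in driver context (error handling trimmed), the new
helpers collapse the old build-list/add/free sequence into single calls,
as the ethtool loopback test conversion above shows:

static int example_bcast_filter(struct ice_vsi *vsi)
{
	u8 broadcast[ETH_ALEN];

	eth_broadcast_addr(broadcast);

	/* one call instead of ice_add_mac_to_list() + ice_add_mac() +
	 * ice_free_fltr_list()
	 */
	if (ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI))
		return -EIO;

	/* ... use the VSI ... */

	if (ice_fltr_remove_mac(vsi, broadcast, ICE_FWD_TO_VSI))
		return -EIO;

	return 0;
}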
......@@ -58,6 +58,7 @@
#define PRTDCB_GENS 0x00083020
#define PRTDCB_GENS_DCBX_STATUS_S 0
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
#define PRTDCB_TUP2TC 0x001D26C0 /* Reset Source: CORER */
#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4))
#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
......
......@@ -262,6 +262,12 @@ enum ice_rx_flex_desc_status_error_0_bits {
ICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
};
enum ice_rx_flex_desc_status_error_1_bits {
/* Note: These are predefined bit offsets */
ICE_RX_FLEX_DESC_STATUS1_NAT_S = 4,
ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
};
#define ICE_RXQ_CTX_SIZE_DWORDS 8
#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22
......@@ -413,6 +419,25 @@ enum ice_tx_ctx_desc_cmd_bits {
ICE_TX_CTX_DESC_RESERVED = 0x40
};
enum ice_tx_ctx_desc_eipt_offload {
ICE_TX_CTX_EIPT_NONE = 0x0,
ICE_TX_CTX_EIPT_IPV6 = 0x1,
ICE_TX_CTX_EIPT_IPV4_NO_CSUM = 0x2,
ICE_TX_CTX_EIPT_IPV4 = 0x3
};
#define ICE_TXD_CTX_QW0_EIPLEN_S 2
#define ICE_TXD_CTX_QW0_L4TUNT_S 9
#define ICE_TXD_CTX_UDP_TUNNELING BIT_ULL(ICE_TXD_CTX_QW0_L4TUNT_S)
#define ICE_TXD_CTX_GRE_TUNNELING (0x2ULL << ICE_TXD_CTX_QW0_L4TUNT_S)
#define ICE_TXD_CTX_QW0_NATLEN_S 12
#define ICE_TXD_CTX_QW0_L4T_CS_S 23
#define ICE_TXD_CTX_QW0_L4T_CS_M BIT_ULL(ICE_TXD_CTX_QW0_L4T_CS_S)
#define ICE_LAN_TXQ_MAX_QGRPS 127
#define ICE_LAN_TXQ_MAX_QDIS 1023
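For illustration, here is how the new fields compose the tunneling portion of context-descriptor quad word 0 for a UDP tunnel carrying a 20-byte outer IPv4 header and a 16-byte tunnel header (a sketch with assumed lengths; ice_tx_csum() performs the same arithmetic from the skb header pointers):
/* EIPLEN counts 4-byte units, NATLEN counts 2-byte units */
u64 cd_tunnel = ICE_TX_CTX_EIPT_IPV4 |			/* outer IPv4 w/ csum */
		(u64)(20 / 4) << ICE_TXD_CTX_QW0_EIPLEN_S |
		ICE_TXD_CTX_UDP_TUNNELING |		/* UDP L4 tunnel type */
		(u64)(16 / 2) << ICE_TXD_CTX_QW0_NATLEN_S;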
......
......@@ -5,6 +5,7 @@
#include "ice_base.h"
#include "ice_flow.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
/**
......@@ -37,7 +38,8 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
*/
static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
{
int i, ret = 0;
int ret = 0;
u16 i;
for (i = 0; i < vsi->num_rxq; i++)
ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
......@@ -248,8 +250,8 @@ void ice_vsi_delete(struct ice_vsi *vsi)
status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
if (status)
dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
vsi->vsi_num, status);
dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %s\n",
vsi->vsi_num, ice_stat_str(status));
kfree(ctxt);
}
......@@ -521,8 +523,8 @@ static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
if (status)
dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
vsi->vsi_num, status);
dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %s\n",
vsi->vsi_num, ice_stat_str(status));
}
/**
......@@ -565,8 +567,8 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
switch (vsi->type) {
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
vsi->rss_table_size = cap->rss_table_size;
vsi->rss_size = min_t(int, num_online_cpus(),
vsi->rss_table_size = (u16)cap->rss_table_size;
vsi->rss_size = min_t(u16, num_online_cpus(),
BIT(cap->rss_table_entry_width));
vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
break;
......@@ -684,15 +686,15 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
max_rss = ICE_MAX_LG_RSS_QS;
else
max_rss = ICE_MAX_RSS_QS_PER_VF;
qcount_rx = min_t(int, rx_numq_tc, max_rss);
qcount_rx = min_t(u16, rx_numq_tc, max_rss);
if (!vsi->req_rxq)
qcount_rx = min_t(int, qcount_rx,
qcount_rx = min_t(u16, qcount_rx,
vsi->rss_size);
}
}
/* find the (rounded up) power-of-2 of qcount */
pow = order_base_2(qcount_rx);
pow = (u16)order_base_2(qcount_rx);
ice_for_each_traffic_class(i) {
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
......@@ -941,7 +943,7 @@ int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
*/
static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
{
int start = 0, end = 0;
u16 start = 0, end = 0;
if (needed > res->end)
return -ENOMEM;
......@@ -1024,6 +1026,7 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back;
struct device *dev;
u16 num_q_vectors;
int base;
dev = ice_pf_to_dev(pf);
/* SRIOV doesn't grab irq_tracker entries for each VSI */
......@@ -1038,14 +1041,15 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
num_q_vectors = vsi->num_q_vectors;
/* reserve slots from OS requested IRQs */
vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
vsi->idx);
if (vsi->base_vector < 0) {
base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, vsi->idx);
if (base < 0) {
dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n",
ice_get_free_res_count(pf->irq_tracker),
ice_vsi_type_str(vsi->type), vsi->idx, num_q_vectors);
return -ENOENT;
}
vsi->base_vector = (u16)base;
pf->num_avail_sw_msix -= num_q_vectors;
return 0;
......@@ -1085,7 +1089,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct device *dev;
int i;
u16 i;
dev = ice_pf_to_dev(pf);
/* Allocate Tx rings */
......@@ -1178,7 +1182,7 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
u8 *lut;
dev = ice_pf_to_dev(pf);
vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);
vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
......@@ -1193,7 +1197,8 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
vsi->rss_table_size);
if (status) {
dev_err(dev, "set_rss_lut failed, error %d\n", status);
dev_err(dev, "set_rss_lut failed, error %s\n",
ice_stat_str(status));
err = -EIO;
goto ice_vsi_cfg_rss_exit;
}
......@@ -1215,7 +1220,8 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);
if (status) {
dev_err(dev, "set_rss_key failed, error %d\n", status);
dev_err(dev, "set_rss_key failed, error %s\n",
ice_stat_str(status));
err = -EIO;
}
......@@ -1248,8 +1254,8 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
if (status)
dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
vsi->vsi_num, status);
dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %s\n",
vsi->vsi_num, ice_stat_str(status));
}
/**
......@@ -1281,91 +1287,57 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
ICE_FLOW_SEG_HDR_IPV4);
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
vsi_num, status);
dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %s\n",
vsi_num, ice_stat_str(status));
/* configure RSS for IPv6 with input set IPv6 src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
ICE_FLOW_SEG_HDR_IPV6);
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
vsi_num, status);
dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %s\n",
vsi_num, ice_stat_str(status));
/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
vsi_num, status);
dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %s\n",
vsi_num, ice_stat_str(status));
/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
vsi_num, status);
dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %s\n",
vsi_num, ice_stat_str(status));
/* configure RSS for sctp4 with input set IP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
vsi_num, status);
dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %s\n",
vsi_num, ice_stat_str(status));
/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
vsi_num, status);
dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %s\n",
vsi_num, ice_stat_str(status));
/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
vsi_num, status);
dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %s\n",
vsi_num, ice_stat_str(status));
/* configure RSS for sctp6 with input set IPv6 src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
vsi_num, status);
}
/**
* ice_add_mac_to_list - Add a MAC address filter entry to the list
* @vsi: the VSI to be forwarded to
* @add_list: pointer to the list which contains MAC filter entries
* @macaddr: the MAC address to be added.
*
* Adds MAC address filter entry to the temp list
*
* Returns 0 on success or ENOMEM on failure.
*/
int
ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
const u8 *macaddr)
{
struct ice_fltr_list_entry *tmp;
struct ice_pf *pf = vsi->back;
tmp = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*tmp), GFP_ATOMIC);
if (!tmp)
return -ENOMEM;
tmp->fltr_info.flag = ICE_FLTR_TX;
tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
tmp->fltr_info.vsi_handle = vsi->idx;
ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);
INIT_LIST_HEAD(&tmp->list_entry);
list_add(&tmp->list_entry, add_list);
return 0;
dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %s\n",
vsi_num, ice_stat_str(status));
}
/**
......@@ -1414,55 +1386,22 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
vsi->stat_offsets_loaded = true;
}
/**
* ice_free_fltr_list - free filter lists helper
* @dev: pointer to the device struct
* @h: pointer to the list head to be freed
*
* Helper function to free filter lists previously created using
* ice_add_mac_to_list
*/
void ice_free_fltr_list(struct device *dev, struct list_head *h)
{
struct ice_fltr_list_entry *e, *tmp;
list_for_each_entry_safe(e, tmp, h, list_entry) {
list_del(&e->list_entry);
devm_kfree(dev, e);
}
}
/**
* ice_vsi_add_vlan - Add VSI membership for given VLAN
* @vsi: the VSI being configured
* @vid: VLAN ID to be added
* @action: filter action to be performed on match
*/
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
int
ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action)
{
struct ice_fltr_list_entry *tmp;
struct ice_pf *pf = vsi->back;
LIST_HEAD(tmp_add_list);
enum ice_status status;
struct device *dev;
int err = 0;
dev = ice_pf_to_dev(pf);
tmp = devm_kzalloc(dev, sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
tmp->fltr_info.flag = ICE_FLTR_TX;
tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
tmp->fltr_info.vsi_handle = vsi->idx;
tmp->fltr_info.l_data.vlan.vlan_id = vid;
INIT_LIST_HEAD(&tmp->list_entry);
list_add(&tmp->list_entry, &tmp_add_list);
status = ice_add_vlan(&pf->hw, &tmp_add_list);
if (!status) {
if (!ice_fltr_add_vlan(vsi, vid, action)) {
vsi->num_vlan++;
} else {
err = -ENODEV;
......@@ -1470,7 +1409,6 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
vsi->vsi_num);
}
ice_free_fltr_list(dev, &tmp_add_list);
return err;
}
......@@ -1483,41 +1421,25 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
*/
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
{
struct ice_fltr_list_entry *list;
struct ice_pf *pf = vsi->back;
LIST_HEAD(tmp_add_list);
enum ice_status status;
struct device *dev;
int err = 0;
dev = ice_pf_to_dev(pf);
list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
if (!list)
return -ENOMEM;
list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
list->fltr_info.vsi_handle = vsi->idx;
list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
list->fltr_info.l_data.vlan.vlan_id = vid;
list->fltr_info.flag = ICE_FLTR_TX;
list->fltr_info.src_id = ICE_SRC_ID_VSI;
INIT_LIST_HEAD(&list->list_entry);
list_add(&list->list_entry, &tmp_add_list);
status = ice_remove_vlan(&pf->hw, &tmp_add_list);
status = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI);
if (!status) {
vsi->num_vlan--;
} else if (status == ICE_ERR_DOES_NOT_EXIST) {
dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
vid, vsi->vsi_num, status);
dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %s\n",
vid, vsi->vsi_num, ice_stat_str(status));
} else {
dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n",
vid, vsi->vsi_num, status);
dev_err(dev, "Error removing VLAN %d on vsi %i error: %s\n",
vid, vsi->vsi_num, ice_stat_str(status));
err = -EIO;
}
ice_free_fltr_list(dev, &tmp_add_list);
return err;
}
......@@ -1671,7 +1593,7 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
u32 txq = 0, rxq = 0;
u16 txq = 0, rxq = 0;
int i, q;
for (i = 0; i < vsi->num_q_vectors; i++) {
......@@ -1737,8 +1659,9 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %s aq_err %s\n",
ice_stat_str(status),
ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto out;
}
......@@ -1783,8 +1706,9 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
ena, status, hw->adminq.sq_last_status);
dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %s aq_err %s\n",
ena, ice_stat_str(status),
ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto out;
}
......@@ -1922,9 +1846,10 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
if (status) {
netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
pf->hw.adminq.sq_last_status);
netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %s, aq_err = %s\n",
ena ? "En" : "Dis", vsi->idx, vsi->vsi_num,
ice_stat_str(status),
ice_aq_str(pf->hw.adminq.sq_last_status));
goto err_out;
}
......@@ -1990,47 +1915,6 @@ ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
return -EINVAL;
}
/**
* ice_vsi_add_rem_eth_mac - Program VSI ethertype based filter with rule
* @vsi: the VSI being configured
* @add_rule: boolean value to add or remove ethertype filter rule
*/
static void
ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
{
struct ice_fltr_list_entry *list;
struct ice_pf *pf = vsi->back;
LIST_HEAD(tmp_add_list);
enum ice_status status;
struct device *dev;
dev = ice_pf_to_dev(pf);
list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
if (!list)
return;
list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
list->fltr_info.fltr_act = ICE_DROP_PACKET;
list->fltr_info.flag = ICE_FLTR_TX;
list->fltr_info.src_id = ICE_SRC_ID_VSI;
list->fltr_info.vsi_handle = vsi->idx;
list->fltr_info.l_data.ethertype_mac.ethertype = vsi->ethtype;
INIT_LIST_HEAD(&list->list_entry);
list_add(&list->list_entry, &tmp_add_list);
if (add_rule)
status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
else
status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
if (status)
dev_err(dev, "Failure Adding or Removing Ethertype on VSI %i error: %d\n",
vsi->vsi_num, status);
ice_free_fltr_list(dev, &tmp_add_list);
}
/**
* ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
* @vsi: the VSI being configured
......@@ -2039,45 +1923,25 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
*/
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
{
struct ice_fltr_list_entry *list;
enum ice_status (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
enum ice_sw_fwd_act_type act);
struct ice_pf *pf = vsi->back;
LIST_HEAD(tmp_add_list);
enum ice_status status;
struct device *dev;
dev = ice_pf_to_dev(pf);
list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
if (!list)
return;
eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;
list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
list->fltr_info.vsi_handle = vsi->idx;
list->fltr_info.l_data.ethertype_mac.ethertype = ETH_P_LLDP;
if (tx) {
list->fltr_info.fltr_act = ICE_DROP_PACKET;
list->fltr_info.flag = ICE_FLTR_TX;
list->fltr_info.src_id = ICE_SRC_ID_VSI;
} else {
list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
list->fltr_info.flag = ICE_FLTR_RX;
list->fltr_info.src_id = ICE_SRC_ID_LPORT;
}
INIT_LIST_HEAD(&list->list_entry);
list_add(&list->list_entry, &tmp_add_list);
if (create)
status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
if (tx)
status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
ICE_DROP_PACKET);
else
status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX, ICE_FWD_TO_VSI);
if (status)
dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n",
create ? "adding" : "removing", tx ? "TX" : "RX",
vsi->vsi_num, status);
ice_free_fltr_list(dev, &tmp_add_list);
vsi->vsi_num, ice_stat_str(status));
}
/**
......@@ -2164,7 +2028,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* so this handles those cases (i.e. adding the PF to a bridge
* without the 8021q module loaded).
*/
ret = ice_vsi_add_vlan(vsi, 0);
ret = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
if (ret)
goto unroll_clear_rings;
......@@ -2223,8 +2087,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
dev_err(dev, "VSI %d failed lan queue config, error %d\n",
vsi->vsi_num, status);
dev_err(dev, "VSI %d failed lan queue config, error %s\n",
vsi->vsi_num, ice_stat_str(status));
goto unroll_vector_base;
}
......@@ -2239,9 +2103,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
*/
if (!ice_is_safe_mode(pf))
if (vsi->type == ICE_VSI_PF) {
ice_vsi_add_rem_eth_mac(vsi, true);
/* Tx LLDP packets */
ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
ICE_DROP_PACKET);
ice_cfg_sw_lldp(vsi, true, true);
}
......@@ -2558,7 +2421,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
if (!ice_is_safe_mode(pf)) {
if (vsi->type == ICE_VSI_PF) {
ice_vsi_add_rem_eth_mac(vsi, false);
ice_fltr_remove_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
ICE_DROP_PACKET);
ice_cfg_sw_lldp(vsi, true, false);
/* The Rx rule will only exist to remove if the LLDP FW
* engine is currently stopped
......@@ -2568,7 +2432,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
}
ice_remove_vsi_fltr(&pf->hw, vsi->idx);
ice_fltr_remove_all(vsi);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_delete(vsi);
ice_vsi_free_q_vectors(vsi);
......@@ -2814,8 +2678,8 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n",
vsi->vsi_num, status);
dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %s\n",
vsi->vsi_num, ice_stat_str(status));
if (init_vsi) {
ret = -EIO;
goto err_vectors;
......@@ -2924,8 +2788,8 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
max_txqs);
if (status) {
dev_err(dev, "VSI %d failed TC config, error %d\n",
vsi->vsi_num, status);
dev_err(dev, "VSI %d failed TC config, error %s\n",
vsi->vsi_num, ice_stat_str(status));
ret = -EIO;
goto out;
}
......@@ -2984,36 +2848,6 @@ void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes)
u64_stats_update_end(&rx_ring->syncp);
}
/**
* ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI
* @vsi: the VSI being configured MAC filter
* @macaddr: the MAC address to be added.
* @set: Add or delete a MAC filter
*
* Adds or removes MAC address filter entry for VF VSI
*/
enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set)
{
LIST_HEAD(tmp_add_list);
enum ice_status status;
/* Update MAC filter list to be added or removed for a VSI */
if (ice_add_mac_to_list(vsi, &tmp_add_list, macaddr)) {
status = ICE_ERR_NO_MEMORY;
goto cfg_mac_fltr_exit;
}
if (set)
status = ice_add_mac(&vsi->back->hw, &tmp_add_list);
else
status = ice_remove_mac(&vsi->back->hw, &tmp_add_list);
cfg_mac_fltr_exit:
ice_free_fltr_list(ice_pf_to_dev(vsi->back), &tmp_add_list);
return status;
}
/**
* ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
* @sw: switch to check if its default forwarding VSI is free
......@@ -3079,8 +2913,8 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
if (status) {
dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
vsi->vsi_num, status);
dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %s\n",
vsi->vsi_num, ice_stat_str(status));
return -EIO;
}
......@@ -3118,8 +2952,8 @@ int ice_clear_dflt_vsi(struct ice_sw *sw)
status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
ICE_FLTR_RX);
if (status) {
dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
dflt_vsi->vsi_num, status);
dev_err(dev, "Failed to clear the default forwarding VSI %d, error %s\n",
dflt_vsi->vsi_num, ice_stat_str(status));
return -EIO;
}
......
......@@ -8,12 +8,6 @@
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
int
ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
const u8 *macaddr);
void ice_free_fltr_list(struct device *dev, struct list_head *h);
void ice_update_eth_stats(struct ice_vsi *vsi);
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
......@@ -22,7 +16,8 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid);
int
ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action);
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);
......
......@@ -8,6 +8,7 @@
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
......@@ -133,38 +134,24 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
static int ice_init_mac_fltr(struct ice_pf *pf)
{
enum ice_status status;
u8 broadcast[ETH_ALEN];
struct ice_vsi *vsi;
u8 *perm_addr;
vsi = ice_get_main_vsi(pf);
if (!vsi)
return -EINVAL;
/* To add a MAC filter, first add the MAC to a list and then
* pass the list to ice_add_mac.
*/
/* Add a unicast MAC filter so the VSI can get its packets */
status = ice_vsi_cfg_mac_fltr(vsi, vsi->port_info->mac.perm_addr, true);
if (status)
goto unregister;
/* VSI needs to receive broadcast traffic, so add the broadcast
* MAC address to the list as well.
*/
eth_broadcast_addr(broadcast);
status = ice_vsi_cfg_mac_fltr(vsi, broadcast, true);
if (status)
goto unregister;
perm_addr = vsi->port_info->mac.perm_addr;
status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
if (!status)
return 0;
return 0;
unregister:
/* We aren't useful with no MAC filters, so unregister if we
* had an error
*/
if (status && vsi->netdev->reg_state == NETREG_REGISTERED) {
dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %d. Unregistering device\n",
status);
if (vsi->netdev->reg_state == NETREG_REGISTERED) {
dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %s. Unregistering device\n",
ice_stat_str(status));
unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
vsi->netdev = NULL;
......@@ -188,7 +175,8 @@ static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
if (ice_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr))
if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
ICE_FWD_TO_VSI))
return -EINVAL;
return 0;
......@@ -209,7 +197,8 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
if (ice_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr))
if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
ICE_FWD_TO_VSI))
return -EINVAL;
return 0;
......@@ -307,8 +296,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
/* Remove MAC addresses in the unsync list */
status = ice_remove_mac(hw, &vsi->tmp_unsync_list);
ice_free_fltr_list(dev, &vsi->tmp_unsync_list);
status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
if (status) {
netdev_err(netdev, "Failed to delete MAC filters\n");
/* if we failed because of alloc failures, just bail */
......@@ -319,8 +308,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
/* Add MAC addresses in the sync list */
status = ice_add_mac(hw, &vsi->tmp_sync_list);
ice_free_fltr_list(dev, &vsi->tmp_sync_list);
status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
ice_fltr_free_list(dev, &vsi->tmp_sync_list);
/* If the filter was added successfully or already exists, do not take
 * the error path below; instead continue processing the rest of the
 * function.
......@@ -357,7 +346,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
vsi->current_netdev_flags &= ~IFF_ALLMULTI;
goto out_promisc;
}
} else if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
} else {
/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
if (vsi->vlan_ena)
promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
else
......@@ -1017,8 +1007,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
if (ret == ICE_ERR_AQ_NO_WORK)
break;
if (ret) {
dev_err(dev, "%s Receive Queue event error %d\n", qtype,
ret);
dev_err(dev, "%s Receive Queue event error %s\n", qtype,
ice_stat_str(ret));
break;
}
......@@ -1809,8 +1799,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
dev_err(dev, "Failed VSI LAN queue config for XDP, error:%d\n",
status);
dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
ice_stat_str(status));
goto clear_xdp_rings;
}
ice_vsi_assign_bpf_prog(vsi, prog);
......@@ -2137,10 +2127,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
}
ret = IRQ_HANDLED;
if (!test_bit(__ICE_DOWN, pf->state)) {
ice_service_task_schedule(pf);
ice_irq_dynamic_ena(hw, NULL, NULL);
}
ice_service_task_schedule(pf);
ice_irq_dynamic_ena(hw, NULL, NULL);
return ret;
}
......@@ -2247,7 +2235,7 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
return oicr_idx;
pf->num_avail_sw_msix -= 1;
pf->oicr_idx = oicr_idx;
pf->oicr_idx = (u16)oicr_idx;
err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
ice_misc_intr, 0, pf->int_name, pf);
......@@ -2342,13 +2330,27 @@ static void ice_set_netdev_features(struct net_device *netdev)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
tso_features = NETIF_F_TSO |
tso_features = NETIF_F_TSO |
NETIF_F_TSO_ECN |
NETIF_F_TSO6 |
NETIF_F_GSO_GRE |
NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_PARTIAL |
NETIF_F_GSO_IPXIP4 |
NETIF_F_GSO_IPXIP6 |
NETIF_F_GSO_UDP_L4;
netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_GRE_CSUM;
/* set features that user can change */
netdev->hw_features = dflt_features | csumo_features |
vlano_features | tso_features;
/* add support for HW_CSUM on packets with MPLS header */
netdev->mpls_features = NETIF_F_HW_CSUM;
/* enable features */
netdev->features |= netdev->hw_features;
/* encap and VLAN devices inherit default, csumo and tso features */
......@@ -2509,7 +2511,7 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
* packets aren't pruned by the device's internal switch on Rx
*/
ret = ice_vsi_add_vlan(vsi, vid);
ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
if (!ret) {
vsi->vlan_ena = true;
set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
......@@ -2630,7 +2632,8 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
static u16
ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
{
u16 count = 0, bit;
unsigned long bit;
u16 count = 0;
mutex_lock(lock);
for_each_clear_bit(bit, pf_qmap, size)
......@@ -2869,8 +2872,8 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
}
/* populate SW interrupts pool with number of OS granted IRQs. */
pf->num_avail_sw_msix = vectors;
pf->irq_tracker->num_entries = vectors;
pf->num_avail_sw_msix = (u16)vectors;
pf->irq_tracker->num_entries = (u16)vectors;
pf->irq_tracker->end = pf->irq_tracker->num_entries;
return 0;
......@@ -2902,9 +2905,9 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
}
if (new_tx)
vsi->req_txq = new_tx;
vsi->req_txq = (u16)new_tx;
if (new_rx)
vsi->req_rxq = new_rx;
vsi->req_rxq = (u16)new_rx;
/* set for the next time the netdev is started */
if (!netif_running(vsi->netdev)) {
......@@ -3298,9 +3301,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_init_interrupt_unroll;
}
/* Driver is mostly up */
clear_bit(__ICE_DOWN, pf->state);
/* In case of MSIX we are going to setup the misc vector right here
* to handle admin queue events etc. In case of legacy and MSI
* the misc functionality and queue processing is combined in
......@@ -3356,9 +3356,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_verify_cacheline_size(pf);
/* If no DDP driven features have to be setup, return here */
/* If no DDP-driven features have to be set up, we are done with probe */
if (ice_is_safe_mode(pf))
return 0;
goto probe_done;
/* initialize DDP driven features */
......@@ -3373,6 +3373,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
/* print PCI link speed and width */
pcie_print_link_status(pf->pdev);
probe_done:
/* ready to go, so clear down state bit */
clear_bit(__ICE_DOWN, pf->state);
return 0;
err_alloc_sw_unroll:
......@@ -3705,25 +3708,24 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
return -EBUSY;
}
/* When we change the MAC address we also have to change the MAC address
* based filter rules that were created previously for the old MAC
* address. So first, we remove the old filter rule using ice_remove_mac
* and then create a new filter rule using ice_add_mac via
* ice_vsi_cfg_mac_fltr function call for both add and/or remove
* filters.
*/
status = ice_vsi_cfg_mac_fltr(vsi, netdev->dev_addr, false);
if (status) {
/* Clean up old MAC filter. Not an error if old filter doesn't exist */
status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
if (status && status != ICE_ERR_DOES_NOT_EXIST) {
err = -EADDRNOTAVAIL;
goto err_update_filters;
}
status = ice_vsi_cfg_mac_fltr(vsi, mac, true);
if (status) {
err = -EADDRNOTAVAIL;
goto err_update_filters;
/* Add filter for new MAC. If filter exists, just return success */
status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
if (status == ICE_ERR_ALREADY_EXISTS) {
netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
return 0;
}
/* error if the new filter addition failed */
if (status)
err = -EADDRNOTAVAIL;
err_update_filters:
if (err) {
netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
......@@ -3740,8 +3742,8 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
if (status) {
netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
mac, status);
netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
mac, ice_stat_str(status));
}
return 0;
}
......@@ -3805,8 +3807,8 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
q_handle, ICE_MAX_BW, maxrate * 1000);
if (status) {
netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
status);
netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
ice_stat_str(status));
return -EIO;
}
......@@ -4604,8 +4606,9 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
/* replay filters for the VSI */
status = ice_replay_vsi(&pf->hw, vsi->idx);
if (status) {
dev_err(dev, "replay VSI failed, status %d, VSI index %d, type %s\n",
status, vsi->idx, ice_vsi_type_str(type));
dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
ice_stat_str(status), vsi->idx,
ice_vsi_type_str(type));
return -EIO;
}
......@@ -4674,7 +4677,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ret = ice_init_all_ctrlq(hw);
if (ret) {
dev_err(dev, "control queues init failed %d\n", ret);
dev_err(dev, "control queues init failed %s\n",
ice_stat_str(ret));
goto err_init_ctrlq;
}
......@@ -4690,7 +4694,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ret = ice_clear_pf_cfg(hw);
if (ret) {
dev_err(dev, "clear PF configuration failed %d\n", ret);
dev_err(dev, "clear PF configuration failed %s\n",
ice_stat_str(ret));
goto err_init_ctrlq;
}
......@@ -4704,7 +4709,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ret = ice_get_caps(hw);
if (ret) {
dev_err(dev, "ice_get_caps failed %d\n", ret);
dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
goto err_init_ctrlq;
}
......@@ -4746,8 +4751,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
/* tell the firmware we are up */
ret = ice_send_version(pf);
if (ret) {
dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
ret);
dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
ice_stat_str(ret));
goto err_vsi_rebuild;
}
......@@ -4858,6 +4863,112 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
/**
* ice_aq_str - convert AQ err code to a string
* @aq_err: the AQ error code to convert
*/
const char *ice_aq_str(enum ice_aq_err aq_err)
{
switch (aq_err) {
case ICE_AQ_RC_OK:
return "OK";
case ICE_AQ_RC_EPERM:
return "ICE_AQ_RC_EPERM";
case ICE_AQ_RC_ENOENT:
return "ICE_AQ_RC_ENOENT";
case ICE_AQ_RC_ENOMEM:
return "ICE_AQ_RC_ENOMEM";
case ICE_AQ_RC_EBUSY:
return "ICE_AQ_RC_EBUSY";
case ICE_AQ_RC_EEXIST:
return "ICE_AQ_RC_EEXIST";
case ICE_AQ_RC_EINVAL:
return "ICE_AQ_RC_EINVAL";
case ICE_AQ_RC_ENOSPC:
return "ICE_AQ_RC_ENOSPC";
case ICE_AQ_RC_ENOSYS:
return "ICE_AQ_RC_ENOSYS";
case ICE_AQ_RC_ENOSEC:
return "ICE_AQ_RC_ENOSEC";
case ICE_AQ_RC_EBADSIG:
return "ICE_AQ_RC_EBADSIG";
case ICE_AQ_RC_ESVN:
return "ICE_AQ_RC_ESVN";
case ICE_AQ_RC_EBADMAN:
return "ICE_AQ_RC_EBADMAN";
case ICE_AQ_RC_EBADBUF:
return "ICE_AQ_RC_EBADBUF";
}
return "ICE_AQ_RC_UNKNOWN";
}
/**
* ice_stat_str - convert status err code to a string
* @stat_err: the status error code to convert
*/
const char *ice_stat_str(enum ice_status stat_err)
{
switch (stat_err) {
case ICE_SUCCESS:
return "OK";
case ICE_ERR_PARAM:
return "ICE_ERR_PARAM";
case ICE_ERR_NOT_IMPL:
return "ICE_ERR_NOT_IMPL";
case ICE_ERR_NOT_READY:
return "ICE_ERR_NOT_READY";
case ICE_ERR_NOT_SUPPORTED:
return "ICE_ERR_NOT_SUPPORTED";
case ICE_ERR_BAD_PTR:
return "ICE_ERR_BAD_PTR";
case ICE_ERR_INVAL_SIZE:
return "ICE_ERR_INVAL_SIZE";
case ICE_ERR_DEVICE_NOT_SUPPORTED:
return "ICE_ERR_DEVICE_NOT_SUPPORTED";
case ICE_ERR_RESET_FAILED:
return "ICE_ERR_RESET_FAILED";
case ICE_ERR_FW_API_VER:
return "ICE_ERR_FW_API_VER";
case ICE_ERR_NO_MEMORY:
return "ICE_ERR_NO_MEMORY";
case ICE_ERR_CFG:
return "ICE_ERR_CFG";
case ICE_ERR_OUT_OF_RANGE:
return "ICE_ERR_OUT_OF_RANGE";
case ICE_ERR_ALREADY_EXISTS:
return "ICE_ERR_ALREADY_EXISTS";
case ICE_ERR_NVM_CHECKSUM:
return "ICE_ERR_NVM_CHECKSUM";
case ICE_ERR_BUF_TOO_SHORT:
return "ICE_ERR_BUF_TOO_SHORT";
case ICE_ERR_NVM_BLANK_MODE:
return "ICE_ERR_NVM_BLANK_MODE";
case ICE_ERR_IN_USE:
return "ICE_ERR_IN_USE";
case ICE_ERR_MAX_LIMIT:
return "ICE_ERR_MAX_LIMIT";
case ICE_ERR_RESET_ONGOING:
return "ICE_ERR_RESET_ONGOING";
case ICE_ERR_HW_TABLE:
return "ICE_ERR_HW_TABLE";
case ICE_ERR_DOES_NOT_EXIST:
return "ICE_ERR_DOES_NOT_EXIST";
case ICE_ERR_AQ_ERROR:
return "ICE_ERR_AQ_ERROR";
case ICE_ERR_AQ_TIMEOUT:
return "ICE_ERR_AQ_TIMEOUT";
case ICE_ERR_AQ_FULL:
return "ICE_ERR_AQ_FULL";
case ICE_ERR_AQ_NO_WORK:
return "ICE_ERR_AQ_NO_WORK";
case ICE_ERR_AQ_EMPTY:
return "ICE_ERR_AQ_EMPTY";
}
return "ICE_ERR_UNKNOWN";
}
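These helpers exist so log messages report symbolic codes instead of raw integers. The call-site conversions throughout this patch follow the same pattern; a representative sketch (variables as at the surrounding call sites):
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status)
	dev_err(dev, "update VSI failed, err %s aq_err %s\n",
		ice_stat_str(status),
		ice_aq_str(hw->adminq.sq_last_status));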
/**
* ice_set_rss - Set RSS keys and lut
* @vsi: Pointer to VSI structure
......@@ -4882,8 +4993,9 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_set_rss_key(hw, vsi->idx, buf);
if (status) {
dev_err(dev, "Cannot set RSS key, err %d aq_err %d\n",
status, hw->adminq.rq_last_status);
dev_err(dev, "Cannot set RSS key, err %s aq_err %s\n",
ice_stat_str(status),
ice_aq_str(hw->adminq.rq_last_status));
return -EIO;
}
}
......@@ -4892,8 +5004,9 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
lut, lut_size);
if (status) {
dev_err(dev, "Cannot set RSS lut, err %d aq_err %d\n",
status, hw->adminq.rq_last_status);
dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n",
ice_stat_str(status),
ice_aq_str(hw->adminq.rq_last_status));
return -EIO;
}
}
......@@ -4924,8 +5037,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_get_rss_key(hw, vsi->idx, buf);
if (status) {
dev_err(dev, "Cannot get RSS key, err %d aq_err %d\n",
status, hw->adminq.rq_last_status);
dev_err(dev, "Cannot get RSS key, err %s aq_err %s\n",
ice_stat_str(status),
ice_aq_str(hw->adminq.rq_last_status));
return -EIO;
}
}
......@@ -4934,8 +5048,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
lut, lut_size);
if (status) {
dev_err(dev, "Cannot get RSS lut, err %d aq_err %d\n",
status, hw->adminq.rq_last_status);
dev_err(dev, "Cannot get RSS lut, err %s aq_err %s\n",
ice_stat_str(status),
ice_aq_str(hw->adminq.rq_last_status));
return -EIO;
}
}
......@@ -5002,8 +5117,9 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
bmode, status, hw->adminq.sq_last_status);
dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
bmode, ice_stat_str(status),
ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto out;
}
......@@ -5072,8 +5188,9 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
*/
status = ice_update_sw_rule_bridge_mode(hw);
if (status) {
netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n",
mode, status, hw->adminq.sq_last_status);
netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
mode, ice_stat_str(status),
ice_aq_str(hw->adminq.sq_last_status));
/* revert hw->evb_veb */
hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
return -EIO;
......@@ -5100,6 +5217,16 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
pf->tx_timeout_count++;
/* Check if PFC is enabled for the TC to which the queue belongs
* to. If yes then Tx timeout is not caused by a hung queue, no
* need to reset and rebuild
*/
if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
txqueue);
return;
}
/* now that we have an index, find the tx_ring struct */
for (i = 0; i < vsi->num_txq; i++)
if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
......@@ -5157,6 +5284,70 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
pf->tx_timeout_recovery_level++;
}
/**
* ice_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
* @netdev: This physical port's netdev
* @ti: Tunnel endpoint information
*/
static void
ice_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
enum ice_tunnel_type tnl_type;
u16 port = ntohs(ti->port);
enum ice_status status;
switch (ti->type) {
case UDP_TUNNEL_TYPE_VXLAN:
tnl_type = TNL_VXLAN;
break;
case UDP_TUNNEL_TYPE_GENEVE:
tnl_type = TNL_GENEVE;
break;
default:
netdev_err(netdev, "Unknown tunnel type\n");
return;
}
status = ice_create_tunnel(&pf->hw, tnl_type, port);
if (status == ICE_ERR_OUT_OF_RANGE)
netdev_info(netdev, "Max tunneled UDP ports reached, port %d not added\n",
port);
else if (status)
netdev_err(netdev, "Error adding UDP tunnel - %s\n",
ice_stat_str(status));
}
/**
* ice_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
* @netdev: This physical port's netdev
* @ti: Tunnel endpoint information
*/
static void
ice_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
u16 port = ntohs(ti->port);
enum ice_status status;
bool retval;
retval = ice_tunnel_port_in_use(&pf->hw, port, NULL);
if (!retval) {
netdev_info(netdev, "port %d not found in UDP tunnels list\n",
port);
return;
}
status = ice_destroy_tunnel(&pf->hw, port, false);
if (status)
netdev_err(netdev, "error deleting port %d from UDP tunnels list\n",
port);
}
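For reference, a sketch of what the core passes into these hooks: creating a VXLAN device on the IANA-assigned port results in a call equivalent to the following (illustrative values only):
struct udp_tunnel_info ti = {
	.type = UDP_TUNNEL_TYPE_VXLAN,
	.port = htons(4789),	/* IANA-assigned VXLAN port */
};

/* the stack invokes the hook; the driver maps it to
 * ice_create_tunnel(&pf->hw, TNL_VXLAN, 4789)
 */
ice_udp_tunnel_add(netdev, &ti);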
/**
* ice_open - Called when a network interface becomes active
* @netdev: network interface device structure
......@@ -5173,14 +5364,20 @@ int ice_open(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_port_info *pi;
int err;
if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) {
if (test_bit(__ICE_NEEDS_RESTART, pf->state)) {
netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
return -EIO;
}
if (test_bit(__ICE_DOWN, pf->state)) {
netdev_err(netdev, "device is not ready yet\n");
return -EBUSY;
}
netif_carrier_off(netdev);
pi = vsi->port_info;
......@@ -5213,6 +5410,10 @@ int ice_open(struct net_device *netdev)
if (err)
netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
/* Update existing tunnel information */
udp_tunnel_get_rx_info(netdev);
return err;
}
......@@ -5263,21 +5464,21 @@ ice_features_check(struct sk_buff *skb,
features &= ~NETIF_F_GSO_MASK;
len = skb_network_header(skb) - skb->data;
if (len & ~(ICE_TXD_MACLEN_MAX))
if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
goto out_rm_features;
len = skb_transport_header(skb) - skb_network_header(skb);
if (len & ~(ICE_TXD_IPLEN_MAX))
if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
if (skb->encapsulation) {
len = skb_inner_network_header(skb) - skb_transport_header(skb);
if (len & ~(ICE_TXD_L4LEN_MAX))
if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
goto out_rm_features;
len = skb_inner_transport_header(skb) -
skb_inner_network_header(skb);
if (len & ~(ICE_TXD_IPLEN_MAX))
if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
}
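The switch from the mask test to an explicit range-and-parity test matters because the hardware programs header lengths in 2-byte (MAC) and 4-byte (IP) units, as the ice_tx_csum() changes later in this series show, so an odd byte count is unrepresentable; the old test, len & ~(MAX), accepted any value that fit under the mask, odd or not. A small illustration, assuming a maximum of 127 purely for the example:
unsigned int len = 15;				/* odd header length */
bool old_rejected = len & ~127u;		/* 0: mask test accepted it */
bool new_rejected = len > 127 || (len & 0x1);	/* 1: rejected as odd */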
......@@ -5326,4 +5527,6 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_bpf = ice_xdp,
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
.ndo_udp_tunnel_add = ice_udp_tunnel_add,
.ndo_udp_tunnel_del = ice_udp_tunnel_del,
};
......@@ -366,6 +366,87 @@ static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
return 0;
}
/**
* ice_get_netlist_ver_info - get the netlist version information
* @hw: pointer to the HW struct
*/
static enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw)
{
struct ice_netlist_ver_info *ver = &hw->netlist_ver;
enum ice_status ret;
u32 id_blk_start;
__le16 raw_data;
u16 data, i;
u16 *buff;
ret = ice_acquire_nvm(hw, ICE_RES_READ);
if (ret)
return ret;
buff = kcalloc(ICE_AQC_NVM_NETLIST_ID_BLK_LEN, sizeof(*buff),
GFP_KERNEL);
if (!buff) {
ret = ICE_ERR_NO_MEMORY;
goto exit_no_mem;
}
/* read module length */
ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2,
ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data,
false, false, NULL);
if (ret)
goto exit_error;
data = le16_to_cpu(raw_data);
/* exit if the module length is 0 */
if (!data)
goto exit_error;
/* read node count */
ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2,
ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data,
false, false, NULL);
if (ret)
goto exit_error;
data = le16_to_cpu(raw_data) & ICE_AQC_NVM_NETLIST_NODE_COUNT_M;
/* netlist ID block starts from offset 4 + node count * 2 */
id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2;
/* read the entire netlist ID block */
ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
id_blk_start * 2,
ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false,
false, NULL);
if (ret)
goto exit_error;
for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++)
buff[i] = le16_to_cpu(((__force __le16 *)buff)[i]);
ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW];
ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW];
ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) |
buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW];
ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) |
buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW];
ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER];
/* Read the leftmost 4 bytes of the SHA hash */
ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 |
buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14];
exit_error:
kfree(buff);
exit_no_mem:
ice_release_nvm(hw);
return ret;
}
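To make the offset arithmetic concrete: the module's internal offsets are in 2-byte words while ice_aq_read_nvm() takes byte offsets, hence the multiplications by 2. A worked example with an assumed node count of 6 and the 4-word start offset noted in the comment above:
u16 node_count = 6;			/* assumed for illustration */
u32 id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET +
		   node_count * 2;	/* word 4 + 12 = word 16 */
u32 byte_off = id_blk_start * 2;	/* byte 32 into the module */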
/**
* ice_discover_flash_size - Discover the available flash size.
* @hw: pointer to the HW struct
......@@ -515,6 +596,11 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
return status;
}
/* read the netlist version information */
status = ice_get_netlist_ver_info(hw);
if (status)
ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");
return 0;
}
......
......@@ -18,6 +18,7 @@ enum ice_prot_id {
ICE_PROT_IPV6_IL = 41,
ICE_PROT_TCP_IL = 49,
ICE_PROT_UDP_IL_OR_S = 53,
ICE_PROT_GRE_OF = 64,
ICE_PROT_SCTP_IL = 96,
ICE_PROT_META_ID = 255, /* when offset == metadata */
ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
......
......@@ -1917,7 +1917,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
*/
static enum ice_status
ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
enum ice_rl_type rl_type, u8 bw_alloc)
enum ice_rl_type rl_type, u16 bw_alloc)
{
struct ice_aqc_txsched_elem_data buf;
struct ice_aqc_txsched_elem *data;
......
......@@ -593,8 +593,8 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
ICE_AQC_GET_SW_CONF_RESP_IS_VF)
is_vf = true;
res_type = le16_to_cpu(ele->vsi_port_num) >>
ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
/* FW VSI is not needed. Just continue. */
......@@ -1618,12 +1618,12 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
struct ice_fltr_list_entry *m_list_itr;
struct list_head *rule_head;
u16 elem_sent, total_elem_left;
u16 total_elem_left, s_rule_size;
struct ice_switch_info *sw;
struct mutex *rule_lock; /* Lock to protect filter rule list */
enum ice_status status = 0;
u16 num_unicast = 0;
u16 s_rule_size;
u8 elem_sent;
if (!m_list || !hw)
return ICE_ERR_PARAM;
......@@ -1707,8 +1707,8 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
total_elem_left -= elem_sent) {
struct ice_aqc_sw_rules_elem *entry = r_iter;
elem_sent = min(total_elem_left,
(u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
elem_sent = min_t(u8, total_elem_left,
(ICE_AQ_MAX_BUF_LEN / s_rule_size));
status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
elem_sent, ice_aqc_opc_add_sw_rules,
NULL);
......
......@@ -819,7 +819,7 @@ static struct sk_buff *
ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct xdp_buff *xdp)
{
unsigned int metasize = xdp->data - xdp->data_meta;
u8 metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
......@@ -934,7 +934,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
*/
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
u32 ntc = rx_ring->next_to_clean + 1;
u16 ntc = rx_ring->next_to_clean + 1;
/* fetch, update, and store next to clean */
ntc = (ntc < rx_ring->count) ? ntc : 0;
......@@ -1544,7 +1544,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* don't allow the budget to go below 1 because that would exit
* polling early.
*/
budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
else
/* Max of 1 Rx ring in this q_vector so give it the budget */
budget_per_ring = budget;
......@@ -1680,7 +1680,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
*/
while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, max_data, td_tag);
ice_build_ctob(td_cmd, td_offset, max_data,
td_tag);
tx_desc++;
i++;
......@@ -1700,8 +1701,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
if (likely(!data_len))
break;
tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
size, td_tag);
tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
size, td_tag);
tx_desc++;
i++;
......@@ -1732,8 +1733,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
/* write last descriptor with RS and EOP bits */
td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size,
td_tag);
tx_desc->cmd_type_offset_bsz =
ice_build_ctob(td_cmd, td_offset, size, td_tag);
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
......@@ -1807,12 +1808,94 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
l2_len = ip.hdr - skb->data;
offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
if (skb->encapsulation)
return -1;
protocol = vlan_get_protocol(skb);
if (protocol == htons(ETH_P_IP))
first->tx_flags |= ICE_TX_FLAGS_IPV4;
else if (protocol == htons(ETH_P_IPV6))
first->tx_flags |= ICE_TX_FLAGS_IPV6;
if (skb->encapsulation) {
bool gso_ena = false;
u32 tunnel = 0;
/* define outer network header type */
if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
ICE_TX_CTX_EIPT_IPV4 :
ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
l4_proto = ip.v4->protocol;
} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
tunnel |= ICE_TX_CTX_EIPT_IPV6;
exthdr = ip.hdr + sizeof(*ip.v6);
l4_proto = ip.v6->nexthdr;
if (l4.hdr != exthdr)
ipv6_skip_exthdr(skb, exthdr - skb->data,
&l4_proto, &frag_off);
}
/* define outer transport */
switch (l4_proto) {
case IPPROTO_UDP:
tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
break;
case IPPROTO_GRE:
tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
break;
case IPPROTO_IPIP:
case IPPROTO_IPV6:
first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
l4.hdr = skb_inner_network_header(skb);
break;
default:
if (first->tx_flags & ICE_TX_FLAGS_TSO)
return -1;
skb_checksum_help(skb);
return 0;
}
/* compute outer L3 header size */
tunnel |= ((l4.hdr - ip.hdr) / 4) <<
ICE_TXD_CTX_QW0_EIPLEN_S;
/* switch IP header pointer from outer to inner header */
ip.hdr = skb_inner_network_header(skb);
/* compute tunnel header size */
tunnel |= ((ip.hdr - l4.hdr) / 2) <<
ICE_TXD_CTX_QW0_NATLEN_S;
gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
/* indicate if we need to offload outer UDP header */
if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
/* record tunnel offload values */
off->cd_tunnel_params |= tunnel;
/* set DTYP=1 to indicate that it's a Tx context descriptor
* in IPsec tunnel mode with Tx offloads in Quad word 1
*/
off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
/* switch L4 header pointer from outer to inner */
l4.hdr = skb_inner_transport_header(skb);
l4_proto = 0;
/* reset type as we transition from outer to inner headers */
first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
if (ip.v4->version == 4)
first->tx_flags |= ICE_TX_FLAGS_IPV4;
if (ip.v6->version == 6)
first->tx_flags |= ICE_TX_FLAGS_IPV6;
}
/* Enable IP checksum offloads */
protocol = vlan_get_protocol(skb);
if (protocol == htons(ETH_P_IP)) {
if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
l4_proto = ip.v4->protocol;
/* the stack computes the IP header already; the only time we
* need the hardware to recompute it is in the case of TSO.
......@@ -1822,7 +1905,7 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
else
cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
} else if (protocol == htons(ETH_P_IPV6)) {
} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
exthdr = ip.hdr + sizeof(*ip.v6);
l4_proto = ip.v6->nexthdr;
......@@ -1944,7 +2027,8 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
unsigned char *hdr;
} l4;
u64 cd_mss, cd_tso_len;
u32 paylen, l4_start;
u32 paylen;
u8 l4_start;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
......@@ -1969,8 +2053,42 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
ip.v6->payload_len = 0;
}
if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
SKB_GSO_GRE_CSUM |
SKB_GSO_IPXIP4 |
SKB_GSO_IPXIP6 |
SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM)) {
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
l4.udp->len = 0;
/* determine offset of outer transport header */
l4_start = (u8)(l4.hdr - skb->data);
/* remove payload length from outer checksum */
paylen = skb->len - l4_start;
csum_replace_by_diff(&l4.udp->check,
(__force __wsum)htonl(paylen));
}
/* reset pointers to inner headers */
/* cppcheck-suppress unreadVariable */
ip.hdr = skb_inner_network_header(skb);
l4.hdr = skb_inner_transport_header(skb);
/* initialize inner IP header fields */
if (ip.v4->version == 4) {
ip.v4->tot_len = 0;
ip.v4->check = 0;
} else {
ip.v6->payload_len = 0;
}
}
/* determine offset of transport header */
l4_start = l4.hdr - skb->data;
l4_start = (u8)(l4.hdr - skb->data);
/* remove payload length from checksum */
paylen = skb->len - l4_start;
......@@ -1979,12 +2097,12 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
csum_replace_by_diff(&l4.udp->check,
(__force __wsum)htonl(paylen));
/* compute length of UDP segmentation header */
off->header_len = sizeof(l4.udp) + l4_start;
off->header_len = (u8)sizeof(l4.udp) + l4_start;
} else {
csum_replace_by_diff(&l4.tcp->check,
(__force __wsum)htonl(paylen));
/* compute length of TCP segmentation header */
off->header_len = (l4.tcp->doff * 4) + l4_start;
off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
}
/* update gso_segs and bytecount */
......@@ -2215,7 +2333,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
int i = tx_ring->next_to_use;
u16 i = tx_ring->next_to_use;
/* grab the next descriptor */
cdesc = ICE_TX_CTX_DESC(tx_ring, i);
......
......@@ -113,6 +113,9 @@ static inline int ice_skb_pad(void)
#define ICE_TX_FLAGS_TSO BIT(0)
#define ICE_TX_FLAGS_HW_VLAN BIT(1)
#define ICE_TX_FLAGS_SW_VLAN BIT(2)
#define ICE_TX_FLAGS_IPV4 BIT(5)
#define ICE_TX_FLAGS_IPV6 BIT(6)
#define ICE_TX_FLAGS_TUNNEL BIT(7)
#define ICE_TX_FLAGS_VLAN_M 0xffff0000
#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000
#define ICE_TX_FLAGS_VLAN_PR_S 29
......
......@@ -8,7 +8,7 @@
* @rx_ring: ring to bump
* @val: new head index
*/
void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val)
{
u16 prev_ntu = rx_ring->next_to_use & ~0x7;
......@@ -84,12 +84,17 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
struct ice_rx_ptype_decoded decoded;
u32 rx_error, rx_status;
u16 rx_error, rx_status;
u16 rx_stat_err1;
bool ipv4, ipv6;
rx_status = le16_to_cpu(rx_desc->wb.status_error0);
rx_error = rx_status;
rx_error = rx_status & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |
BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) |
BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S));
rx_stat_err1 = le16_to_cpu(rx_desc->wb.status_error1);
decoded = ice_decode_rx_desc_ptype(ptype);
/* Start with CHECKSUM_NONE and by default csum_level = 0 */
......@@ -125,6 +130,18 @@ ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
goto checksum_fail;
/* check for outer UDP checksum error in tunneled packets */
if ((rx_stat_err1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
(rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
goto checksum_fail;
/* If there is an outer header present that might contain a checksum,
* we need to bump the checksum level by 1 to indicate that we have
* validated the inner checksum.
*/
if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
skb->csum_level = 1;
/* Only report checksum unnecessary for TCP, UDP, or SCTP */
switch (decoded.inner_prot) {
case ICE_RX_PTYPE_INNER_PROT_TCP:
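skb->csum_level counts validated checksums beyond the first: reporting CHECKSUM_UNNECESSARY with csum_level = 1 tells the stack that both the outer and one inner checksum were verified, so it will not re-verify the packet after decapsulation. A stand-in sketch of the reporting logic (the types here are stubs, not the kernel's sk_buff):

#include <stdbool.h>
#include <stdint.h>

#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1

struct skb_stub {		/* stub for the sk_buff fields used here */
	uint8_t ip_summed;
	uint8_t csum_level;	/* checksums validated, minus one */
};

static void report_rx_csum(struct skb_stub *skb, bool tunneled, bool ok)
{
	if (!ok) {
		skb->ip_summed = CHECKSUM_NONE;	/* let the stack verify */
		return;
	}
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->csum_level = tunneled ? 1 : 0;	/* 1 => inner validated too */
}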
......@@ -215,8 +232,8 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
tx_desc = ICE_TX_DESC(xdp_ring, i);
tx_desc->buf_addr = cpu_to_le64(dma);
tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
size, 0);
tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
size, 0);
/* Make certain all of the status bits have been updated
* before next_to_watch is written.
......
......@@ -22,7 +22,7 @@ ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
}
static inline __le64
build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
{
return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
(td_cmd << ICE_TXD_QW1_CMD_S) |
......@@ -49,7 +49,7 @@ static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)
void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res);
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring);
int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring);
void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val);
void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val);
void
ice_process_skb_fields(struct ice_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
......
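The rename from build_ctob() to ice_build_ctob() gives the helper a driver-specific prefix, avoiding clashes with identically named helpers elsewhere in the kernel. The helper itself just packs command, offset, size, and tag into the descriptor's quad-word 1; a host-order sketch of that packing, with shift positions assumed to mirror the ICE_TXD_QW1_* constants rather than quoted from the hardware headers:

#include <stdint.h>

/* assumed shift positions for illustration; the authoritative values
 * are the ICE_TXD_QW1_* macros in the hardware headers */
#define QW1_DTYPE_DATA	0x1ULL
#define QW1_CMD_S	4
#define QW1_OFFSET_S	16
#define QW1_TX_BUF_SZ_S	34
#define QW1_L2TAG1_S	48

static inline uint64_t build_ctob_sketch(uint64_t cmd, uint64_t off,
					 unsigned int size, uint64_t tag)
{
	/* the real helper wraps the result in cpu_to_le64() */
	return QW1_DTYPE_DATA |
	       (cmd << QW1_CMD_S) |
	       (off << QW1_OFFSET_S) |
	       ((uint64_t)size << QW1_TX_BUF_SZ_S) |
	       (tag << QW1_L2TAG1_S);
}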
......@@ -259,6 +259,16 @@ struct ice_nvm_info {
#define ICE_NVM_VER_LEN 32
/* netlist version information */
struct ice_netlist_ver_info {
u32 major; /* major high/low */
u32 minor; /* minor high/low */
u32 type; /* type high/low */
u32 rev; /* revision high/low */
u32 hash; /* SHA-1 hash word */
u16 cust_ver; /* customer version */
};
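These fields back the devlink fw.netlist and fw.netlist.build versions; a hedged sketch of how they could be rendered into version strings — the exact formatting and field packing are assumptions for illustration, not copied from ice_devlink.c:

#include <stdio.h>

struct netlist_ver {		/* stand-in for struct ice_netlist_ver_info */
	unsigned int major, minor, type, rev, hash;
	unsigned short cust_ver;
};

int main(void)
{
	struct netlist_ver v = {
		.major = 1, .minor = 2, .type = 0xabcd0003,
		.rev = 4, .hash = 0x0badcafe, .cust_ver = 0,
	};

	/* assumed layout: type carries two 16-bit halves */
	printf("fw.netlist: %x.%x.%x-%x.%x.%x\n", v.major, v.minor,
	       v.type >> 16, v.type & 0xffff, v.rev, v.cust_ver);
	printf("fw.netlist.build: 0x%08x\n", v.hash);
	return 0;
}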
/* Max number of port-to-queue branches w.r.t. topology */
#define ICE_MAX_TRAFFIC_CLASS 8
#define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
......@@ -491,8 +501,8 @@ struct ice_hw {
u16 max_burst_size; /* driver sets this value */
/* Tx Scheduler values */
u16 num_tx_sched_layers;
u16 num_tx_sched_phys_layers;
u8 num_tx_sched_layers;
u8 num_tx_sched_phys_layers;
u8 flattened_layers;
u8 max_cgds;
u8 sw_entry_point_layer;
......@@ -506,6 +516,7 @@ struct ice_hw {
struct ice_nvm_info nvm;
struct ice_hw_dev_caps dev_caps; /* device capabilities */
struct ice_hw_func_caps func_caps; /* function capabilities */
struct ice_netlist_ver_info netlist_ver; /* netlist version info */
struct ice_switch_info *switch_info; /* switch filter lists */
......@@ -568,6 +579,10 @@ struct ice_hw {
u8 *pkg_copy;
u32 pkg_size;
/* tunneling info */
struct mutex tnl_lock;
struct ice_tunnel_table tnl;
/* HW block tables */
struct ice_blk_info blk[ICE_BLK_COUNT];
struct mutex fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */
......@@ -592,6 +607,8 @@ struct ice_eth_stats {
u64 tx_errors; /* tepc */
};
#define ICE_MAX_UP 8
/* Statistics collected by the MAC */
struct ice_hw_port_stats {
/* eth stats collected by the port */
......
......@@ -4,16 +4,18 @@
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
/**
* ice_validate_vf_id - helper to check if VF ID is valid
* @pf: pointer to the PF structure
* @vf_id: the ID of the VF to check
*/
static int ice_validate_vf_id(struct ice_pf *pf, int vf_id)
static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
{
/* vf_id is only valid in the range 0-255 and should always be unsigned */
if (vf_id >= pf->num_alloc_vfs) {
dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %d\n", vf_id);
dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
return -EINVAL;
}
return 0;
......@@ -27,13 +29,44 @@ static int ice_validate_vf_id(struct ice_pf *pf, int vf_id)
static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
{
if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
dev_err(ice_pf_to_dev(pf), "VF ID: %d in reset. Try again.\n",
dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
vf->vf_id);
return -EBUSY;
}
return 0;
}
/**
* ice_err_to_virt_err - translate errors for VF return code
* @ice_err: error return code
*/
static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
{
switch (ice_err) {
case ICE_SUCCESS:
return VIRTCHNL_STATUS_SUCCESS;
case ICE_ERR_BAD_PTR:
case ICE_ERR_INVAL_SIZE:
case ICE_ERR_DEVICE_NOT_SUPPORTED:
case ICE_ERR_PARAM:
case ICE_ERR_CFG:
return VIRTCHNL_STATUS_ERR_PARAM;
case ICE_ERR_NO_MEMORY:
return VIRTCHNL_STATUS_ERR_NO_MEMORY;
case ICE_ERR_NOT_READY:
case ICE_ERR_RESET_FAILED:
case ICE_ERR_FW_API_VER:
case ICE_ERR_AQ_ERROR:
case ICE_ERR_AQ_TIMEOUT:
case ICE_ERR_AQ_FULL:
case ICE_ERR_AQ_NO_WORK:
case ICE_ERR_AQ_EMPTY:
return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
default:
return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
}
}
/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
......@@ -337,7 +370,7 @@ void ice_free_vfs(struct ice_pf *pf)
* before this function ever gets called.
*/
if (!pci_vfs_assigned(pf->pdev)) {
int vf_id;
unsigned int vf_id;
/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
* work correctly when SR-IOV gets re-enabled.
......@@ -368,9 +401,9 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
struct ice_pf *pf = vf->pf;
u32 reg, reg_idx, bit_idx;
unsigned int vf_abs_id, i;
struct device *dev;
struct ice_hw *hw;
int vf_abs_id, i;
dev = ice_pf_to_dev(pf);
hw = &pf->hw;
......@@ -418,7 +451,7 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
if ((reg & VF_TRANS_PENDING_M) == 0)
break;
dev_err(dev, "VF %d PCI transactions stuck\n", vf->vf_id);
dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
}
}
......@@ -460,8 +493,9 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
ice_stat_str(status),
ice_aq_str(hw->adminq.sq_last_status));
ret = -EIO;
goto out;
}
......@@ -515,7 +549,6 @@ static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
static int ice_alloc_vsi_res(struct ice_vf *vf)
{
struct ice_pf *pf = vf->pf;
LIST_HEAD(tmp_add_list);
u8 broadcast[ETH_ALEN];
struct ice_vsi *vsi;
struct device *dev;
......@@ -537,7 +570,8 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
/* Check if port VLAN exist before, and restore it accordingly */
if (vf->port_vlan_info) {
ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
if (ice_vsi_add_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK))
if (ice_vsi_add_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK,
ICE_FWD_TO_VSI))
dev_warn(ice_pf_to_dev(pf), "Failed to add Port VLAN %d filter for VF %d\n",
vf->port_vlan_info & VLAN_VID_MASK, vf->vf_id);
} else {
......@@ -546,27 +580,23 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
* untagged broadcast/multicast traffic seen on the VF
* interface.
*/
if (ice_vsi_add_vlan(vsi, 0))
if (ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI))
dev_warn(ice_pf_to_dev(pf), "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest\n",
vf->vf_id);
}
eth_broadcast_addr(broadcast);
status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
if (status)
goto ice_alloc_vsi_res_exit;
if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
status = ice_add_mac_to_list(vsi, &tmp_add_list,
vf->dflt_lan_addr.addr);
status = ice_fltr_add_mac(vsi, vf->dflt_lan_addr.addr,
ICE_FWD_TO_VSI);
if (status)
goto ice_alloc_vsi_res_exit;
}
status = ice_add_mac(&pf->hw, &tmp_add_list);
eth_broadcast_addr(broadcast);
status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
if (status)
dev_err(dev, "could not add mac filters error %d\n", status);
dev_err(dev, "could not add mac filters error %d\n",
status);
else
vf->num_mac = 1;
......@@ -577,7 +607,6 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
* more vectors.
*/
ice_alloc_vsi_res_exit:
ice_free_fltr_list(dev, &tmp_add_list);
return status;
}
......@@ -1483,7 +1512,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
void ice_process_vflr_event(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
int vf_id;
unsigned int vf_id;
u32 reg;
if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
......@@ -1524,7 +1553,7 @@ static void ice_vc_reset_vf(struct ice_vf *vf)
*/
static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
{
int vf_id;
unsigned int vf_id;
ice_for_each_vf(pf, vf_id) {
struct ice_vf *vf = &pf->vf[vf_id];
......@@ -1628,8 +1657,9 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %d\n",
vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
vf->vf_id, ice_stat_str(aq_ret),
ice_aq_str(pf->hw.mailboxq.sq_last_status));
return -EIO;
}
......@@ -2044,8 +2074,9 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
if (status) {
dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d\n",
ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);
dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %s\n",
ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
ice_stat_str(status));
ret = -EIO;
goto out;
}
......@@ -2059,6 +2090,174 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
return ret;
}
/**
* ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
* @pf: PF structure for accessing VF(s)
*
* Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
* else return true
*/
bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
{
int vf_idx;
ice_for_each_vf(pf, vf_idx) {
struct ice_vf *vf = &pf->vf[vf_idx];
/* found a VF that has promiscuous mode configured */
if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
return true;
}
return false;
}
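A natural caller is the PF-side code that wants to clear the vf-true-promisc private flag: the switch cannot leave true promiscuous mode while any VF still depends on it. A hedged sketch of such a guard, written as a non-standalone fragment in driver context — the caller and the error code are assumptions, not the driver's actual flow:

/* sketch only: assumes struct ice_pf and pf->flags from the driver */
static int sketch_clear_true_promisc(struct ice_pf *pf)
{
	if (ice_is_any_vf_in_promisc(pf))
		return -EBUSY;	/* a VF still relies on promiscuous mode */

	clear_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags);
	return 0;
}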
/**
* ice_vc_cfg_promiscuous_mode_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
* called from the VF to configure promiscuous mode for the VF's VSIs
*/
static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_promisc_info *info =
(struct virtchnl_promisc_info *)msg;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
struct device *dev;
bool rm_promisc;
int ret = 0;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
dev = ice_pf_to_dev(pf);
if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
vf->vf_id);
/* Leave v_ret alone, lie to the VF on purpose. */
goto error_param;
}
rm_promisc = !(info->flags & FLAG_VF_UNICAST_PROMISC) &&
!(info->flags & FLAG_VF_MULTICAST_PROMISC);
if (vsi->num_vlan || vf->port_vlan_info) {
struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
struct net_device *pf_netdev;
if (!pf_vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
pf_netdev = pf_vsi->netdev;
ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
if (ret) {
dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
rm_promisc ? "ON" : "OFF", vf->vf_id,
vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
}
ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
if (ret) {
dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
}
if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
bool set_dflt_vsi = !!(info->flags & FLAG_VF_UNICAST_PROMISC);
if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
/* only attempt to set the default forwarding VSI if
* it's not currently set
*/
ret = ice_set_dflt_vsi(pf->first_sw, vsi);
else if (!set_dflt_vsi &&
ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
/* only attempt to free the default forwarding VSI if we
* are the owner
*/
ret = ice_clear_dflt_vsi(pf->first_sw);
if (ret) {
dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
goto error_param;
}
} else {
enum ice_status status;
u8 promisc_m;
if (info->flags & FLAG_VF_UNICAST_PROMISC) {
if (vf->port_vlan_info || vsi->num_vlan)
promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_UCAST_PROMISC_BITS;
} else if (info->flags & FLAG_VF_MULTICAST_PROMISC) {
if (vf->port_vlan_info || vsi->num_vlan)
promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_MCAST_PROMISC_BITS;
} else {
if (vf->port_vlan_info || vsi->num_vlan)
promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_UCAST_PROMISC_BITS;
}
/* Configure multicast/unicast with or without VLAN promiscuous
* mode
*/
status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
if (status) {
dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n",
rm_promisc ? "dis" : "en", vf->vf_id,
ice_stat_str(status));
v_ret = ice_err_to_virt_err(status);
goto error_param;
} else {
dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
rm_promisc ? "dis" : "en", vf->vf_id);
}
}
if (info->flags & FLAG_VF_MULTICAST_PROMISC)
set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
else
clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
if (info->flags & FLAG_VF_UNICAST_PROMISC)
set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
else
clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
v_ret, NULL, 0);
}
/**
* ice_vc_get_stats_msg
* @vf: pointer to the VF info
......@@ -2604,14 +2803,14 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
return -EPERM;
}
status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, true);
status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
if (status == ICE_ERR_ALREADY_EXISTS) {
dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
vf->vf_id);
return -EEXIST;
} else if (status) {
dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
mac_addr, vf->vf_id, status);
dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %s\n",
mac_addr, vf->vf_id, ice_stat_str(status));
return -EIO;
}
......@@ -2641,14 +2840,14 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
return 0;
status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, false);
status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
if (status == ICE_ERR_DOES_NOT_EXIST) {
dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
vf->vf_id);
return -ENOENT;
} else if (status) {
dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
mac_addr, vf->vf_id, status);
dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
mac_addr, vf->vf_id, ice_stat_str(status));
return -EIO;
}
......@@ -2885,7 +3084,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
/* add VLAN 0 filter back when transitioning from port VLAN to
* no port VLAN. No change to old port VLAN on failure.
*/
ret = ice_vsi_add_vlan(vsi, 0);
ret = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
if (ret)
return ret;
ret = ice_vsi_manage_pvid(vsi, 0, false);
......@@ -2898,7 +3097,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
vlan_id, qos, vf_id);
/* add VLAN filter for the port VLAN */
ret = ice_vsi_add_vlan(vsi, vlan_id);
ret = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
if (ret)
return ret;
}
......@@ -2992,8 +3191,9 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
vlan_promisc = true;
if (add_v) {
......@@ -3018,7 +3218,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
if (!vid)
continue;
status = ice_vsi_add_vlan(vsi, vid);
status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
......@@ -3317,6 +3517,9 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
case VIRTCHNL_OP_GET_STATS:
err = ice_vc_get_stats_msg(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
break;
case VIRTCHNL_OP_ADD_VLAN:
err = ice_vc_add_vlan_msg(vf, msg);
break;
......
......@@ -64,7 +64,7 @@ struct ice_mdd_vf_events {
struct ice_vf {
struct ice_pf *pf;
s16 vf_id; /* VF ID in the PF space */
u16 vf_id; /* VF ID in the PF space */
u16 lan_vsi_idx; /* index into PF struct */
/* first vector index of this VF in the PF space */
int first_vector_idx;
......@@ -128,6 +128,7 @@ void ice_set_vf_state_qs_dis(struct ice_vf *vf);
int
ice_get_vf_stats(struct net_device *netdev, int vf_id,
struct ifla_vf_stats *vf_stats);
bool ice_is_any_vf_in_promisc(struct ice_pf *pf);
void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_print_vfs_mdd_events(struct ice_pf *pf);
......@@ -219,5 +220,10 @@ ice_get_vf_stats(struct net_device __always_unused *netdev,
{
return -EOPNOTSUPP;
}
static inline bool ice_is_any_vf_in_promisc(struct ice_pf __always_unused *pf)
{
return false;
}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_VIRTCHNL_PF_H_ */
......@@ -988,8 +988,8 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
tx_desc->buf_addr = cpu_to_le64(dma);
tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD,
0, desc.len, 0);
tx_desc->cmd_type_offset_bsz =
ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, desc.len, 0);
xdp_ring->next_to_use++;
if (xdp_ring->next_to_use == xdp_ring->count)
......