Commit ac6d1835 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Tony Nguyen says:

====================
100GbE Intel Wired LAN Driver Updates 2020-08-01

This series contains updates to the ice driver only.

Wei Yongjun marks power management functions with __maybe_unused.
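
As a rough illustration (not the driver code): __maybe_unused expands to a
compiler attribute that silences "defined but not used" warnings when the PM
callbacks end up unreferenced, e.g. in builds without PM support. A minimal
userspace sketch of the same pattern, with made-up names:

	#include <stdio.h>

	/* stand-in for the kernel's __maybe_unused annotation */
	#define my_maybe_unused __attribute__((__unused__))

	/* Without the attribute, a build that never references the callback
	 * (PM support compiled out) warns that it is defined but not used.
	 */
	static int my_maybe_unused my_suspend(void *dev)
	{
		printf("suspend %p\n", dev);
		return 0;
	}

	int main(void)
	{
	#ifdef MY_PM_SLEEP
		int dev;

		return my_suspend(&dev);
	#else
		return 0;
	#endif
	}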

Nick disables VLAN pruning in promiscuous mode and renames grst_delay to
grst_timeout.

Kiran modifies the check for linearization and corrects the vsi_id mask
value.

Vignesh replaces the use of flow profile locks with RSS profile locks for
RSS rule removal, destroys the flow profile lock when clearing the XLT
table, and clears extraction sequence entries.

Jesse adds some statistics and removes an unreported one.

Brett allows for 2 queue configuration for VFs.
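
As a rough illustration (not the driver code): the per-VF MSI-X sizing walks
down fixed vector tiers until the available budget fits, and this series adds
a new minimum multi-queue tier to make the 2 queue case possible. A standalone
sketch of that tier selection, with illustrative values rather than the
driver's macros:

	#include <stdio.h>

	/* illustrative vector tiers, mirroring the MED/SMALL/MULTIQ_MIN idea */
	enum {
		VF_MSIX_MED = 17,
		VF_MSIX_SMALL = 5,
		VF_MSIX_MULTIQ_MIN = 3,
		VF_MSIX_MIN = 2,
	};

	/* pick the largest tier that fits the per-VF vector budget */
	static int pick_msix_per_vf(int msix_avail_per_vf)
	{
		if (msix_avail_per_vf >= VF_MSIX_MED)
			return VF_MSIX_MED;
		if (msix_avail_per_vf >= VF_MSIX_SMALL)
			return VF_MSIX_SMALL;
		if (msix_avail_per_vf >= VF_MSIX_MULTIQ_MIN)
			return VF_MSIX_MULTIQ_MIN;
		if (msix_avail_per_vf >= VF_MSIX_MIN)
			return VF_MSIX_MIN;
		return 0;
	}

	int main(void)
	{
		printf("4 available -> %d per VF\n", pick_msix_per_vf(4));
		return 0;
	}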

Surabhi adds a check for failed allocation of an extraction sequence
table.

Tony updates the PTYPE lookup table and makes other trivial fixes.

Victor extends profile ID locks to be held until all references are
completed.
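
As a rough illustration (not the driver code): the change keeps the profile
map lock held across both the lookup and every use of the returned entry,
instead of dropping it right after the search. A tiny pthread sketch of why
the wider critical section matters, with toy names:

	#include <pthread.h>
	#include <stdio.h>

	/* toy profile registry guarded by a single lock; names are illustrative */
	static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
	static char profile[32] = "profile-0";

	/* Hold the lock across both the lookup and the use of the entry so a
	 * concurrent remover cannot free or rewrite it while we still read it.
	 */
	static void copy_profile(char *out, size_t len)
	{
		pthread_mutex_lock(&map_lock);
		snprintf(out, len, "%s", profile);	/* lookup + use under the lock */
		pthread_mutex_unlock(&map_lock);
	}

	int main(void)
	{
		char buf[32];

		copy_profile(buf, sizeof(buf));
		printf("%s\n", buf);
		return 0;
	}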
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2f631133 7dbc63f0
@@ -256,6 +256,7 @@ struct ice_vsi {
 	u32 tx_busy;
 	u32 rx_buf_failed;
 	u32 rx_page_failed;
+	u32 rx_gro_dropped;
 	u16 num_q_vectors;
 	u16 base_vector;	/* IRQ base for OS reserved vectors */
 	enum ice_vsi_type type;
@@ -1581,7 +1581,7 @@ struct ice_aqc_get_set_rss_keys {
 struct ice_aqc_get_set_rss_lut {
 #define ICE_AQC_GSET_RSS_LUT_VSI_VALID	BIT(15)
 #define ICE_AQC_GSET_RSS_LUT_VSI_ID_S	0
-#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M	(0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
+#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M	(0x3FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
 	__le16 vsi_id;
 #define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S	0
 #define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \
@@ -1027,23 +1027,23 @@ void ice_deinit_hw(struct ice_hw *hw)
  */
 enum ice_status ice_check_reset(struct ice_hw *hw)
 {
-	u32 cnt, reg = 0, grst_delay, uld_mask;
+	u32 cnt, reg = 0, grst_timeout, uld_mask;
 
 	/* Poll for Device Active state in case a recent CORER, GLOBR,
 	 * or EMPR has occurred. The grst delay value is in 100ms units.
 	 * Add 1sec for outstanding AQ commands that can take a long time.
 	 */
-	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
-		      GLGEN_RSTCTL_GRSTDEL_S) + 10;
+	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
+			GLGEN_RSTCTL_GRSTDEL_S) + 10;
 
-	for (cnt = 0; cnt < grst_delay; cnt++) {
+	for (cnt = 0; cnt < grst_timeout; cnt++) {
 		mdelay(100);
 		reg = rd32(hw, GLGEN_RSTAT);
 		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
 			break;
 	}
 
-	if (cnt == grst_delay) {
+	if (cnt == grst_timeout) {
 		ice_debug(hw, ICE_DBG_INIT,
 			  "Global reset polling failed to complete.\n");
 		return ICE_ERR_RESET_FAILED;
@@ -1718,8 +1718,7 @@ ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
  * @num: number of resources
  * @res: pointer to array that contains the resources to free
  */
-enum ice_status
-ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
 {
 	struct ice_aqc_alloc_free_res_elem *buf;
 	enum ice_status status;
@@ -2121,7 +2120,7 @@ ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
  * @cap_count: the number of capabilities
  *
  * Helper device to parse device (0x000B) capabilities list. For
- * capabilities shared between device and device, this relies on
+ * capabilities shared between device and function, this relies on
  * ice_parse_common_caps.
  *
 * Loop through the list of provided capabilities and extract the relevant
@@ -357,7 +357,7 @@ void ice_devlink_unregister(struct ice_pf *pf)
 *
 * Create and register a devlink_port for this PF. Note that although each
 * physical function is connected to a separate devlink instance, the port
- * will still be numbered according to the physical function id.
+ * will still be numbered according to the physical function ID.
 *
 * Return: zero on success or an error code on failure.
 */
@@ -59,8 +59,11 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = {
 	ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
 	ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
 	ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
+	ICE_VSI_STAT("rx_gro_dropped", rx_gro_dropped),
 	ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
 	ICE_VSI_STAT("tx_linearize", tx_linearize),
+	ICE_VSI_STAT("tx_busy", tx_busy),
+	ICE_VSI_STAT("tx_restart", tx_restart),
 };
 
 enum ice_ethtool_test_id {
@@ -100,6 +103,7 @@ static const struct ice_stats ice_gstrings_pf_stats[] = {
 	ICE_PF_STAT("rx_broadcast.nic", stats.eth.rx_broadcast),
 	ICE_PF_STAT("tx_broadcast.nic", stats.eth.tx_broadcast),
 	ICE_PF_STAT("tx_errors.nic", stats.eth.tx_errors),
+	ICE_PF_STAT("tx_timeout.nic", tx_timeout_count),
 	ICE_PF_STAT("rx_size_64.nic", stats.rx_size_64),
 	ICE_PF_STAT("tx_size_64.nic", stats.tx_size_64),
 	ICE_PF_STAT("rx_size_127.nic", stats.rx_size_127),
@@ -644,7 +644,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
 * This function generates a key from a value, a don't care mask and a never
 * match mask.
 * upd, dc, and nm are optional parameters, and can be NULL:
- *	upd == NULL --> udp mask is all 1's (update all bits)
+ *	upd == NULL --> upd mask is all 1's (update all bits)
 *	dc == NULL --> dc mask is all 0's (no don't care bits)
 *	nm == NULL --> nm mask is all 0's (no never match bits)
 */
@@ -2921,6 +2921,8 @@ static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
 					   ICE_FLOW_ENTRY_HNDL(e));
 
 		list_del(&p->l_entry);
+
+		mutex_destroy(&p->entries_lock);
 		devm_kfree(ice_hw_to_dev(hw), p);
 	}
 	mutex_unlock(&hw->fl_profs_locks[blk_idx]);
@@ -3038,7 +3040,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
 		memset(prof_redir->t, 0,
 		       prof_redir->count * sizeof(*prof_redir->t));
 
-		memset(es->t, 0, es->count * sizeof(*es->t));
+		memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
 		memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
 		memset(es->written, 0, es->count * sizeof(*es->written));
 	}
@@ -3149,10 +3151,12 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
 		es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
 					     sizeof(*es->ref_count),
 					     GFP_KERNEL);
+		if (!es->ref_count)
+			goto err;
 
 		es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
 					   sizeof(*es->written), GFP_KERNEL);
-		if (!es->ref_count)
+		if (!es->written)
 			goto err;
 	}
 	return 0;
@@ -3874,16 +3878,16 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
 }
 
 /**
- * ice_search_prof_id_low - Search for a profile tracking ID low level
+ * ice_search_prof_id - Search for a profile tracking ID
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @id: profile tracking ID
 *
- * This will search for a profile tracking ID which was previously added. This
- * version assumes that the caller has already acquired the prof map lock.
+ * This will search for a profile tracking ID which was previously added.
+ * The profile map lock should be held before calling this function.
 */
 static struct ice_prof_map *
-ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id)
+ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
 {
 	struct ice_prof_map *entry = NULL;
 	struct ice_prof_map *map;
@@ -3897,26 +3901,6 @@ ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id)
 	return entry;
 }
 
-/**
- * ice_search_prof_id - Search for a profile tracking ID
- * @hw: pointer to the HW struct
- * @blk: hardware block
- * @id: profile tracking ID
- *
- * This will search for a profile tracking ID which was previously added.
- */
-static struct ice_prof_map *
-ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
-{
-	struct ice_prof_map *entry;
-
-	mutex_lock(&hw->blk[blk].es.prof_map_lock);
-	entry = ice_search_prof_id_low(hw, blk, id);
-	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
-
-	return entry;
-}
-
 /**
 * ice_vsig_prof_id_count - count profiles in a VSIG
 * @hw: pointer to the HW struct
@@ -4133,7 +4117,7 @@ enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
 
 	mutex_lock(&hw->blk[blk].es.prof_map_lock);
 
-	pmap = ice_search_prof_id_low(hw, blk, id);
+	pmap = ice_search_prof_id(hw, blk, id);
 	if (!pmap) {
 		status = ICE_ERR_DOES_NOT_EXIST;
 		goto err_ice_rem_prof;
@@ -4166,22 +4150,28 @@ static enum ice_status
 ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
 	     struct list_head *chg)
 {
+	enum ice_status status = 0;
 	struct ice_prof_map *map;
 	struct ice_chs_chg *p;
 	u16 i;
 
+	mutex_lock(&hw->blk[blk].es.prof_map_lock);
 	/* Get the details on the profile specified by the handle ID */
 	map = ice_search_prof_id(hw, blk, hdl);
-	if (!map)
-		return ICE_ERR_DOES_NOT_EXIST;
+	if (!map) {
+		status = ICE_ERR_DOES_NOT_EXIST;
+		goto err_ice_get_prof;
+	}
 
 	for (i = 0; i < map->ptg_cnt; i++)
 		if (!hw->blk[blk].es.written[map->prof_id]) {
 			/* add ES to change list */
 			p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
 					 GFP_KERNEL);
-			if (!p)
+			if (!p) {
+				status = ICE_ERR_NO_MEMORY;
 				goto err_ice_get_prof;
+			}
 
 			p->type = ICE_PTG_ES_ADD;
 			p->ptype = 0;
@@ -4196,11 +4186,10 @@ ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
 			list_add(&p->list_entry, chg);
 		}
 
-	return 0;
-
 err_ice_get_prof:
+	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
 	/* let caller clean up the change list */
-	return ICE_ERR_NO_MEMORY;
+	return status;
 }
 
 /**
@@ -4254,17 +4243,23 @@ static enum ice_status
 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
 		    struct list_head *lst, u64 hdl)
 {
+	enum ice_status status = 0;
 	struct ice_prof_map *map;
 	struct ice_vsig_prof *p;
 	u16 i;
 
+	mutex_lock(&hw->blk[blk].es.prof_map_lock);
 	map = ice_search_prof_id(hw, blk, hdl);
-	if (!map)
-		return ICE_ERR_DOES_NOT_EXIST;
+	if (!map) {
+		status = ICE_ERR_DOES_NOT_EXIST;
+		goto err_ice_add_prof_to_lst;
+	}
 
 	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
-	if (!p)
-		return ICE_ERR_NO_MEMORY;
+	if (!p) {
+		status = ICE_ERR_NO_MEMORY;
+		goto err_ice_add_prof_to_lst;
+	}
 
 	p->profile_cookie = map->profile_cookie;
 	p->prof_id = map->prof_id;
@@ -4278,7 +4273,9 @@ ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
 
 	list_add(&p->list, lst);
 
-	return 0;
+err_ice_add_prof_to_lst:
+	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
+	return status;
 }
 
 /**
@@ -4496,16 +4493,12 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+	enum ice_status status = 0;
 	struct ice_prof_map *map;
 	struct ice_vsig_prof *t;
 	struct ice_chs_chg *p;
 	u16 vsig_idx, i;
 
-	/* Get the details on the profile specified by the handle ID */
-	map = ice_search_prof_id(hw, blk, hdl);
-	if (!map)
-		return ICE_ERR_DOES_NOT_EXIST;
-
 	/* Error, if this VSIG already has this profile */
 	if (ice_has_prof_vsig(hw, blk, vsig, hdl))
 		return ICE_ERR_ALREADY_EXISTS;
@@ -4515,19 +4508,28 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
 	if (!t)
 		return ICE_ERR_NO_MEMORY;
 
+	mutex_lock(&hw->blk[blk].es.prof_map_lock);
+	/* Get the details on the profile specified by the handle ID */
+	map = ice_search_prof_id(hw, blk, hdl);
+	if (!map) {
+		status = ICE_ERR_DOES_NOT_EXIST;
+		goto err_ice_add_prof_id_vsig;
+	}
+
 	t->profile_cookie = map->profile_cookie;
 	t->prof_id = map->prof_id;
 	t->tcam_count = map->ptg_cnt;
 
 	/* create TCAM entries */
 	for (i = 0; i < map->ptg_cnt; i++) {
-		enum ice_status status;
 		u16 tcam_idx;
 
 		/* add TCAM to change list */
 		p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
-		if (!p)
+		if (!p) {
+			status = ICE_ERR_NO_MEMORY;
 			goto err_ice_add_prof_id_vsig;
+		}
 
 		/* allocate the TCAM entry index */
 		status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
@@ -4571,12 +4573,14 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
 	list_add(&t->list,
 		 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
 
-	return 0;
+	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
+	return status;
 
 err_ice_add_prof_id_vsig:
+	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
 	/* let caller clean up the change list */
 	devm_kfree(ice_hw_to_dev(hw), t);
-	return ICE_ERR_NO_MEMORY;
+	return status;
 }
 
 /**
@@ -1187,7 +1187,7 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
 	if (list_empty(&hw->fl_profs[blk]))
 		return 0;
 
-	mutex_lock(&hw->fl_profs_locks[blk]);
+	mutex_lock(&hw->rss_locks);
 	list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
 		if (test_bit(vsi_handle, p->vsis)) {
 			status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
@@ -1195,12 +1195,12 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
 				break;
 
 			if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
-				status = ice_flow_rem_prof_sync(hw, blk, p);
+				status = ice_flow_rem_prof(hw, blk, p->id);
 				if (status)
 					break;
 			}
 		}
-	mutex_unlock(&hw->fl_profs_locks[blk]);
+	mutex_unlock(&hw->rss_locks);
 
 	return status;
 }
@@ -1597,7 +1597,8 @@ enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
 */
 u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
 {
-	struct ice_rss_cfg *r, *rss_cfg = NULL;
+	u64 rss_hash = ICE_HASH_INVALID;
+	struct ice_rss_cfg *r;
 
 	/* verify if the protocol header is non zero and VSI is valid */
 	if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
@@ -1607,10 +1608,10 @@ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
 	list_for_each_entry(r, &hw->rss_list_head, l_entry)
 		if (test_bit(vsi_handle, r->vsis) &&
 		    r->packet_hdr == hdrs) {
-			rss_cfg = r;
+			rss_hash = r->hashed_flds;
 			break;
 		}
 	mutex_unlock(&hw->rss_locks);
 
-	return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
+	return rss_hash;
 }
@@ -57,7 +57,7 @@
 #define PRTDCB_GENS			0x00083020
 #define PRTDCB_GENS_DCBX_STATUS_S	0
 #define PRTDCB_GENS_DCBX_STATUS_M	ICE_M(0x7, 0)
-#define PRTDCB_TUP2TC			0x001D26C0 /* Reset Source: CORER */
+#define PRTDCB_TUP2TC			0x001D26C0
 #define GL_PREEXT_L2_PMASK0(_i)		(0x0020F0FC + ((_i) * 4))
 #define GL_PREEXT_L2_PMASK1(_i)		(0x0020F108 + ((_i) * 4))
 #define GLFLXP_RXDID_FLX_WRD_0(_i)	(0x0045c800 + ((_i) * 4))
@@ -362,6 +362,7 @@
 #define GLV_TEPC(_VSI)			(0x00312000 + ((_VSI) * 4))
 #define GLV_UPRCL(_i)			(0x003B2000 + ((_i) * 8))
 #define GLV_UPTCL(_i)			(0x0030A000 + ((_i) * 8))
+#define PRTRPB_RDPC			0x000AC260
 #define VSIQF_FD_CNT(_VSI)		(0x00464000 + ((_VSI) * 4))
 #define VSIQF_FD_CNT_FD_GCNT_S		0
 #define VSIQF_FD_CNT_FD_GCNT_M		ICE_M(0x3FFF, 0)
@@ -378,6 +379,5 @@
 #define PFPM_WUS_FW_RST_WK_M		BIT(31)
 #define VFINT_DYN_CTLN(_i)		(0x00003800 + ((_i) * 4))
 #define VFINT_DYN_CTLN_CLEARPBA_M	BIT(1)
-#define PRTRPB_RDPC			0x000AC260
 
 #endif /* _ICE_HW_AUTOGEN_H_ */
@@ -601,6 +601,7 @@ struct ice_tlan_ctx {
 
 /* shorter macros makes the table fit but are terse */
 #define ICE_RX_PTYPE_NOF	ICE_RX_PTYPE_NOT_FRAG
+#define ICE_RX_PTYPE_FRG	ICE_RX_PTYPE_FRAG
 
 /* Lookup table mapping the HW PTYPE to the bit field for decoding */
 static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
@@ -608,6 +609,319 @@ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
 	ICE_PTT_UNUSED_ENTRY(0),
 	ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
 	ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
ICE_PTT_UNUSED_ENTRY(3),
ICE_PTT_UNUSED_ENTRY(4),
ICE_PTT_UNUSED_ENTRY(5),
ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
ICE_PTT_UNUSED_ENTRY(8),
ICE_PTT_UNUSED_ENTRY(9),
ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
ICE_PTT_UNUSED_ENTRY(12),
ICE_PTT_UNUSED_ENTRY(13),
ICE_PTT_UNUSED_ENTRY(14),
ICE_PTT_UNUSED_ENTRY(15),
ICE_PTT_UNUSED_ENTRY(16),
ICE_PTT_UNUSED_ENTRY(17),
ICE_PTT_UNUSED_ENTRY(18),
ICE_PTT_UNUSED_ENTRY(19),
ICE_PTT_UNUSED_ENTRY(20),
ICE_PTT_UNUSED_ENTRY(21),
/* Non Tunneled IPv4 */
ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
ICE_PTT_UNUSED_ENTRY(25),
ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
/* IPv4 --> IPv4 */
ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
ICE_PTT_UNUSED_ENTRY(32),
ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
/* IPv4 --> IPv6 */
ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
ICE_PTT_UNUSED_ENTRY(39),
ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
/* IPv4 --> GRE/NAT */
ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
/* IPv4 --> GRE/NAT --> IPv4 */
ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
ICE_PTT_UNUSED_ENTRY(47),
ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
/* IPv4 --> GRE/NAT --> IPv6 */
ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
ICE_PTT_UNUSED_ENTRY(54),
ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
/* IPv4 --> GRE/NAT --> MAC */
ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
ICE_PTT_UNUSED_ENTRY(62),
ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
ICE_PTT_UNUSED_ENTRY(69),
ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
/* IPv4 --> GRE/NAT --> MAC/VLAN */
ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
ICE_PTT_UNUSED_ENTRY(77),
ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
ICE_PTT_UNUSED_ENTRY(84),
ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
/* Non Tunneled IPv6 */
ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
ICE_PTT_UNUSED_ENTRY(91),
ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
/* IPv6 --> IPv4 */
ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
ICE_PTT_UNUSED_ENTRY(98),
ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
/* IPv6 --> IPv6 */
ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
ICE_PTT_UNUSED_ENTRY(105),
ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
/* IPv6 --> GRE/NAT */
ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
/* IPv6 --> GRE/NAT -> IPv4 */
ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
ICE_PTT_UNUSED_ENTRY(113),
ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
/* IPv6 --> GRE/NAT -> IPv6 */
ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
ICE_PTT_UNUSED_ENTRY(120),
ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
/* IPv6 --> GRE/NAT -> MAC */
ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
ICE_PTT_UNUSED_ENTRY(128),
ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
ICE_PTT_UNUSED_ENTRY(135),
ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
/* IPv6 --> GRE/NAT -> MAC/VLAN */
ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
ICE_PTT_UNUSED_ENTRY(143),
ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
ICE_PTT_UNUSED_ENTRY(150),
ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
/* unused entries */
ICE_PTT_UNUSED_ENTRY(154),
ICE_PTT_UNUSED_ENTRY(155),
ICE_PTT_UNUSED_ENTRY(156),
ICE_PTT_UNUSED_ENTRY(157),
ICE_PTT_UNUSED_ENTRY(158),
ICE_PTT_UNUSED_ENTRY(159),
ICE_PTT_UNUSED_ENTRY(160),
ICE_PTT_UNUSED_ENTRY(161),
ICE_PTT_UNUSED_ENTRY(162),
ICE_PTT_UNUSED_ENTRY(163),
ICE_PTT_UNUSED_ENTRY(164),
ICE_PTT_UNUSED_ENTRY(165),
ICE_PTT_UNUSED_ENTRY(166),
ICE_PTT_UNUSED_ENTRY(167),
ICE_PTT_UNUSED_ENTRY(168),
ICE_PTT_UNUSED_ENTRY(169),
ICE_PTT_UNUSED_ENTRY(170),
ICE_PTT_UNUSED_ENTRY(171),
ICE_PTT_UNUSED_ENTRY(172),
ICE_PTT_UNUSED_ENTRY(173),
ICE_PTT_UNUSED_ENTRY(174),
ICE_PTT_UNUSED_ENTRY(175),
ICE_PTT_UNUSED_ENTRY(176),
ICE_PTT_UNUSED_ENTRY(177),
ICE_PTT_UNUSED_ENTRY(178),
ICE_PTT_UNUSED_ENTRY(179),
ICE_PTT_UNUSED_ENTRY(180),
ICE_PTT_UNUSED_ENTRY(181),
ICE_PTT_UNUSED_ENTRY(182),
ICE_PTT_UNUSED_ENTRY(183),
ICE_PTT_UNUSED_ENTRY(184),
ICE_PTT_UNUSED_ENTRY(185),
ICE_PTT_UNUSED_ENTRY(186),
ICE_PTT_UNUSED_ENTRY(187),
ICE_PTT_UNUSED_ENTRY(188),
ICE_PTT_UNUSED_ENTRY(189),
ICE_PTT_UNUSED_ENTRY(190),
ICE_PTT_UNUSED_ENTRY(191),
ICE_PTT_UNUSED_ENTRY(192),
ICE_PTT_UNUSED_ENTRY(193),
ICE_PTT_UNUSED_ENTRY(194),
ICE_PTT_UNUSED_ENTRY(195),
ICE_PTT_UNUSED_ENTRY(196),
ICE_PTT_UNUSED_ENTRY(197),
ICE_PTT_UNUSED_ENTRY(198),
ICE_PTT_UNUSED_ENTRY(199),
ICE_PTT_UNUSED_ENTRY(200),
ICE_PTT_UNUSED_ENTRY(201),
ICE_PTT_UNUSED_ENTRY(202),
ICE_PTT_UNUSED_ENTRY(203),
ICE_PTT_UNUSED_ENTRY(204),
ICE_PTT_UNUSED_ENTRY(205),
ICE_PTT_UNUSED_ENTRY(206),
ICE_PTT_UNUSED_ENTRY(207),
ICE_PTT_UNUSED_ENTRY(208),
ICE_PTT_UNUSED_ENTRY(209),
ICE_PTT_UNUSED_ENTRY(210),
ICE_PTT_UNUSED_ENTRY(211),
ICE_PTT_UNUSED_ENTRY(212),
ICE_PTT_UNUSED_ENTRY(213),
ICE_PTT_UNUSED_ENTRY(214),
ICE_PTT_UNUSED_ENTRY(215),
ICE_PTT_UNUSED_ENTRY(216),
ICE_PTT_UNUSED_ENTRY(217),
ICE_PTT_UNUSED_ENTRY(218),
ICE_PTT_UNUSED_ENTRY(219),
ICE_PTT_UNUSED_ENTRY(220),
ICE_PTT_UNUSED_ENTRY(221),
ICE_PTT_UNUSED_ENTRY(222),
ICE_PTT_UNUSED_ENTRY(223),
ICE_PTT_UNUSED_ENTRY(224),
ICE_PTT_UNUSED_ENTRY(225),
ICE_PTT_UNUSED_ENTRY(226),
ICE_PTT_UNUSED_ENTRY(227),
ICE_PTT_UNUSED_ENTRY(228),
ICE_PTT_UNUSED_ENTRY(229),
ICE_PTT_UNUSED_ENTRY(230),
ICE_PTT_UNUSED_ENTRY(231),
ICE_PTT_UNUSED_ENTRY(232),
ICE_PTT_UNUSED_ENTRY(233),
ICE_PTT_UNUSED_ENTRY(234),
ICE_PTT_UNUSED_ENTRY(235),
ICE_PTT_UNUSED_ENTRY(236),
ICE_PTT_UNUSED_ENTRY(237),
ICE_PTT_UNUSED_ENTRY(238),
ICE_PTT_UNUSED_ENTRY(239),
ICE_PTT_UNUSED_ENTRY(240),
ICE_PTT_UNUSED_ENTRY(241),
ICE_PTT_UNUSED_ENTRY(242),
ICE_PTT_UNUSED_ENTRY(243),
ICE_PTT_UNUSED_ENTRY(244),
ICE_PTT_UNUSED_ENTRY(245),
ICE_PTT_UNUSED_ENTRY(246),
ICE_PTT_UNUSED_ENTRY(247),
ICE_PTT_UNUSED_ENTRY(248),
ICE_PTT_UNUSED_ENTRY(249),
ICE_PTT_UNUSED_ENTRY(250),
ICE_PTT_UNUSED_ENTRY(251),
ICE_PTT_UNUSED_ENTRY(252),
ICE_PTT_UNUSED_ENTRY(253),
ICE_PTT_UNUSED_ENTRY(254),
ICE_PTT_UNUSED_ENTRY(255),
};
 
static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
@@ -2017,6 +2017,13 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
 	if (!vsi)
 		return -EINVAL;
 
+	/* Don't enable VLAN pruning if the netdev is currently in promiscuous
+	 * mode. VLAN pruning will be enabled when the interface exits
+	 * promiscuous mode if any VLAN filters are active.
+	 */
+	if (vsi->netdev && vsi->netdev->flags & IFF_PROMISC && ena)
+		return 0;
+
 	pf = vsi->back;
 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
 	if (!ctxt)
@@ -369,6 +369,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
 					~IFF_PROMISC;
 				goto out_promisc;
 			}
+			ice_cfg_vlan_pruning(vsi, false, false);
 		}
 	} else {
 		/* Clear Rx filter to remove traffic from wire */
@@ -381,6 +382,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
 					IFF_PROMISC;
 				goto out_promisc;
 			}
+			if (vsi->num_vlan > 1)
+				ice_cfg_vlan_pruning(vsi, true, false);
 		}
 	}
 }
@@ -4467,7 +4470,7 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
 * Power Management callback to quiesce the device and prepare
 * for D3 transition.
 */
-static int ice_suspend(struct device *dev)
+static int __maybe_unused ice_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct ice_pf *pf;
@@ -4531,7 +4534,7 @@ static int ice_suspend(struct device *dev)
 * ice_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 */
-static int ice_resume(struct device *dev)
+static int __maybe_unused ice_resume(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	enum ice_reset_req reset_type;
@@ -5290,6 +5293,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
 	vsi->tx_linearize = 0;
 	vsi->rx_buf_failed = 0;
 	vsi->rx_page_failed = 0;
+	vsi->rx_gro_dropped = 0;
 
 	rcu_read_lock();
 
@@ -5304,6 +5308,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
 		vsi_stats->rx_bytes += bytes;
 		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
 		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
+		vsi->rx_gro_dropped += ring->rx_stats.gro_dropped;
 	}
 
 	/* update XDP Tx rings counters */
@@ -5335,7 +5340,7 @@ void ice_update_vsi_stats(struct ice_vsi *vsi)
 		ice_update_eth_stats(vsi);
 
 		cur_ns->tx_errors = cur_es->tx_errors;
-		cur_ns->rx_dropped = cur_es->rx_discards;
+		cur_ns->rx_dropped = cur_es->rx_discards + vsi->rx_gro_dropped;
 		cur_ns->tx_dropped = cur_es->tx_discards;
 		cur_ns->multicast = cur_es->rx_multicast;
@@ -170,7 +170,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
 		return ICE_ERR_PARAM;
 	}
 
-	/* query the current node information from FW before additing it
+	/* query the current node information from FW before adding it
 	 * to the SW DB
 	 */
 	status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
@@ -578,7 +578,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
 /**
 * ice_aq_rl_profile - performs a rate limiting task
 * @hw: pointer to the HW struct
- * @opcode:opcode for add, query, or remove profile(s)
+ * @opcode: opcode for add, query, or remove profile(s)
 * @num_profiles: the number of profiles
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
@@ -631,10 +631,8 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
 	dma_addr_t dma;
 
 	/* since we are recycling buffers we should seldom need to alloc */
-	if (likely(page)) {
-		rx_ring->rx_stats.page_reuse_count++;
+	if (likely(page))
 		return true;
-	}
 
 	/* alloc new page for storage */
 	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
@@ -1033,7 +1031,6 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
 	if (ice_can_reuse_rx_page(rx_buf)) {
 		/* hand second half of page back to the ring */
 		ice_reuse_rx_page(rx_ring, rx_buf);
-		rx_ring->rx_stats.page_reuse_count++;
 	} else {
 		/* we are not reusing the buffer so unmap it */
 		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
@@ -1254,12 +1251,12 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
 * @itr: ITR value to update
 *
 * Calculate how big of an increment should be applied to the ITR value passed
- * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
+ * in based on wmem_default, SKB overhead, ethernet overhead, and the current
 * link speed.
 *
 * The following is a calculation derived from:
 *   wmem_default / (size + overhead) = desired_pkts_per_int
- *   rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
+ *   rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
 *   (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
 *
 * Assuming wmem_default is 212992 and overhead is 640 bytes per
@@ -2294,10 +2291,30 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
 	/* Walk through fragments adding latest fragment, testing it, and
 	 * then removing stale fragments from the sum.
 	 */
-	stale = &skb_shinfo(skb)->frags[0];
-	for (;;) {
+	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+		int stale_size = skb_frag_size(stale);
+
 		sum += skb_frag_size(frag++);
 
+		/* The stale fragment may present us with a smaller
+		 * descriptor than the actual fragment size. To account
+		 * for that we need to remove all the data on the front and
+		 * figure out what the remainder would be in the last
+		 * descriptor associated with the fragment.
+		 */
+		if (stale_size > ICE_MAX_DATA_PER_TXD) {
+			int align_pad = -(skb_frag_off(stale)) &
+					(ICE_MAX_READ_REQ_SIZE - 1);
+
+			sum -= align_pad;
+			stale_size -= align_pad;
+
+			do {
+				sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
+				stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
+			} while (stale_size > ICE_MAX_DATA_PER_TXD);
+		}
+
 		/* if sum is negative we failed to make sufficient progress */
 		if (sum < 0)
 			return true;
@@ -2305,7 +2322,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
 		if (!nr_frags--)
 			break;
 
-		sum -= skb_frag_size(stale++);
+		sum -= stale_size;
 	}
 
 	return false;
@@ -193,7 +193,7 @@ struct ice_rxq_stats {
 	u64 non_eop_descs;
 	u64 alloc_page_failed;
 	u64 alloc_buf_failed;
-	u64 page_reuse_count;
+	u64 gro_dropped; /* GRO returned dropped */
 };
 
 /* this enum matches hardware bits and is meant to be used by DYN_CTLN
@@ -191,7 +191,12 @@ ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
 	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
 	    (vlan_tag & VLAN_VID_MASK))
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-	napi_gro_receive(&rx_ring->q_vector->napi, skb);
+	if (napi_gro_receive(&rx_ring->q_vector->napi, skb) == GRO_DROP) {
+		/* this is tracked separately to help us debug stack drops */
+		rx_ring->rx_stats.gro_dropped++;
+		netdev_dbg(rx_ring->netdev, "Receive Queue %d: Dropped packet from GRO\n",
+			   rx_ring->q_index);
+	}
 }
 
 /**
@@ -321,7 +321,7 @@ struct ice_nvm_info {
 	u32 flash_size;		/* Size of available flash in bytes */
 	u8 major_ver;		/* major version of NVM package */
 	u8 minor_ver;		/* minor version of dev starter */
 	u8 blank_nvm_mode;	/* is NVM empty (no FW present) */
 };
 
 struct ice_link_default_override_tlv {
@@ -932,6 +932,8 @@ static int ice_set_per_vf_res(struct ice_pf *pf)
 		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
 	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
 		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
+	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
+		num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
 	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
 		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
 	} else {
@@ -2972,8 +2974,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 			vsi->max_frame = qpi->rxq.max_pkt_size;
 	}
 
-	/* VF can request to configure less than allocated queues
-	 * or default allocated queues. So update the VSI with new number
+	/* VF can request to configure less than allocated queues or default
+	 * allocated queues. So update the VSI with new number
 	 */
 	vsi->num_txq = num_txq;
 	vsi->num_rxq = num_rxq;
@@ -32,6 +32,7 @@
 #define ICE_MAX_RSS_QS_PER_VF		16
 #define ICE_NUM_VF_MSIX_MED		17
 #define ICE_NUM_VF_MSIX_SMALL		5
+#define ICE_NUM_VF_MSIX_MULTIQ_MIN	3
 #define ICE_MIN_INTR_PER_VF		(ICE_MIN_QS_PER_VF + 1)
 #define ICE_MAX_VF_RESET_TRIES		40
 #define ICE_MAX_VF_RESET_SLEEP_MS	20
@@ -298,7 +298,6 @@ static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid)
 	}
 }
 
-
 /**
 * ice_xsk_umem_disable - disable a UMEM region
 * @vsi: Current VSI
@@ -594,7 +593,6 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
 		if (!size)
 			break;
 
 		rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
-
 		rx_buf->xdp->data_end = rx_buf->xdp->data + size;
 		xsk_buff_dma_sync_for_cpu(rx_buf->xdp);