Commit 4ee508ff authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
100GbE Intel Wired LAN Driver Updates 2022-03-03

Jacob Keller says:

This series refactors VF storage in the ice networking driver from a
simple static array to a hash table. It also introduces reference
counting (krefs) and proper locking to protect against common
use-after-free and concurrency issues.

There are two motivations for this work. The first is to make the ice
driver more resilient by preventing a whole class of use-after-free bugs
that can occur when VF structures are accessed concurrently with VF
removal.

The second is to prepare the ice driver for future virtualization work to
support Scalable IOV, an alternative VF implementation to Single
Root IOV. The new VF implementation will allow for more dynamic VF creation
and removal, necessitating a more robust implementation for VF storage that
can't rely on the existing mechanisms to prevent concurrent access
violations.

The first few patches are cleanup and preparatory work needed to make the
conversion to the hash table safe. Following this preparatory work is a
patch to migrate the VF structures and variables to a new sub-structure for
code clarity. Next, new interface functions are introduced to abstract the VF
storage. Finally, the driver is actually converted to the hash table and
kref implementation.
====================
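For illustration, here is a minimal sketch of the reference-counted access
pattern the series converts callers to. The ice_get_vf_by_id()/ice_put_vf()
interfaces come from the patches below; the surrounding function is a
hypothetical example, not part of the series:

	/* Hypothetical caller: look up one VF by ID while holding a
	 * reference, so a concurrent VF removal cannot free the
	 * structure underneath us.
	 */
	static int ice_example_handle_vf_msg(struct ice_pf *pf, u16 vf_id)
	{
		struct ice_vf *vf;

		/* Takes a kref on success; returns NULL if no such VF */
		vf = ice_get_vf_by_id(pf, vf_id);
		if (!vf)
			return -ENOENT;

		/* ... operate on the VF; the kref keeps it alive here ... */

		/* Drop the reference taken by ice_get_vf_by_id() */
		ice_put_vf(vf);
		return 0;
	}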
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f2ecfa06 3d5985a1
@@ -109,7 +109,6 @@
 /* All VF control VSIs share the same IRQ, so assign a unique ID for them */
 #define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_RDMA_VEC_ID - 1)
 #define ICE_INVAL_Q_INDEX 0xffff
-#define ICE_INVAL_VFID 256
 #define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */
@@ -333,7 +332,7 @@ struct ice_vsi {
 	u16 vsi_num;			/* HW (absolute) index of this VSI */
 	u16 idx;			/* software index in pf->vsi[] */
-	s16 vf_id;			/* VF ID for SR-IOV VSIs */
+	struct ice_vf *vf;		/* VF associated with this VSI */
 	u16 ethtype;			/* Ethernet protocol for pause frame */
 	u16 num_gfltr;
@@ -529,15 +528,7 @@ struct ice_pf {
 	struct ice_vsi **vsi;		/* VSIs created by the driver */
 	struct ice_sw *first_sw;	/* first switch created by firmware */
 	u16 eswitch_mode;		/* current mode of eswitch */
-	/* Virtchnl/SR-IOV config info */
-	struct ice_vf *vf;
-	u16 num_alloc_vfs;		/* actual number of VFs allocated */
-	u16 num_vfs_supported;		/* num VFs supported for this PF */
-	u16 num_qps_per_vf;
-	u16 num_msix_per_vf;
-	/* used to ratelimit the MDD event logging */
-	unsigned long last_printed_mdd_jiffies;
-	DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT);
+	struct ice_vfs vfs;
 	DECLARE_BITMAP(features, ICE_F_MAX);
 	DECLARE_BITMAP(state, ICE_STATE_NBITS);
 	DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
...
@@ -323,7 +323,7 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
 		break;
 	case ICE_VSI_VF:
 		/* Firmware expects vmvf_num to be absolute VF ID */
-		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
+		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
 		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
 		break;
 	case ICE_VSI_SWITCHDEV_CTRL:
@@ -429,7 +429,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
 	 */
 	if (ice_is_dvm_ena(hw))
 		if (vsi->type == ICE_VSI_VF &&
-		    ice_vf_is_port_vlan_ena(&vsi->back->vf[vsi->vf_id]))
+		    ice_vf_is_port_vlan_ena(vsi->vf))
 			rlan_ctx.l2tsel = 1;
 		else
 			rlan_ctx.l2tsel = 0;
...
@@ -176,10 +176,20 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
 	int q_id;

 	ice_for_each_txq(vsi, q_id) {
-		struct ice_repr *repr = pf->vf[q_id].repr;
-		struct ice_q_vector *q_vector = repr->q_vector;
-		struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
-		struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
+		struct ice_q_vector *q_vector;
+		struct ice_tx_ring *tx_ring;
+		struct ice_rx_ring *rx_ring;
+		struct ice_repr *repr;
+		struct ice_vf *vf;
+
+		vf = ice_get_vf_by_id(pf, q_id);
+		if (WARN_ON(!vf))
+			continue;
+
+		repr = vf->repr;
+		q_vector = repr->q_vector;
+		tx_ring = vsi->tx_rings[q_id];
+		rx_ring = vsi->rx_rings[q_id];

 		q_vector->vsi = vsi;
 		q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
@@ -199,6 +209,38 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
 		rx_ring->q_vector = q_vector;
 		rx_ring->next = NULL;
 		rx_ring->netdev = repr->netdev;
+
+		ice_put_vf(vf);
 	}
 }

+/**
+ * ice_eswitch_release_reprs - clear PR VSIs configuration
+ * @pf: pointer to PF struct
+ * @ctrl_vsi: pointer to switchdev control VSI
+ */
+static void
+ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
+{
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);
+
+	ice_for_each_vf(pf, bkt, vf) {
+		struct ice_vsi *vsi = vf->repr->src_vsi;
+
+		/* Skip VFs that aren't configured */
+		if (!vf->repr->dst)
+			continue;
+
+		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+		metadata_dst_free(vf->repr->dst);
+		vf->repr->dst = NULL;
+		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
+					       ICE_FWD_TO_VSI);
+
+		netif_napi_del(&vf->repr->q_vector->napi);
+	}
+}
+
@@ -210,11 +252,13 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
 {
 	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
 	int max_vsi_num = 0;
-	int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);

-	ice_for_each_vf(pf, i) {
-		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
-		struct ice_vf *vf = &pf->vf[i];
+	ice_for_each_vf(pf, bkt, vf) {
+		struct ice_vsi *vsi = vf->repr->src_vsi;

 		ice_remove_vsi_fltr(&pf->hw, vsi->idx);
 		vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
@@ -231,6 +275,7 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
 					       vf->hw_lan_addr.addr,
 					       ICE_FWD_TO_VSI);
 			metadata_dst_free(vf->repr->dst);
+			vf->repr->dst = NULL;
 			goto err;
 		}
@@ -239,6 +284,7 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
 					       vf->hw_lan_addr.addr,
 					       ICE_FWD_TO_VSI);
 			metadata_dst_free(vf->repr->dst);
+			vf->repr->dst = NULL;
 			ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
 			goto err;
 		}
@@ -252,8 +298,8 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
 		netif_keep_dst(vf->repr->netdev);
 	}

-	ice_for_each_vf(pf, i) {
-		struct ice_repr *repr = pf->vf[i].repr;
+	ice_for_each_vf(pf, bkt, vf) {
+		struct ice_repr *repr = vf->repr;
 		struct ice_vsi *vsi = repr->src_vsi;
 		struct metadata_dst *dst;
@@ -266,42 +312,11 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
 	return 0;

 err:
-	for (i = i - 1; i >= 0; i--) {
-		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
-		struct ice_vf *vf = &pf->vf[i];
-
-		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
-		metadata_dst_free(vf->repr->dst);
-		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
-					       ICE_FWD_TO_VSI);
-	}
+	ice_eswitch_release_reprs(pf, ctrl_vsi);

 	return -ENODEV;
 }

-/**
- * ice_eswitch_release_reprs - clear PR VSIs configuration
- * @pf: pointer to PF struct
- * @ctrl_vsi: pointer to switchdev control VSI
- */
-static void
-ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
-{
-	int i;
-
-	ice_for_each_vf(pf, i) {
-		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
-		struct ice_vf *vf = &pf->vf[i];
-
-		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
-		metadata_dst_free(vf->repr->dst);
-		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
-					       ICE_FWD_TO_VSI);
-
-		netif_napi_del(&vf->repr->q_vector->napi);
-	}
-}
-
 /**
  * ice_eswitch_update_repr - reconfigure VF port representor
  * @vsi: VF VSI for which port representor is configured
@@ -316,7 +331,7 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
 	if (!ice_is_switchdev_running(pf))
 		return;

-	vf = &pf->vf[vsi->vf_id];
+	vf = vsi->vf;
 	repr = vf->repr;
 	repr->src_vsi = vsi;
 	repr->dst->u.port_info.port_id = vsi->vsi_num;
@@ -324,7 +339,8 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
 	ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
 	if (ret) {
 		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI);
-		dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id);
+		dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
+			vsi->vf->vf_id);
 	}
 }
@@ -408,7 +424,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
 static struct ice_vsi *
 ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
 {
-	return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID, NULL);
+	return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, NULL, NULL);
 }

 /**
@@ -417,10 +433,13 @@ ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
  */
 static void ice_eswitch_napi_del(struct ice_pf *pf)
 {
-	int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);

-	ice_for_each_vf(pf, i)
-		netif_napi_del(&pf->vf[i].repr->q_vector->napi);
+	ice_for_each_vf(pf, bkt, vf)
+		netif_napi_del(&vf->repr->q_vector->napi);
 }

 /**
@@ -429,10 +448,13 @@ static void ice_eswitch_napi_del(struct ice_pf *pf)
  */
 static void ice_eswitch_napi_enable(struct ice_pf *pf)
 {
-	int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);

-	ice_for_each_vf(pf, i)
-		napi_enable(&pf->vf[i].repr->q_vector->napi);
+	ice_for_each_vf(pf, bkt, vf)
+		napi_enable(&vf->repr->q_vector->napi);
 }

 /**
@@ -441,10 +463,13 @@ static void ice_eswitch_napi_enable(struct ice_pf *pf)
  */
 static void ice_eswitch_napi_disable(struct ice_pf *pf)
 {
-	int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);

-	ice_for_each_vf(pf, i)
-		napi_disable(&pf->vf[i].repr->q_vector->napi);
+	ice_for_each_vf(pf, bkt, vf)
+		napi_disable(&vf->repr->q_vector->napi);
 }

 /**
@@ -522,7 +547,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
 	if (pf->eswitch_mode == mode)
 		return 0;

-	if (pf->num_alloc_vfs) {
+	if (ice_has_vfs(pf)) {
 		dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created");
 		NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created");
 		return -EOPNOTSUPP;
@@ -613,16 +638,17 @@ int ice_eswitch_configure(struct ice_pf *pf)
  */
 static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
 {
-	struct ice_repr *repr;
-	int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);

 	if (test_bit(ICE_DOWN, pf->state))
 		return;

-	ice_for_each_vf(pf, i) {
-		repr = pf->vf[i].repr;
-		if (repr)
-			ice_repr_start_tx_queues(repr);
+	ice_for_each_vf(pf, bkt, vf) {
+		if (vf->repr)
+			ice_repr_start_tx_queues(vf->repr);
 	}
 }

 /**
@@ -632,16 +658,17 @@ static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
  */
 void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
 {
-	struct ice_repr *repr;
-	int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);

 	if (test_bit(ICE_DOWN, pf->state))
 		return;

-	ice_for_each_vf(pf, i) {
-		repr = pf->vf[i].repr;
-		if (repr)
-			ice_repr_stop_tx_queues(repr);
+	ice_for_each_vf(pf, bkt, vf) {
+		if (vf->repr)
+			ice_repr_stop_tx_queues(vf->repr);
 	}
 }
...
@@ -316,16 +316,20 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
  */
 static bool ice_active_vfs(struct ice_pf *pf)
 {
-	unsigned int i;
+	bool active = false;
+	struct ice_vf *vf;
+	unsigned int bkt;

-	ice_for_each_vf(pf, i) {
-		struct ice_vf *vf = &pf->vf[i];
-
-		if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
-			return true;
+	rcu_read_lock();
+	ice_for_each_vf_rcu(pf, bkt, vf) {
+		if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+			active = true;
+			break;
+		}
 	}
+	rcu_read_unlock();

-	return false;
+	return active;
 }

 /**
@@ -1298,7 +1302,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 	}

 	if (test_bit(ICE_FLAG_VF_VLAN_PRUNING, change_flags) &&
-	    pf->num_alloc_vfs) {
+	    ice_has_vfs(pf)) {
 		dev_err(dev, "vf-vlan-pruning: VLAN pruning cannot be changed while VFs are active.\n");
 		/* toggle bit back to previous state */
 		change_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags);
...
... (diff for one file is collapsed and not shown) ...
@@ -52,7 +52,8 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);

 struct ice_vsi *
 ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
-	      enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch);
+	      enum ice_vsi_type vsi_type, struct ice_vf *vf,
+	      struct ice_channel *ch);

 void ice_napi_del(struct ice_vsi *vsi);
...
@@ -505,7 +505,8 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
 {
 	struct ice_hw *hw = &pf->hw;
 	struct ice_vsi *vsi;
-	unsigned int i;
+	struct ice_vf *vf;
+	unsigned int bkt;

 	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
@@ -520,8 +521,10 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
 	ice_vc_notify_reset(pf);

 	/* Disable VFs until reset is completed */
-	ice_for_each_vf(pf, i)
-		ice_set_vf_state_qs_dis(&pf->vf[i]);
+	mutex_lock(&pf->vfs.table_lock);
+	ice_for_each_vf(pf, bkt, vf)
+		ice_set_vf_state_qs_dis(vf);
+	mutex_unlock(&pf->vfs.table_lock);

 	if (ice_is_eswitch_mode_switchdev(pf)) {
 		if (reset_type != ICE_RESET_PFR)
@@ -1666,7 +1669,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
 {
 	struct device *dev = ice_pf_to_dev(pf);
 	struct ice_hw *hw = &pf->hw;
-	unsigned int i;
+	struct ice_vf *vf;
+	unsigned int bkt;
 	u32 reg;

 	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
@@ -1754,47 +1758,46 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
 	/* Check to see if one of the VFs caused an MDD event, and then
 	 * increment counters and set print pending
 	 */
-	ice_for_each_vf(pf, i) {
-		struct ice_vf *vf = &pf->vf[i];
-
-		reg = rd32(hw, VP_MDET_TX_PQM(i));
+	mutex_lock(&pf->vfs.table_lock);
+	ice_for_each_vf(pf, bkt, vf) {
+		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
 		if (reg & VP_MDET_TX_PQM_VALID_M) {
-			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
+			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
 			vf->mdd_tx_events.count++;
 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
 			if (netif_msg_tx_err(pf))
 				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
-					 i);
+					 vf->vf_id);
 		}

-		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
+		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
 		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
-			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
+			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
 			vf->mdd_tx_events.count++;
 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
 			if (netif_msg_tx_err(pf))
 				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
-					 i);
+					 vf->vf_id);
 		}

-		reg = rd32(hw, VP_MDET_TX_TDPU(i));
+		reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
 		if (reg & VP_MDET_TX_TDPU_VALID_M) {
-			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
+			wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
 			vf->mdd_tx_events.count++;
 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
 			if (netif_msg_tx_err(pf))
 				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
-					 i);
+					 vf->vf_id);
 		}

-		reg = rd32(hw, VP_MDET_RX(i));
+		reg = rd32(hw, VP_MDET_RX(vf->vf_id));
 		if (reg & VP_MDET_RX_VALID_M) {
-			wr32(hw, VP_MDET_RX(i), 0xFFFF);
+			wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
 			vf->mdd_rx_events.count++;
 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
 			if (netif_msg_rx_err(pf))
 				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
-					 i);
+					 vf->vf_id);

 			/* Since the queue is disabled on VF Rx MDD events, the
 			 * PF can be configured to reset the VF through ethtool
@@ -1805,12 +1808,13 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
 			 * reset, so print the event prior to reset.
 			 */
 			ice_print_vf_rx_mdd_event(vf);

-			mutex_lock(&pf->vf[i].cfg_lock);
-			ice_reset_vf(&pf->vf[i], false);
-			mutex_unlock(&pf->vf[i].cfg_lock);
+			mutex_lock(&vf->cfg_lock);
+			ice_reset_vf(vf, false);
+			mutex_unlock(&vf->cfg_lock);
 		}
 	}
+	mutex_unlock(&pf->vfs.table_lock);

 	ice_print_vfs_mdd_events(pf);
 }
@@ -2439,7 +2443,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
 			/* skip this unused q_vector */
 			continue;
 		}
-		if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
+		if (vsi->type == ICE_VSI_CTRL && vsi->vf)
 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
 					       IRQF_SHARED, q_vector->name,
 					       q_vector);
@@ -3386,14 +3390,14 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
 static struct ice_vsi *
 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
 {
-	return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID, NULL);
+	return ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL);
 }

 static struct ice_vsi *
 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 		   struct ice_channel *ch)
 {
-	return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, ICE_INVAL_VFID, ch);
+	return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, NULL, ch);
 }

 /**
@@ -3407,7 +3411,7 @@ ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 static struct ice_vsi *
 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
 {
-	return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID, NULL);
+	return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, NULL, NULL);
 }

 /**
@@ -3421,7 +3425,7 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
 struct ice_vsi *
 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
 {
-	return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID, NULL);
+	return ice_vsi_setup(pf, pi, ICE_VSI_LB, NULL, NULL);
 }

 /**
@@ -3680,6 +3684,7 @@ static void ice_deinit_pf(struct ice_pf *pf)
 	mutex_destroy(&pf->sw_mutex);
 	mutex_destroy(&pf->tc_mutex);
 	mutex_destroy(&pf->avail_q_mutex);
+	mutex_destroy(&pf->vfs.table_lock);

 	if (pf->avail_txqs) {
 		bitmap_free(pf->avail_txqs);
@@ -3712,7 +3717,7 @@ static void ice_set_pf_caps(struct ice_pf *pf)
 	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
 	if (func_caps->common_cap.sr_iov_1_1) {
 		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
-		pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
+		pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
 					      ICE_MAX_VF_COUNT);
 	}
 	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
@@ -3779,6 +3784,9 @@ static int ice_init_pf(struct ice_pf *pf)
 		return -ENOMEM;
 	}

+	mutex_init(&pf->vfs.table_lock);
+	hash_init(pf->vfs.table);
+
 	return 0;
 }
...
@@ -284,6 +284,8 @@ static int ice_repr_add(struct ice_vf *vf)
 	devlink_port_type_eth_set(&vf->devlink_port, repr->netdev);

+	ice_vc_change_ops_to_repr(&vf->vc_ops);
+
 	return 0;

 err_netdev:
@@ -311,6 +313,9 @@ static int ice_repr_add(struct ice_vf *vf)
  */
 static void ice_repr_rem(struct ice_vf *vf)
 {
+	if (!vf->repr)
+		return;
+
 	ice_devlink_destroy_vf_port(vf);
 	kfree(vf->repr->q_vector);
 	vf->repr->q_vector = NULL;
@@ -323,6 +328,23 @@ static void ice_repr_rem(struct ice_vf *vf)
 #endif
 	kfree(vf->repr);
 	vf->repr = NULL;
+
+	ice_vc_set_dflt_vf_ops(&vf->vc_ops);
+}
+
+/**
+ * ice_repr_rem_from_all_vfs - remove port representor for all VFs
+ * @pf: pointer to PF structure
+ */
+void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
+{
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	lockdep_assert_held(&pf->vfs.table_lock);
+
+	ice_for_each_vf(pf, bkt, vf)
+		ice_repr_rem(vf);
 }

 /**
@@ -331,48 +353,26 @@ static void ice_repr_rem(struct ice_vf *vf)
  */
 int ice_repr_add_for_all_vfs(struct ice_pf *pf)
 {
+	struct ice_vf *vf;
+	unsigned int bkt;
 	int err;
-	int i;

-	ice_for_each_vf(pf, i) {
-		struct ice_vf *vf = &pf->vf[i];
+	lockdep_assert_held(&pf->vfs.table_lock);

+	ice_for_each_vf(pf, bkt, vf) {
 		err = ice_repr_add(vf);
 		if (err)
 			goto err;
-
-		ice_vc_change_ops_to_repr(&vf->vc_ops);
 	}

 	return 0;

 err:
-	for (i = i - 1; i >= 0; i--) {
-		struct ice_vf *vf = &pf->vf[i];
-
-		ice_repr_rem(vf);
-		ice_vc_set_dflt_vf_ops(&vf->vc_ops);
-	}
+	ice_repr_rem_from_all_vfs(pf);

 	return err;
 }

-/**
- * ice_repr_rem_from_all_vfs - remove port representor for all VFs
- * @pf: pointer to PF structure
- */
-void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
-{
-	int i;
-
-	ice_for_each_vf(pf, i) {
-		struct ice_vf *vf = &pf->vf[i];
-
-		ice_repr_rem(vf);
-		ice_vc_set_dflt_vf_ops(&vf->vc_ops);
-	}
-}
-
 /**
  * ice_repr_start_tx_queues - start Tx queues of port representor
  * @repr: pointer to repr structure
...
@@ -1165,7 +1165,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 			struct ice_vsi *ctrl_vsi = rx_ring->vsi;

 			if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
-			    ctrl_vsi->vf_id != ICE_INVAL_VFID)
+			    ctrl_vsi->vf)
 				ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
 			ice_put_rx_buf(rx_ring, NULL, 0);
 			cleaned_count++;
...
@@ -34,9 +34,10 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
 {
 	struct ice_vsi_vlan_ops *vlan_ops;
 	struct ice_pf *pf = vsi->back;
-	struct ice_vf *vf;
+	struct ice_vf *vf = vsi->vf;

-	vf = &pf->vf[vsi->vf_id];
+	if (WARN_ON(!vf))
+		return;

 	if (ice_is_dvm_ena(&pf->hw)) {
 		vlan_ops = &vsi->outer_vlan_ops;
@@ -126,9 +127,14 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
  */
 void ice_vf_vsi_cfg_dvm_legacy_vlan_mode(struct ice_vsi *vsi)
 {
-	struct ice_vf *vf = &vsi->back->vf[vsi->vf_id];
-	struct device *dev = ice_pf_to_dev(vf->pf);
 	struct ice_vsi_vlan_ops *vlan_ops;
+	struct ice_vf *vf = vsi->vf;
+	struct device *dev;
+
+	if (WARN_ON(!vf))
+		return;
+
+	dev = ice_pf_to_dev(vf->pf);

 	if (!ice_is_dvm_ena(&vsi->back->hw) || ice_vf_is_port_vlan_ena(vf))
 		return;
@@ -192,7 +198,10 @@ void ice_vf_vsi_cfg_dvm_legacy_vlan_mode(struct ice_vsi *vsi)
  */
 void ice_vf_vsi_cfg_svm_legacy_vlan_mode(struct ice_vsi *vsi)
 {
-	struct ice_vf *vf = &vsi->back->vf[vsi->vf_id];
+	struct ice_vf *vf = vsi->vf;
+
+	if (WARN_ON(!vf))
+		return;

 	if (ice_is_dvm_ena(&vsi->back->hw) || ice_vf_is_port_vlan_ena(vf))
 		return;
...
@@ -1288,15 +1288,16 @@ ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
 		       union ice_32b_rx_flex_desc *rx_desc)
 {
 	struct ice_pf *pf = ctrl_vsi->back;
+	struct ice_vf *vf = ctrl_vsi->vf;
 	struct ice_vf_fdir_ctx *ctx_done;
 	struct ice_vf_fdir_ctx *ctx_irq;
 	struct ice_vf_fdir *fdir;
 	unsigned long flags;
 	struct device *dev;
-	struct ice_vf *vf;
 	int ret;

-	vf = &pf->vf[ctrl_vsi->vf_id];
+	if (WARN_ON(!vf))
+		return;

 	fdir = &vf->fdir;
 	ctx_done = &fdir->ctx_done;
@@ -1571,15 +1572,16 @@ ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
  */
 void ice_flush_fdir_ctx(struct ice_pf *pf)
 {
-	int i;
+	struct ice_vf *vf;
+	unsigned int bkt;

 	if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
 		return;

-	ice_for_each_vf(pf, i) {
+	mutex_lock(&pf->vfs.table_lock);
+	ice_for_each_vf(pf, bkt, vf) {
 		struct device *dev = ice_pf_to_dev(pf);
 		enum virtchnl_fdir_prgm_status status;
-		struct ice_vf *vf = &pf->vf[i];
 		struct ice_vf_fdir_ctx *ctx;
 		unsigned long flags;
 		int ret;
@@ -1633,6 +1635,7 @@ void ice_flush_fdir_ctx(struct ice_pf *pf)
 		ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
 		spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
 	}
+	mutex_unlock(&pf->vfs.table_lock);
 }

 /**
...
@@ -39,8 +39,50 @@
 #define ICE_MAX_VF_RESET_TRIES 40
 #define ICE_MAX_VF_RESET_SLEEP_MS 20

-#define ice_for_each_vf(pf, i) \
-	for ((i) = 0; (i) < (pf)->num_alloc_vfs; (i)++)
+/* VF Hash Table access functions
+ *
+ * These functions provide abstraction for interacting with the VF hash table.
+ * In general, direct access to the hash table should be avoided outside of
+ * these functions where possible.
+ *
+ * The VF entries in the hash table are protected by reference counting to
+ * track lifetime of accesses from the table. The ice_get_vf_by_id() function
+ * obtains a reference to the VF structure which must be dropped by using
+ * ice_put_vf().
+ */
+
+/**
+ * ice_for_each_vf - Iterate over each VF entry
+ * @pf: pointer to the PF private structure
+ * @bkt: bucket index used for iteration
+ * @vf: pointer to the VF entry currently being processed in the loop.
+ *
+ * The bkt variable is an unsigned integer iterator used to traverse the VF
+ * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
+ * Use vf->vf_id to get the id number if needed.
+ *
+ * The caller is expected to be under the table_lock mutex for the entire
+ * loop. Use this iterator if your loop is long or if it might sleep.
+ */
+#define ice_for_each_vf(pf, bkt, vf) \
+	hash_for_each((pf)->vfs.table, (bkt), (vf), entry)
+
+/**
+ * ice_for_each_vf_rcu - Iterate over each VF entry protected by RCU
+ * @pf: pointer to the PF private structure
+ * @bkt: bucket index used for iteration
+ * @vf: pointer to the VF entry currently being processed in the loop.
+ *
+ * The bkt variable is an unsigned integer iterator used to traverse the VF
+ * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
+ * Use vf->vf_id to get the id number if needed.
+ *
+ * The caller is expected to be under rcu_read_lock() for the entire loop.
+ * Only use this iterator if your loop is short and you can guarantee it does
+ * not sleep.
+ */
+#define ice_for_each_vf_rcu(pf, bkt, vf) \
+	hash_for_each_rcu((pf)->vfs.table, (bkt), (vf), entry)

 /* Specific VF states */
 enum ice_vf_states {
@@ -104,8 +146,22 @@ struct ice_vc_vf_ops {
 	int (*dis_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg);
 };

+/* Virtchnl/SR-IOV config info */
+struct ice_vfs {
+	DECLARE_HASHTABLE(table, 8);	/* table of VF entries */
+	struct mutex table_lock;	/* Lock for protecting the hash table */
+	u16 num_supported;		/* max supported VFs on this PF */
+	u16 num_qps_per;		/* number of queue pairs per VF */
+	u16 num_msix_per;		/* number of MSI-X vectors per VF */
+	unsigned long last_printed_mdd_jiffies;	/* MDD message rate limit */
+	DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT); /* malicious VF indicator */
+};
+
 /* VF information structure */
 struct ice_vf {
+	struct hlist_node entry;
+	struct rcu_head rcu;
+	struct kref refcnt;
 	struct ice_pf *pf;

 	/* Used during virtchnl message handling and NDO ops against the VF
@@ -162,6 +218,10 @@ struct ice_vf {
 };

 #ifdef CONFIG_PCI_IOV
+struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id);
+void ice_put_vf(struct ice_vf *vf);
+bool ice_has_vfs(struct ice_pf *pf);
+u16 ice_get_num_vfs(struct ice_pf *pf);
 struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
 void ice_process_vflr_event(struct ice_pf *pf);
 int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
@@ -221,6 +281,25 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
 bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
 bool ice_vf_is_port_vlan_ena(struct ice_vf *vf);
 #else /* CONFIG_PCI_IOV */
+static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
+{
+	return NULL;
+}
+
+static inline void ice_put_vf(struct ice_vf *vf)
+{
+}
+
+static inline bool ice_has_vfs(struct ice_pf *pf)
+{
+	return false;
+}
+
+static inline u16 ice_get_num_vfs(struct ice_pf *pf)
+{
+	return 0;
+}
+
 static inline void ice_process_vflr_event(struct ice_pf *pf) { }
 static inline void ice_free_vfs(struct ice_pf *pf) { }
 static inline
...
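As a usage note for the iterators defined above (a hedged sketch; the two
walker functions are hypothetical, while the macros, locks, and fields come
from this series): long or sleeping loops hold the table_lock mutex and use
ice_for_each_vf(), while short non-sleeping loops may use
ice_for_each_vf_rcu() under rcu_read_lock():

	/* Hypothetical walker using the mutex-protected iterator */
	static void ice_example_walk_vfs(struct ice_pf *pf)
	{
		struct ice_vf *vf;
		unsigned int bkt;	/* hash bucket index, not the VF ID */

		mutex_lock(&pf->vfs.table_lock);
		ice_for_each_vf(pf, bkt, vf)
			dev_dbg(ice_pf_to_dev(pf), "VF %u present\n", vf->vf_id);
		mutex_unlock(&pf->vfs.table_lock);
	}

	/* Hypothetical check using the RCU iterator; the loop must not sleep */
	static bool ice_example_any_vf_active(struct ice_pf *pf)
	{
		struct ice_vf *vf;
		unsigned int bkt;
		bool active = false;

		rcu_read_lock();
		ice_for_each_vf_rcu(pf, bkt, vf) {
			if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
				active = true;
				break;
			}
		}
		rcu_read_unlock();
		return active;
	}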