Commit 459a70ba authored by Jakub Kicinski

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
ice: one-by-one port representor creation

Michal Swiatkowski says:

Currently ice supports creating port representors only for VFs. For that
use case they can be created and removed in one step.

This patchset refactors the current flow to support port representor
creation also for subfunctions and SIOV. In those cases port representors
need to be created and removed one by one. They can also be added and
removed while other port representors are running.

To achieve that, the switchdev configuration flow has to change. The
first three patches are only cosmetic (renaming, removing unused code).
The next few are preparation for the new flow. The most important one is
"ice: add VF representors one by one", which fully implements the new
flow.

A new type of port representor (for subfunctions) will be introduced in
a follow-up patchset.
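
To make the mechanism concrete, here is a minimal editorial sketch (not
code from the series; the sketch_* helper names are made up and error
handling is trimmed) of the idea behind tracking representors in an
xarray: each representor gets its own index, so one can be attached or
detached without disturbing its siblings.

    /* minimal sketch, assuming the ice_pf/ice_repr layout shown in the
     * diff below; sketch_* names are illustrative only
     */
    #include <linux/xarray.h>

    static int sketch_repr_attach(struct ice_pf *pf, struct ice_repr *repr)
    {
        u32 id;
        int err;

        /* pick any free index; it becomes the representor's id */
        err = xa_alloc(&pf->eswitch.reprs, &id, repr, xa_limit_32b,
                       GFP_KERNEL);
        if (err)
            return err;

        repr->id = id;
        return 0;
    }

    static void sketch_repr_detach(struct ice_pf *pf, struct ice_repr *repr)
    {
        /* erasing one entry leaves all other representors running */
        xa_erase(&pf->eswitch.reprs, repr->id);
    }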

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  ice: reserve number of CP queues
  ice: adjust switchdev rebuild path
  ice: add VF representors one by one
  ice: realloc VSI stats arrays
  ice: set Tx topology every time new repr is added
  ice: allow changing SWITCHDEV_CTRL VSI queues
  ice: return pointer to representor
  ice: make representor code generic
  ice: remove VF pointer reference in eswitch code
  ice: track port representors in xarray
  ice: use repr instead of vf->repr
  ice: track q_id in representor
  ice: remove unused control VSI parameter
  ice: remove redundant max_vsi_num variable
  ice: rename switchdev to eswitch
====================
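
As a hedged illustration of the control-plane (CP) queue bookkeeping in
"ice: reserve number of CP queues" and "ice: allow changing
SWITCHDEV_CTRL VSI queues" above: the qs struct added to struct
ice_eswitch in the diff below tracks how many CP queues are currently
configured (value) versus wanted (to_reach), so the control VSI is
rebuilt at most once per batch of representor changes. The sketch below
follows the shape of the reserve helper but is not quoted verbatim from
the series.

    /* minimal sketch: record the target CP queue count; the actual
     * queue reconfiguration happens later, once a representor change
     * notices qs.is_reaching is set
     */
    static void sketch_reserve_cp_queues(struct ice_pf *pf, int change)
    {
        if (pf->eswitch.qs.value + change < 0)
            return;

        pf->eswitch.qs.to_reach = pf->eswitch.qs.value + change;
        pf->eswitch.qs.is_reaching = true;
    }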

Link: https://lore.kernel.org/r/20231114181449.1290117-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents a49296e0 19b39cae
@@ -522,11 +522,18 @@ enum ice_misc_thread_tasks {
     ICE_MISC_THREAD_NBITS    /* must be last */
 };
 
-struct ice_switchdev_info {
+struct ice_eswitch {
     struct ice_vsi *control_vsi;
     struct ice_vsi *uplink_vsi;
     struct ice_esw_br_offloads *br_offloads;
+    struct xarray reprs;
     bool is_running;
+    /* struct to allow cp queues management optimization */
+    struct {
+        int to_reach;
+        int value;
+        bool is_reaching;
+    } qs;
 };
 
 struct ice_agg_node {
@@ -637,7 +644,7 @@ struct ice_pf {
     struct ice_link_default_override_tlv link_dflt_override;
     struct ice_lag *lag; /* Link Aggregation information */
 
-    struct ice_switchdev_info switchdev;
+    struct ice_eswitch eswitch;
     struct ice_esw_br_port *br_port;
 
 #define ICE_INVALID_AGG_NODE_ID 0
@@ -846,7 +853,7 @@ static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
  */
 static inline bool ice_is_switchdev_running(struct ice_pf *pf)
 {
-    return pf->switchdev.is_running;
+    return pf->eswitch.is_running;
 }
 
 #define ICE_FD_STAT_CTR_BLOCK_COUNT 256
......
@@ -810,6 +810,10 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
     struct ice_vf *vf;
     int i;
 
+    if (node->rate_node)
+        /* already added, skip to the next */
+        goto traverse_children;
+
     if (node->parent == tc_node) {
         /* create root node */
         rate_node = devl_rate_node_create(devlink, node, node->name, NULL);
@@ -831,6 +835,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node
     if (rate_node && !IS_ERR(rate_node))
         node->rate_node = rate_node;
 
+traverse_children:
     for (i = 0; i < node->num_children; i++)
         ice_traverse_tx_tree(devlink, node->children[i], tc_node, pf);
 }
@@ -861,6 +866,30 @@ int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *v
     return 0;
 }
 
+static void ice_clear_rate_nodes(struct ice_sched_node *node)
+{
+    node->rate_node = NULL;
+
+    for (int i = 0; i < node->num_children; i++)
+        ice_clear_rate_nodes(node->children[i]);
+}
+
+/**
+ * ice_devlink_rate_clear_tx_topology - clear node->rate_node
+ * @vsi: main vsi struct
+ *
+ * Clear rate_node to cleanup creation of Tx topology.
+ *
+ */
+void ice_devlink_rate_clear_tx_topology(struct ice_vsi *vsi)
+{
+    struct ice_port_info *pi = vsi->port_info;
+
+    mutex_lock(&pi->sched_lock);
+    ice_clear_rate_nodes(pi->root->children[0]);
+    mutex_unlock(&pi->sched_lock);
+}
+
 /**
  * ice_set_object_tx_share - sets node scheduling parameter
  * @pi: devlink struct instance
......
@@ -20,5 +20,6 @@ void ice_devlink_destroy_regions(struct ice_pf *pf);
 
 int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi);
 void ice_tear_down_devlink_rate_tree(struct ice_pf *pf);
+void ice_devlink_rate_clear_tx_topology(struct ice_vsi *vsi);
 
 #endif /* _ICE_DEVLINK_H_ */
@@ -7,8 +7,9 @@
 #include <net/devlink.h>
 
 #ifdef CONFIG_ICE_SWITCHDEV
-void ice_eswitch_release(struct ice_pf *pf);
-int ice_eswitch_configure(struct ice_pf *pf);
+void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf);
+int
+ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf);
 int ice_eswitch_rebuild(struct ice_pf *pf);
 
 int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
@@ -17,7 +18,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
                      struct netlink_ext_ack *extack);
 bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);
 
-void ice_eswitch_update_repr(struct ice_vsi *vsi);
+void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi);
 
 void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);
@@ -25,8 +26,15 @@ void ice_eswitch_set_target_vsi(struct sk_buff *skb,
                                 struct ice_tx_offload_params *off);
 netdev_tx_t
 ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change);
 
 #else /* CONFIG_ICE_SWITCHDEV */
-static inline void ice_eswitch_release(struct ice_pf *pf) { }
+static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }
+
+static inline int
+ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
+{
+    return -EOPNOTSUPP;
+}
 
 static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { }
@@ -34,7 +42,8 @@ static inline void
 ice_eswitch_set_target_vsi(struct sk_buff *skb,
                            struct ice_tx_offload_params *off) { }
 
-static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { }
+static inline void
+ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) { }
 
 static inline int ice_eswitch_configure(struct ice_pf *pf)
 {
@@ -68,5 +77,8 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
     return NETDEV_TX_BUSY;
 }
+
+static inline void
+ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) { }
 #endif /* CONFIG_ICE_SWITCHDEV */
 #endif /* _ICE_ESWITCH_H_ */
@@ -893,10 +893,14 @@ ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
         ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
     }
 
-    if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back)
+    if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) {
         vsi->back->br_port = NULL;
-    else if (vsi->vf && vsi->vf->repr)
-        vsi->vf->repr->br_port = NULL;
+    } else {
+        struct ice_repr *repr = ice_repr_get_by_vsi(vsi);
+
+        if (repr)
+            repr->br_port = NULL;
+    }
 
     xa_erase(&bridge->ports, br_port->vsi_idx);
     ice_eswitch_br_port_vlans_flush(br_port);
@@ -947,7 +951,7 @@ ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
 static int
 ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf)
 {
-    struct ice_vsi *vsi = pf->switchdev.uplink_vsi;
+    struct ice_vsi *vsi = pf->eswitch.uplink_vsi;
     struct ice_esw_br_port *br_port;
     int err;
@@ -1185,7 +1189,7 @@ ice_eswitch_br_port_event(struct notifier_block *nb,
 static void
 ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
 {
-    struct ice_esw_br_offloads *br_offloads = pf->switchdev.br_offloads;
+    struct ice_esw_br_offloads *br_offloads = pf->eswitch.br_offloads;
 
     ASSERT_RTNL();
@@ -1194,7 +1198,7 @@ ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
 
     ice_eswitch_br_deinit(br_offloads, br_offloads->bridge);
 
-    pf->switchdev.br_offloads = NULL;
+    pf->eswitch.br_offloads = NULL;
     kfree(br_offloads);
 }
@@ -1205,14 +1209,14 @@ ice_eswitch_br_offloads_alloc(struct ice_pf *pf)
 
     ASSERT_RTNL();
 
-    if (pf->switchdev.br_offloads)
+    if (pf->eswitch.br_offloads)
         return ERR_PTR(-EEXIST);
 
     br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL);
     if (!br_offloads)
         return ERR_PTR(-ENOMEM);
 
-    pf->switchdev.br_offloads = br_offloads;
+    pf->eswitch.br_offloads = br_offloads;
     br_offloads->pf = pf;
 
     return br_offloads;
@@ -1223,7 +1227,7 @@ ice_eswitch_br_offloads_deinit(struct ice_pf *pf)
 {
     struct ice_esw_br_offloads *br_offloads;
 
-    br_offloads = pf->switchdev.br_offloads;
+    br_offloads = pf->eswitch.br_offloads;
     if (!br_offloads)
         return;
......
@@ -212,11 +212,18 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
                                           vsi->alloc_txq));
         break;
     case ICE_VSI_SWITCHDEV_CTRL:
-        /* The number of queues for ctrl VSI is equal to number of VFs.
+        /* The number of queues for ctrl VSI is equal to number of PRs
          * Each ring is associated to the corresponding VF_PR netdev.
+         * Tx and Rx rings are always equal
          */
-        vsi->alloc_txq = ice_get_num_vfs(pf);
-        vsi->alloc_rxq = vsi->alloc_txq;
+        if (vsi->req_txq && vsi->req_rxq) {
+            vsi->alloc_txq = vsi->req_txq;
+            vsi->alloc_rxq = vsi->req_rxq;
+        } else {
+            vsi->alloc_txq = 1;
+            vsi->alloc_rxq = 1;
+        }
         vsi->num_q_vectors = 1;
         break;
     case ICE_VSI_VF:
@@ -519,16 +526,14 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d
     struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
     struct ice_pf *pf = q_vector->vsi->back;
-    struct ice_vf *vf;
-    unsigned int bkt;
+    struct ice_repr *repr;
+    unsigned long id;
 
     if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
         return IRQ_HANDLED;
 
-    rcu_read_lock();
-    ice_for_each_vf_rcu(pf, bkt, vf)
-        napi_schedule(&vf->repr->q_vector->napi);
-    rcu_read_unlock();
+    xa_for_each(&pf->eswitch.reprs, id, repr)
+        napi_schedule(&repr->q_vector->napi);
 
     return IRQ_HANDLED;
 }
@@ -3071,27 +3076,26 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
 }
 
 /**
- * ice_vsi_realloc_stat_arrays - Frees unused stat structures
+ * ice_vsi_realloc_stat_arrays - Frees unused stat structures or alloc new ones
  * @vsi: VSI pointer
- * @prev_txq: Number of Tx rings before ring reallocation
- * @prev_rxq: Number of Rx rings before ring reallocation
  */
-static void
-ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
+static int
+ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
 {
+    u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq;
+    u16 req_rxq = vsi->req_rxq ? vsi->req_rxq : vsi->alloc_rxq;
+    struct ice_ring_stats **tx_ring_stats;
+    struct ice_ring_stats **rx_ring_stats;
     struct ice_vsi_stats *vsi_stat;
     struct ice_pf *pf = vsi->back;
+    u16 prev_txq = vsi->alloc_txq;
+    u16 prev_rxq = vsi->alloc_rxq;
     int i;
 
-    if (!prev_txq || !prev_rxq)
-        return;
-    if (vsi->type == ICE_VSI_CHNL)
-        return;
-
     vsi_stat = pf->vsi_stats[vsi->idx];
 
-    if (vsi->num_txq < prev_txq) {
-        for (i = vsi->num_txq; i < prev_txq; i++) {
+    if (req_txq < prev_txq) {
+        for (i = req_txq; i < prev_txq; i++) {
             if (vsi_stat->tx_ring_stats[i]) {
                 kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
                 WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
@@ -3099,14 +3103,36 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
         }
     }
 
-    if (vsi->num_rxq < prev_rxq) {
-        for (i = vsi->num_rxq; i < prev_rxq; i++) {
+    tx_ring_stats = vsi_stat->tx_ring_stats;
+    vsi_stat->tx_ring_stats =
+        krealloc_array(vsi_stat->tx_ring_stats, req_txq,
+                       sizeof(*vsi_stat->tx_ring_stats),
+                       GFP_KERNEL | __GFP_ZERO);
+    if (!vsi_stat->tx_ring_stats) {
+        vsi_stat->tx_ring_stats = tx_ring_stats;
+        return -ENOMEM;
+    }
+
+    if (req_rxq < prev_rxq) {
+        for (i = req_rxq; i < prev_rxq; i++) {
             if (vsi_stat->rx_ring_stats[i]) {
                 kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
                 WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
             }
         }
     }
 
+    rx_ring_stats = vsi_stat->rx_ring_stats;
+    vsi_stat->rx_ring_stats =
+        krealloc_array(vsi_stat->rx_ring_stats, req_rxq,
+                       sizeof(*vsi_stat->rx_ring_stats),
+                       GFP_KERNEL | __GFP_ZERO);
+    if (!vsi_stat->rx_ring_stats) {
+        vsi_stat->rx_ring_stats = rx_ring_stats;
+        return -ENOMEM;
+    }
+
+    return 0;
 }
 
 /**
@@ -3123,9 +3149,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
 {
     struct ice_vsi_cfg_params params = {};
     struct ice_coalesce_stored *coalesce;
-    int ret, prev_txq, prev_rxq;
     int prev_num_q_vectors = 0;
     struct ice_pf *pf;
+    int ret;
 
     if (!vsi)
         return -EINVAL;
@@ -3144,8 +3170,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
 
     prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
 
-    prev_txq = vsi->num_txq;
-    prev_rxq = vsi->num_rxq;
+    ret = ice_vsi_realloc_stat_arrays(vsi);
+    if (ret)
+        goto err_vsi_cfg;
 
     ice_vsi_decfg(vsi);
     ret = ice_vsi_cfg_def(vsi, &params);
@@ -3163,8 +3190,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
         return ice_schedule_reset(pf, ICE_RESET_PFR);
     }
 
-    ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq);
-
     ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
     kfree(coalesce);
......
@@ -4702,6 +4702,8 @@ static void ice_deinit_features(struct ice_pf *pf)
         ice_ptp_release(pf);
     if (test_bit(ICE_FLAG_DPLL, pf->flags))
         ice_dpll_deinit(pf);
+    if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+        xa_destroy(&pf->eswitch.reprs);
 }
 
 static void ice_init_wakeup(struct ice_pf *pf)
@@ -7410,9 +7412,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
         ice_ptp_cfg_timestamp(pf, true);
     }
 
-    err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
+    err = ice_eswitch_rebuild(pf);
     if (err) {
-        dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
+        dev_err(dev, "Switchdev rebuild failed: %d\n", err);
         goto err_vsi_rebuild;
     }
 
......
@@ -14,7 +14,7 @@
  */
 static int ice_repr_get_sw_port_id(struct ice_repr *repr)
 {
-    return repr->vf->pf->hw.port_info->lport;
+    return repr->src_vsi->back->hw.port_info->lport;
 }
 
 /**
@@ -35,7 +35,7 @@ ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len)
         return -EOPNOTSUPP;
 
     res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr),
-                   repr->vf->vf_id);
+                   repr->id);
     if (res <= 0)
         return -EOPNOTSUPP;
     return 0;
@@ -278,25 +278,67 @@ ice_repr_reg_netdev(struct net_device *netdev)
     return register_netdev(netdev);
 }
 
+static void ice_repr_remove_node(struct devlink_port *devlink_port)
+{
+    devl_lock(devlink_port->devlink);
+    devl_rate_leaf_destroy(devlink_port);
+    devl_unlock(devlink_port->devlink);
+}
+
 /**
- * ice_repr_add - add representor for VF
- * @vf: pointer to VF structure
+ * ice_repr_rem - remove representor from VF
+ * @repr: pointer to representor structure
  */
-static int ice_repr_add(struct ice_vf *vf)
+static void ice_repr_rem(struct ice_repr *repr)
 {
+    kfree(repr->q_vector);
+    free_netdev(repr->netdev);
+    kfree(repr);
+}
+
+/**
+ * ice_repr_rem_vf - remove representor from VF
+ * @repr: pointer to representor structure
+ */
+void ice_repr_rem_vf(struct ice_repr *repr)
+{
+    ice_repr_remove_node(&repr->vf->devlink_port);
+    unregister_netdev(repr->netdev);
+    ice_devlink_destroy_vf_port(repr->vf);
+    ice_virtchnl_set_dflt_ops(repr->vf);
+    ice_repr_rem(repr);
+}
+
+static void ice_repr_set_tx_topology(struct ice_pf *pf)
+{
+    struct devlink *devlink;
+
+    /* only export if ADQ and DCB disabled and eswitch enabled*/
+    if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) ||
+        !ice_is_switchdev_running(pf))
+        return;
+
+    devlink = priv_to_devlink(pf);
+    ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
+}
+
+/**
+ * ice_repr_add - add representor for generic VSI
+ * @pf: pointer to PF structure
+ * @src_vsi: pointer to VSI structure of device to represent
+ * @parent_mac: device MAC address
+ */
+static struct ice_repr *
+ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
+{
     struct ice_q_vector *q_vector;
     struct ice_netdev_priv *np;
     struct ice_repr *repr;
-    struct ice_vsi *vsi;
     int err;
 
-    vsi = ice_get_vf_vsi(vf);
-    if (!vsi)
-        return -EINVAL;
-
     repr = kzalloc(sizeof(*repr), GFP_KERNEL);
     if (!repr)
-        return -ENOMEM;
+        return ERR_PTR(-ENOMEM);
 
     repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
     if (!repr->netdev) {
@@ -304,9 +346,7 @@ static int ice_repr_add(struct ice_vf *vf)
         goto err_alloc;
     }
 
-    repr->src_vsi = vsi;
-    repr->vf = vf;
-    vf->repr = repr;
+    repr->src_vsi = src_vsi;
     np = netdev_priv(repr->netdev);
     np->repr = repr;
@@ -316,10 +356,40 @@ static int ice_repr_add(struct ice_vf *vf)
         goto err_alloc_q_vector;
     }
     repr->q_vector = q_vector;
+    repr->q_id = repr->id;
+
+    ether_addr_copy(repr->parent_mac, parent_mac);
+
+    return repr;
+
+err_alloc_q_vector:
+    free_netdev(repr->netdev);
+err_alloc:
+    kfree(repr);
+    return ERR_PTR(err);
+}
+
+struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
+{
+    struct ice_repr *repr;
+    struct ice_vsi *vsi;
+    int err;
+
+    vsi = ice_get_vf_vsi(vf);
+    if (!vsi)
+        return ERR_PTR(-ENOENT);
 
     err = ice_devlink_create_vf_port(vf);
     if (err)
-        goto err_devlink;
+        return ERR_PTR(err);
+
+    repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr);
+    if (IS_ERR(repr)) {
+        err = PTR_ERR(repr);
+        goto err_repr_add;
+    }
+
+    repr->vf = vf;
 
     repr->netdev->min_mtu = ETH_MIN_MTU;
     repr->netdev->max_mtu = ICE_MAX_MTU;
@@ -331,100 +401,23 @@ static int ice_repr_add(struct ice_vf *vf)
         goto err_netdev;
 
     ice_virtchnl_set_repr_ops(vf);
+    ice_repr_set_tx_topology(vf->pf);
 
-    return 0;
+    return repr;
 
 err_netdev:
+    ice_repr_rem(repr);
+err_repr_add:
     ice_devlink_destroy_vf_port(vf);
-err_devlink:
-    kfree(repr->q_vector);
-    vf->repr->q_vector = NULL;
-err_alloc_q_vector:
-    free_netdev(repr->netdev);
-    repr->netdev = NULL;
-err_alloc:
-    kfree(repr);
-    vf->repr = NULL;
-
-    return err;
+    return ERR_PTR(err);
 }
 
-/**
- * ice_repr_rem - remove representor from VF
- * @vf: pointer to VF structure
- */
-static void ice_repr_rem(struct ice_vf *vf)
-{
-    if (!vf->repr)
-        return;
-
-    kfree(vf->repr->q_vector);
-    vf->repr->q_vector = NULL;
-    unregister_netdev(vf->repr->netdev);
-    ice_devlink_destroy_vf_port(vf);
-    free_netdev(vf->repr->netdev);
-    vf->repr->netdev = NULL;
-    kfree(vf->repr);
-    vf->repr = NULL;
-
-    ice_virtchnl_set_dflt_ops(vf);
-}
-
-/**
- * ice_repr_rem_from_all_vfs - remove port representor for all VFs
- * @pf: pointer to PF structure
- */
-void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
-{
-    struct devlink *devlink;
-    struct ice_vf *vf;
-    unsigned int bkt;
-
-    lockdep_assert_held(&pf->vfs.table_lock);
-
-    ice_for_each_vf(pf, bkt, vf)
-        ice_repr_rem(vf);
-
-    /* since all port representors are destroyed, there is
-     * no point in keeping the nodes
-     */
-    devlink = priv_to_devlink(pf);
-    devl_lock(devlink);
-    devl_rate_nodes_destroy(devlink);
-    devl_unlock(devlink);
-}
-
-/**
- * ice_repr_add_for_all_vfs - add port representor for all VFs
- * @pf: pointer to PF structure
- */
-int ice_repr_add_for_all_vfs(struct ice_pf *pf)
-{
-    struct devlink *devlink;
-    struct ice_vf *vf;
-    unsigned int bkt;
-    int err;
-
-    lockdep_assert_held(&pf->vfs.table_lock);
-
-    ice_for_each_vf(pf, bkt, vf) {
-        err = ice_repr_add(vf);
-        if (err)
-            goto err;
-    }
-
-    /* only export if ADQ and DCB disabled */
-    if (ice_is_adq_active(pf) || ice_is_dcb_active(pf))
-        return 0;
-
-    devlink = priv_to_devlink(pf);
-    ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
-
-    return 0;
-
-err:
-    ice_repr_rem_from_all_vfs(pf);
-
-    return err;
-}
+struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi)
+{
+    if (!vsi->vf)
+        return NULL;
+
+    return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id);
+}
 
 /**
......
@@ -13,14 +13,17 @@ struct ice_repr {
     struct net_device *netdev;
     struct metadata_dst *dst;
     struct ice_esw_br_port *br_port;
+    int q_id;
+    u32 id;
+    u8 parent_mac[ETH_ALEN];
 #ifdef CONFIG_ICE_SWITCHDEV
     /* info about slow path rule */
     struct ice_rule_query_data sp_rule;
 #endif
 };
 
-int ice_repr_add_for_all_vfs(struct ice_pf *pf);
-void ice_repr_rem_from_all_vfs(struct ice_pf *pf);
+struct ice_repr *ice_repr_add_vf(struct ice_vf *vf);
+void ice_repr_rem_vf(struct ice_repr *repr);
 
 void ice_repr_start_tx_queues(struct ice_repr *repr);
 void ice_repr_stop_tx_queues(struct ice_repr *repr);
@@ -29,4 +32,6 @@ void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi);
 
 struct ice_repr *ice_netdev_to_repr(struct net_device *netdev);
 bool ice_is_port_repr_netdev(const struct net_device *netdev);
+
+struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi);
 #endif
@@ -172,13 +172,14 @@ void ice_free_vfs(struct ice_pf *pf)
     else
         dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
 
-    mutex_lock(&vfs->table_lock);
+    ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf));
 
-    ice_eswitch_release(pf);
+    mutex_lock(&vfs->table_lock);
 
     ice_for_each_vf(pf, bkt, vf) {
         mutex_lock(&vf->cfg_lock);
 
+        ice_eswitch_detach(pf, vf);
         ice_dis_vf_qs(vf);
 
         if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
@@ -614,6 +615,14 @@ static int ice_start_vfs(struct ice_pf *pf)
             goto teardown;
         }
 
+        retval = ice_eswitch_attach(pf, vf);
+        if (retval) {
+            dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d",
+                    vf->vf_id, retval);
+            ice_vf_vsi_release(vf);
+            goto teardown;
+        }
+
         set_bit(ICE_VF_STATE_INIT, vf->vf_states);
         ice_ena_vf_mappings(vf);
         wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
@@ -923,6 +932,7 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
         goto err_unroll_sriov;
     }
 
+    ice_eswitch_reserve_cp_queues(pf, num_vfs);
     ret = ice_start_vfs(pf);
     if (ret) {
         dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
@@ -932,12 +942,6 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
 
     clear_bit(ICE_VF_DIS, pf->state);
 
-    ret = ice_eswitch_configure(pf);
-    if (ret) {
-        dev_err(dev, "Failed to configure eswitch, err %d\n", ret);
-        goto err_unroll_sriov;
-    }
-
     /* rearm global interrupts */
     if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
         ice_irq_dynamic_ena(hw, NULL, NULL);
......
@@ -653,7 +653,7 @@ static int ice_tc_setup_redirect_action(struct net_device *filter_dev,
                ice_tc_is_dev_uplink(target_dev)) {
         repr = ice_netdev_to_repr(filter_dev);
 
-        fltr->dest_vsi = repr->src_vsi->back->switchdev.uplink_vsi;
+        fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
         fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
     } else if (ice_tc_is_dev_uplink(filter_dev) &&
                ice_is_port_repr_netdev(target_dev)) {
@@ -765,7 +765,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
         rule_info.sw_act.src = hw->pf_id;
         rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
     } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
-               fltr->dest_vsi == vsi->back->switchdev.uplink_vsi) {
+               fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) {
         /* VF to Uplink */
         rule_info.sw_act.flag |= ICE_FLTR_TX;
         rule_info.sw_act.src = vsi->idx;
......
@@ -760,6 +760,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
     ice_for_each_vf(pf, bkt, vf) {
         mutex_lock(&vf->cfg_lock);
 
+        ice_eswitch_detach(pf, vf);
         vf->driver_caps = 0;
         ice_vc_set_default_allowlist(vf);
@@ -775,13 +776,11 @@ void ice_reset_all_vfs(struct ice_pf *pf)
 
         ice_vf_rebuild_vsi(vf);
         ice_vf_post_vsi_rebuild(vf);
+
+        ice_eswitch_attach(pf, vf);
 
         mutex_unlock(&vf->cfg_lock);
     }
 
-    if (ice_is_eswitch_mode_switchdev(pf))
-        if (ice_eswitch_rebuild(pf))
-            dev_warn(dev, "eswitch rebuild failed\n");
-
     ice_flush(hw);
     clear_bit(ICE_VF_DIS, pf->state);
@@ -928,7 +927,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
         goto out_unlock;
     }
 
-    ice_eswitch_update_repr(vsi);
+    ice_eswitch_update_repr(vf->repr_id, vsi);
 
     /* if the VF has been reset allow it to come up again */
     ice_mbx_clear_malvf(&vf->mbx_info);
......
@@ -130,7 +130,7 @@ struct ice_vf {
     struct ice_mdd_vf_events mdd_tx_events;
     DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
 
-    struct ice_repr *repr;
+    unsigned long repr_id;
     const struct ice_virtchnl_ops *virtchnl_ops;
     const struct ice_vf_ops *vf_ops;
......