Commit 4f8d1d3a authored by Moshe Tal, committed by Saeed Mahameed

net/mlx5e: HTB, move ids to selq_params struct

HTB id fields are needed for selecting queue. Moving them to the
selq_params struct will simplify synchronization between control flow
and mlx5e_select_queues and will keep the IDs in the hot cacheline of
mlx5e_selq_params.

Replace mlx5e_selq_prepare() with separate functions that change subsets
of parameters, while keeping the rest.

This will also be useful for hiding the mlx5e_htb structure from the rest
of the driver in a later patch in this series.
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Moshe Tal <moshet@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Maxim Mikityanskiy <maximmi@nvidia.com>
parent efe31799
...@@ -904,8 +904,6 @@ struct mlx5e_htb { ...@@ -904,8 +904,6 @@ struct mlx5e_htb {
DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES); DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
struct mlx5e_sq_stats **qos_sq_stats; struct mlx5e_sq_stats **qos_sq_stats;
u16 max_qos_sqs; u16 max_qos_sqs;
u16 maj_id;
u16 defcls;
}; };
struct mlx5e_trap; struct mlx5e_trap;
......
...@@ -144,9 +144,9 @@ static void mlx5e_sw_node_delete(struct mlx5e_priv *priv, struct mlx5e_qos_node ...@@ -144,9 +144,9 @@ static void mlx5e_sw_node_delete(struct mlx5e_priv *priv, struct mlx5e_qos_node
static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid) static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
{ {
/* These channel params are safe to access from the datapath, because: /* These channel params are safe to access from the datapath, because:
* 1. This function is called only after checking priv->htb.maj_id != 0, * 1. This function is called only after checking selq->htb_maj_id != 0,
* and the number of queues can't change while HTB offload is active. * and the number of queues can't change while HTB offload is active.
* 2. When priv->htb.maj_id becomes 0, synchronize_rcu waits for * 2. When selq->htb_maj_id becomes 0, synchronize_rcu waits for
* mlx5e_select_queue to finish while holding priv->state_lock, * mlx5e_select_queue to finish while holding priv->state_lock,
* preventing other code from changing the number of queues. * preventing other code from changing the number of queues.
*/ */
...@@ -417,7 +417,7 @@ int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs) ...@@ -417,7 +417,7 @@ int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
struct mlx5e_qos_node *node = NULL; struct mlx5e_qos_node *node = NULL;
int bkt, err; int bkt, err;
if (!priv->htb.maj_id) if (!mlx5e_selq_is_htb_enabled(&priv->selq))
return 0; return 0;
err = mlx5e_qos_alloc_queues(priv, chs); err = mlx5e_qos_alloc_queues(priv, chs);
...@@ -501,10 +501,10 @@ mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls, ...@@ -501,10 +501,10 @@ mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
mlx5e_selq_prepare_htb(&priv->selq, htb_maj_id, htb_defcls);
opened = test_bit(MLX5E_STATE_OPENED, &priv->state); opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
if (opened) { if (opened) {
mlx5e_selq_prepare(&priv->selq, &priv->channels.params, true);
err = mlx5e_qos_alloc_queues(priv, &priv->channels); err = mlx5e_qos_alloc_queues(priv, &priv->channels);
if (err) if (err)
goto err_cancel_selq; goto err_cancel_selq;
...@@ -522,14 +522,7 @@ mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls, ...@@ -522,14 +522,7 @@ mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
goto err_sw_node_delete; goto err_sw_node_delete;
} }
WRITE_ONCE(priv->htb.defcls, htb_defcls); mlx5e_selq_apply(&priv->selq);
/* Order maj_id after defcls - pairs with
* mlx5e_select_queue/mlx5e_select_htb_queues.
*/
smp_store_release(&priv->htb.maj_id, htb_maj_id);
if (opened)
mlx5e_selq_apply(&priv->selq);
return 0; return 0;
...@@ -556,11 +549,9 @@ static int mlx5e_htb_root_del(struct mlx5e_priv *priv) ...@@ -556,11 +549,9 @@ static int mlx5e_htb_root_del(struct mlx5e_priv *priv)
*/ */
synchronize_net(); synchronize_net();
mlx5e_selq_prepare(&priv->selq, &priv->channels.params, false); mlx5e_selq_prepare_htb(&priv->selq, 0, 0);
mlx5e_selq_apply(&priv->selq); mlx5e_selq_apply(&priv->selq);
WRITE_ONCE(priv->htb.maj_id, 0);
root = mlx5e_sw_node_find(priv, MLX5E_HTB_CLASSID_ROOT); root = mlx5e_sw_node_find(priv, MLX5E_HTB_CLASSID_ROOT);
if (!root) { if (!root) {
qos_err(priv->mdev, "Failed to find the root node in the QoS tree\n"); qos_err(priv->mdev, "Failed to find the root node in the QoS tree\n");
......
...@@ -19,6 +19,8 @@ struct mlx5e_selq_params { ...@@ -19,6 +19,8 @@ struct mlx5e_selq_params {
bool is_ptp : 1; bool is_ptp : 1;
}; };
}; };
u16 htb_maj_id;
u16 htb_defcls;
}; };
int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock) int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
...@@ -44,6 +46,8 @@ int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock) ...@@ -44,6 +46,8 @@ int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
.num_tcs = 1, .num_tcs = 1,
.is_htb = false, .is_htb = false,
.is_ptp = false, .is_ptp = false,
.htb_maj_id = 0,
.htb_defcls = 0,
}; };
rcu_assign_pointer(selq->active, init_params); rcu_assign_pointer(selq->active, init_params);
...@@ -64,21 +68,50 @@ void mlx5e_selq_cleanup(struct mlx5e_selq *selq) ...@@ -64,21 +68,50 @@ void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
selq->standby = NULL; selq->standby = NULL;
} }
void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb) void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params)
{ {
struct mlx5e_selq_params *selq_active;
lockdep_assert_held(selq->state_lock); lockdep_assert_held(selq->state_lock);
WARN_ON_ONCE(selq->is_prepared); WARN_ON_ONCE(selq->is_prepared);
selq->is_prepared = true; selq->is_prepared = true;
selq_active = rcu_dereference_protected(selq->active,
lockdep_is_held(selq->state_lock));
*selq->standby = *selq_active;
selq->standby->num_channels = params->num_channels; selq->standby->num_channels = params->num_channels;
selq->standby->num_tcs = mlx5e_get_dcb_num_tc(params); selq->standby->num_tcs = mlx5e_get_dcb_num_tc(params);
selq->standby->num_regular_queues = selq->standby->num_regular_queues =
selq->standby->num_channels * selq->standby->num_tcs; selq->standby->num_channels * selq->standby->num_tcs;
selq->standby->is_htb = htb;
selq->standby->is_ptp = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS); selq->standby->is_ptp = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS);
} }
/* Report whether an HTB root qdisc is currently offloaded.
 *
 * A non-zero HTB major handle ID in the active selq parameters means
 * mlx5e_htb_root_add() has installed a root; zero means HTB is off.
 * Must be called with selq->state_lock held (enforced via
 * lockdep_is_held in the protected RCU dereference).
 */
bool mlx5e_selq_is_htb_enabled(struct mlx5e_selq *selq)
{
	struct mlx5e_selq_params *active;

	active = rcu_dereference_protected(selq->active,
					   lockdep_is_held(selq->state_lock));

	return active->htb_maj_id != 0;
}
/* Stage new HTB parameters in the standby selq snapshot.
 *
 * Copies the currently active parameters into the standby buffer, then
 * overwrites only the HTB-related fields. Passing htb_maj_id == 0
 * disables HTB (is_htb becomes false). The change takes effect when the
 * caller invokes mlx5e_selq_apply(). Must run under selq->state_lock;
 * is_prepared guards against a second prepare before apply/cancel.
 */
void mlx5e_selq_prepare_htb(struct mlx5e_selq *selq, u16 htb_maj_id, u16 htb_defcls)
{
	struct mlx5e_selq_params *active;

	lockdep_assert_held(selq->state_lock);
	WARN_ON_ONCE(selq->is_prepared);

	selq->is_prepared = true;

	active = rcu_dereference_protected(selq->active,
					   lockdep_is_held(selq->state_lock));
	/* Start from the active snapshot so non-HTB fields are preserved. */
	*selq->standby = *active;
	selq->standby->is_htb = htb_maj_id;
	selq->standby->htb_maj_id = htb_maj_id;
	selq->standby->htb_defcls = htb_defcls;
}
void mlx5e_selq_apply(struct mlx5e_selq *selq) void mlx5e_selq_apply(struct mlx5e_selq *selq)
{ {
struct mlx5e_selq_params *old_params; struct mlx5e_selq_params *old_params;
...@@ -137,15 +170,16 @@ static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb, ...@@ -137,15 +170,16 @@ static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb,
return selq->num_regular_queues + up; return selq->num_regular_queues + up;
} }
static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb) static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
struct mlx5e_selq_params *selq)
{ {
u16 classid; u16 classid;
/* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */ /* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
if ((TC_H_MAJ(skb->priority) >> 16) == smp_load_acquire(&priv->htb.maj_id)) if ((TC_H_MAJ(skb->priority) >> 16) == selq->htb_maj_id)
classid = TC_H_MIN(skb->priority); classid = TC_H_MIN(skb->priority);
else else
classid = READ_ONCE(priv->htb.defcls); classid = selq->htb_defcls;
if (!classid) if (!classid)
return 0; return 0;
...@@ -187,10 +221,10 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, ...@@ -187,10 +221,10 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
up * selq->num_channels; up * selq->num_channels;
} }
if (unlikely(selq->is_htb)) { if (unlikely(selq->htb_maj_id)) {
/* num_tcs == 1, shortcut for PTP */ /* num_tcs == 1, shortcut for PTP */
txq_ix = mlx5e_select_htb_queue(priv, skb); txq_ix = mlx5e_select_htb_queue(priv, skb, selq);
if (txq_ix > 0) if (txq_ix > 0)
return txq_ix; return txq_ix;
......
...@@ -21,7 +21,9 @@ struct sk_buff; ...@@ -21,7 +21,9 @@ struct sk_buff;
int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock); int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock);
void mlx5e_selq_cleanup(struct mlx5e_selq *selq); void mlx5e_selq_cleanup(struct mlx5e_selq *selq);
void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb); void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params);
void mlx5e_selq_prepare_htb(struct mlx5e_selq *selq, u16 htb_maj_id, u16 htb_defcls);
bool mlx5e_selq_is_htb_enabled(struct mlx5e_selq *selq);
void mlx5e_selq_apply(struct mlx5e_selq *selq); void mlx5e_selq_apply(struct mlx5e_selq *selq);
void mlx5e_selq_cancel(struct mlx5e_selq *selq); void mlx5e_selq_cancel(struct mlx5e_selq *selq);
......
...@@ -459,7 +459,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, ...@@ -459,7 +459,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
* because the numeration of the QoS SQs will change, while per-queue * because the numeration of the QoS SQs will change, while per-queue
* qdiscs are attached. * qdiscs are attached.
*/ */
if (priv->htb.maj_id) { if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
err = -EINVAL; err = -EINVAL;
netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the number of channels\n", netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the number of channels\n",
__func__); __func__);
...@@ -2075,7 +2075,7 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable) ...@@ -2075,7 +2075,7 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
* the numeration of the QoS SQs will change, while per-queue qdiscs are * the numeration of the QoS SQs will change, while per-queue qdiscs are
* attached. * attached.
*/ */
if (priv->htb.maj_id) { if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the PTP state\n", netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the PTP state\n",
__func__); __func__);
return -EINVAL; return -EINVAL;
......
...@@ -2839,7 +2839,7 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv, ...@@ -2839,7 +2839,7 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
new_chs.params = *params; new_chs.params = *params;
mlx5e_selq_prepare(&priv->selq, &new_chs.params, !!priv->htb.maj_id); mlx5e_selq_prepare_params(&priv->selq, &new_chs.params);
err = mlx5e_open_channels(priv, &new_chs); err = mlx5e_open_channels(priv, &new_chs);
if (err) if (err)
...@@ -2895,7 +2895,7 @@ int mlx5e_open_locked(struct net_device *netdev) ...@@ -2895,7 +2895,7 @@ int mlx5e_open_locked(struct net_device *netdev)
struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_priv *priv = netdev_priv(netdev);
int err; int err;
mlx5e_selq_prepare(&priv->selq, &priv->channels.params, !!priv->htb.maj_id); mlx5e_selq_prepare_params(&priv->selq, &priv->channels.params);
set_bit(MLX5E_STATE_OPENED, &priv->state); set_bit(MLX5E_STATE_OPENED, &priv->state);
...@@ -3406,7 +3406,7 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, ...@@ -3406,7 +3406,7 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
/* MQPRIO is another toplevel qdisc that can't be attached /* MQPRIO is another toplevel qdisc that can't be attached
* simultaneously with the offloaded HTB. * simultaneously with the offloaded HTB.
*/ */
if (WARN_ON(priv->htb.maj_id)) if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv->selq)))
return -EINVAL; return -EINVAL;
switch (mqprio->mode) { switch (mqprio->mode) {
...@@ -3672,6 +3672,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable) ...@@ -3672,6 +3672,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
static int set_feature_hw_tc(struct net_device *netdev, bool enable) static int set_feature_hw_tc(struct net_device *netdev, bool enable)
{ {
struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_priv *priv = netdev_priv(netdev);
int err = 0;
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) { if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
...@@ -3681,12 +3682,14 @@ static int set_feature_hw_tc(struct net_device *netdev, bool enable) ...@@ -3681,12 +3682,14 @@ static int set_feature_hw_tc(struct net_device *netdev, bool enable)
} }
#endif #endif
if (!enable && priv->htb.maj_id) { mutex_lock(&priv->state_lock);
if (!enable && mlx5e_selq_is_htb_enabled(&priv->selq)) {
netdev_err(netdev, "Active HTB offload, can't turn hw_tc_offload off\n"); netdev_err(netdev, "Active HTB offload, can't turn hw_tc_offload off\n");
return -EINVAL; err = -EINVAL;
} }
mutex_unlock(&priv->state_lock);
return 0; return err;
} }
static int set_feature_rx_all(struct net_device *netdev, bool enable) static int set_feature_rx_all(struct net_device *netdev, bool enable)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment