Commit 5be9541a authored by David S. Miller

Merge tag 'mlx5-fixes-2017-10-26' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2017-10-26

The series includes some misc fixes for the mlx5 core and ethernet driver.
Please pull and let me know if there's any problem.

For -Stable:
net/mlx5e: Properly deal with encap flows add/del under neigh update (kernels >= 4.12)
net/mlx5: Fix health work queue spin lock to IRQ safe  (kernels >= 4.13)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9618aec3 be0f161e
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -77,35 +77,41 @@ static void add_delayed_event(struct mlx5_priv *priv,
 	list_add_tail(&delayed_event->list, &priv->waiting_events_list);
 }
 
-static void fire_delayed_event_locked(struct mlx5_device_context *dev_ctx,
-				      struct mlx5_core_dev *dev,
-				      struct mlx5_priv *priv)
+static void delayed_event_release(struct mlx5_device_context *dev_ctx,
+				  struct mlx5_priv *priv)
 {
+	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
 	struct mlx5_delayed_event *de;
 	struct mlx5_delayed_event *n;
+	struct list_head temp;
 
-	/* stop delaying events */
-	priv->is_accum_events = false;
+	INIT_LIST_HEAD(&temp);
+
+	spin_lock_irq(&priv->ctx_lock);
 
-	/* fire all accumulated events before new event comes */
-	list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
+	priv->is_accum_events = false;
+	list_splice_init(&priv->waiting_events_list, &temp);
+	if (!dev_ctx->context)
+		goto out;
+	list_for_each_entry_safe(de, n, &priv->waiting_events_list, list)
 		dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
+
+out:
+	spin_unlock_irq(&priv->ctx_lock);
+
+	list_for_each_entry_safe(de, n, &temp, list) {
 		list_del(&de->list);
 		kfree(de);
 	}
 }
 
-static void cleanup_delayed_evets(struct mlx5_priv *priv)
+/* accumulating events that can come after mlx5_ib calls to
+ * ib_register_device, till adding that interface to the events list.
+ */
+static void delayed_event_start(struct mlx5_priv *priv)
 {
-	struct mlx5_delayed_event *de;
-	struct mlx5_delayed_event *n;
-
 	spin_lock_irq(&priv->ctx_lock);
-	priv->is_accum_events = false;
-	list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
-		list_del(&de->list);
-		kfree(de);
-	}
+	priv->is_accum_events = true;
 	spin_unlock_irq(&priv->ctx_lock);
 }
@@ -122,11 +128,8 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
 		return;
 
 	dev_ctx->intf = intf;
-	/* accumulating events that can come after mlx5_ib calls to
-	 * ib_register_device, till adding that interface to the events list.
-	 */
-	priv->is_accum_events = true;
+
+	delayed_event_start(priv);
 
 	dev_ctx->context = intf->add(dev);
 	set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
@@ -137,8 +140,6 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
 		spin_lock_irq(&priv->ctx_lock);
 		list_add_tail(&dev_ctx->list, &priv->ctx_list);
 
-		fire_delayed_event_locked(dev_ctx, dev, priv);
-
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 		if (dev_ctx->intf->pfault) {
 			if (priv->pfault) {
@@ -150,11 +151,12 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
 		}
 #endif
 		spin_unlock_irq(&priv->ctx_lock);
-	} else {
-		kfree(dev_ctx);
-		/* delete all accumulated events */
-		cleanup_delayed_evets(priv);
 	}
+
+	delayed_event_release(dev_ctx, priv);
+
+	if (!dev_ctx->context)
+		kfree(dev_ctx);
 }
 
 static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
@@ -205,17 +207,21 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
 	if (!dev_ctx)
 		return;
 
+	delayed_event_start(priv);
 	if (intf->attach) {
 		if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
-			return;
+			goto out;
 		intf->attach(dev, dev_ctx->context);
 		set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
 	} else {
 		if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
-			return;
+			goto out;
 		dev_ctx->context = intf->add(dev);
 		set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
 	}
+
+out:
+	delayed_event_release(dev_ctx, priv);
 }
 
 void mlx5_attach_device(struct mlx5_core_dev *dev)
@@ -414,8 +420,14 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
 	if (priv->is_accum_events)
 		add_delayed_event(priv, dev, event, param);
 
+	/* After mlx5_detach_device, the dev_ctx->intf is still set and dev_ctx is
+	 * still in priv->ctx_list. In this case, only notify the dev_ctx if its
+	 * ADDED or ATTACHED bit are set.
+	 */
 	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
-		if (dev_ctx->intf->event)
+		if (dev_ctx->intf->event &&
+		    (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) ||
+		     test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)))
 			dev_ctx->intf->event(dev, dev_ctx->context, event, param);
 
 	spin_unlock_irqrestore(&priv->ctx_lock, flags);
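A note on the pattern above: delayed_event_start()/delayed_event_release() now bracket every intf->add()/intf->attach() call, so events arriving while an interface is still registering are queued and later replayed, or discarded if add() produced no context, instead of being delivered to a half-initialized interface. Below is a minimal userspace sketch of that accumulate-splice-replay pattern; all names are invented and the driver's ctx_lock spinlock is omitted for brevity.

```c
/* Toy model of delayed_event_start()/delayed_event_release(); names are
 * illustrative only and locking is left out. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct delayed_event {
        int event;
        struct delayed_event *next;
};

struct dev_state {
        bool is_accum_events;           /* queue instead of deliver? */
        bool have_context;              /* stands in for dev_ctx->context */
        struct delayed_event *head, *tail;
};

static void event_start(struct dev_state *s)
{
        s->is_accum_events = true;      /* like delayed_event_start() */
}

static void post_event(struct dev_state *s, int ev)
{
        if (!s->is_accum_events) {
                printf("deliver %d immediately\n", ev);
                return;
        }
        struct delayed_event *de = calloc(1, sizeof(*de));
        de->event = ev;                 /* append, preserving order */
        if (s->tail)
                s->tail->next = de;
        else
                s->head = de;
        s->tail = de;
}

static void event_release(struct dev_state *s)
{
        struct delayed_event *de = s->head;

        s->is_accum_events = false;     /* stop queueing first */
        s->head = s->tail = NULL;       /* "splice" the pending list away */
        while (de) {
                struct delayed_event *n = de->next;

                if (s->have_context)    /* replay only if add() succeeded */
                        printf("replay %d\n", de->event);
                free(de);               /* freed either way: no leak on failure */
                de = n;
        }
}

int main(void)
{
        struct dev_state s = { .have_context = true };

        event_start(&s);        /* interface starts registering */
        post_event(&s, 1);      /* events racing with registration ... */
        post_event(&s, 2);
        event_release(&s);      /* ... are replayed once add() completed */
        return 0;
}
```

The driver-side subtlety the sketch glosses over is that delayed_event_release() drains the pending list under ctx_lock but frees the entries outside it, via the spliced temp list.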
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -41,6 +41,11 @@
 #define MLX5E_CEE_STATE_UP    1
 #define MLX5E_CEE_STATE_DOWN  0
 
+enum {
+	MLX5E_VENDOR_TC_GROUP_NUM = 7,
+	MLX5E_LOWEST_PRIO_GROUP = 0,
+};
+
 /* If dcbx mode is non-host set the dcbx mode to host.
  */
 static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
@@ -85,6 +90,9 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
+	u8 tc_group[IEEE_8021QAZ_MAX_TCS];
+	bool is_tc_group_6_exist = false;
+	bool is_zero_bw_ets_tc = false;
 	int err = 0;
 	int i;
 
@@ -96,37 +104,64 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
 		err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
 		if (err)
 			return err;
-	}
 
-	for (i = 0; i < ets->ets_cap; i++) {
+		err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
+		if (err)
+			return err;
+
 		err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
 		if (err)
 			return err;
+
+		if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
+		    tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
+			is_zero_bw_ets_tc = true;
+
+		if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
+			is_tc_group_6_exist = true;
+	}
+
+	/* Report 0% ets tc if it exists */
+	if (is_zero_bw_ets_tc) {
+		for (i = 0; i < ets->ets_cap; i++)
+			if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
+				ets->tc_tx_bw[i] = 0;
+	}
+
+	/* Update tc_tsa based on fw setting */
+	for (i = 0; i < ets->ets_cap; i++) {
 		if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
 			priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
+		else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
+			 !is_tc_group_6_exist)
+			priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
 	}
-
 	memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));
 
 	return err;
 }
 
-enum {
-	MLX5E_VENDOR_TC_GROUP_NUM = 7,
-	MLX5E_ETS_TC_GROUP_NUM = 0,
-};
-
 static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
 {
 	bool any_tc_mapped_to_ets = false;
+	bool ets_zero_bw = false;
 	int strict_group;
 	int i;
 
-	for (i = 0; i <= max_tc; i++)
-		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+	for (i = 0; i <= max_tc; i++) {
+		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
 			any_tc_mapped_to_ets = true;
+			if (!ets->tc_tx_bw[i])
+				ets_zero_bw = true;
+		}
+	}
 
-	strict_group = any_tc_mapped_to_ets ? 1 : 0;
+	/* strict group has higher priority than ets group */
+	strict_group = MLX5E_LOWEST_PRIO_GROUP;
+	if (any_tc_mapped_to_ets)
+		strict_group++;
+	if (ets_zero_bw)
+		strict_group++;
 
 	for (i = 0; i <= max_tc; i++) {
 		switch (ets->tc_tsa[i]) {
@@ -137,7 +172,9 @@ static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
 			tc_group[i] = strict_group++;
 			break;
 		case IEEE_8021QAZ_TSA_ETS:
-			tc_group[i] = MLX5E_ETS_TC_GROUP_NUM;
+			tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
+			if (ets->tc_tx_bw[i] && ets_zero_bw)
+				tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
 			break;
 		}
 	}
@@ -146,8 +183,22 @@ static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
 static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
 				 u8 *tc_group, int max_tc)
 {
+	int bw_for_ets_zero_bw_tc = 0;
+	int last_ets_zero_bw_tc = -1;
+	int num_ets_zero_bw = 0;
 	int i;
 
+	for (i = 0; i <= max_tc; i++) {
+		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
+		    !ets->tc_tx_bw[i]) {
+			num_ets_zero_bw++;
+			last_ets_zero_bw_tc = i;
+		}
+	}
+
+	if (num_ets_zero_bw)
+		bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;
+
 	for (i = 0; i <= max_tc; i++) {
 		switch (ets->tc_tsa[i]) {
 		case IEEE_8021QAZ_TSA_VENDOR:
@@ -157,12 +208,26 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
 			tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
 			break;
 		case IEEE_8021QAZ_TSA_ETS:
-			tc_tx_bw[i] = ets->tc_tx_bw[i];
+			tc_tx_bw[i] = ets->tc_tx_bw[i] ?
+				      ets->tc_tx_bw[i] :
+				      bw_for_ets_zero_bw_tc;
 			break;
 		}
 	}
+
+	/* Make sure the total bw for ets zero bw group is 100% */
+	if (last_ets_zero_bw_tc != -1)
+		tc_tx_bw[last_ets_zero_bw_tc] +=
+			MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
 }
 
+/* If there are ETS BW 0,
+ * Set ETS group # to 1 for all ETS non zero BW tcs. Their sum must be 100%.
+ * Set group #0 to all the ETS BW 0 tcs and
+ * equally split the 100% BW between them.
+ * Report both group #0 and #1 as ETS type.
+ * All the tcs in group #0 will be reported with 0% BW.
+ */
 int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
@@ -188,7 +253,6 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
 		return err;
 
 	memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
-
 	return err;
 }
 
@@ -209,17 +273,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
 	}
 
 	/* Validate Bandwidth Sum */
-	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
-			if (!ets->tc_tx_bw[i]) {
-				netdev_err(netdev,
-					   "Failed to validate ETS: BW 0 is illegal\n");
-				return -EINVAL;
-			}
-
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
 			bw_sum += ets->tc_tx_bw[i];
-		}
-	}
 
 	if (bw_sum != 0 && bw_sum != 100) {
 		netdev_err(netdev,
@@ -533,8 +589,7 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
 static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
 				      int pgid, u8 *bw_pct)
 {
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
+	struct ieee_ets ets;
 
 	if (pgid >= CEE_DCBX_MAX_PGS) {
 		netdev_err(netdev,
@@ -542,8 +597,8 @@ static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
 		return;
 	}
 
-	if (mlx5_query_port_tc_bw_alloc(mdev, pgid, bw_pct))
-		*bw_pct = 0;
+	mlx5e_dcbnl_ieee_getets(netdev, &ets);
+	*bw_pct = ets.tc_tx_bw[pgid];
 }
 
 static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
@@ -739,8 +794,6 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
 		ets.prio_tc[i] = i;
 	}
 
-	memcpy(priv->dcbx.tc_tsa, ets.tc_tsa, sizeof(ets.tc_tsa));
-
 	/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
 	ets.prio_tc[0] = 1;
 	ets.prio_tc[1] = 0;
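To see what the zero-BW logic does end to end, consider four TCs where three are ETS with 0% requested and one carries 100%: the zero-BW TCs land in group #0 and split MLX5E_MAX_BW_ALLOC between themselves, with the integer-division remainder folded into the last one so the group sums to exactly 100%. A standalone sketch of just that arithmetic, using made-up TC values:

```c
/* Worked example of mlx5e_build_tc_tx_bw()'s zero-BW split; the TC layout
 * below is invented for the demo, MAX_BW_ALLOC mirrors MLX5E_MAX_BW_ALLOC. */
#include <stdio.h>

#define MAX_BW_ALLOC 100

int main(void)
{
        int is_ets[4]   = {1, 1, 1, 1};         /* all four TCs are ETS */
        int tc_tx_bw[4] = {0, 0, 0, 100};       /* three request 0% */
        int hw_bw[4];
        int num_zero = 0, last_zero = -1, i;

        for (i = 0; i < 4; i++)
                if (is_ets[i] && !tc_tx_bw[i]) {
                        num_zero++;
                        last_zero = i;
                }

        for (i = 0; i < 4; i++)
                hw_bw[i] = tc_tx_bw[i] ? tc_tx_bw[i]
                                       : MAX_BW_ALLOC / num_zero;

        /* make sure the zero-BW group still sums to 100% */
        if (last_zero != -1)
                hw_bw[last_zero] += MAX_BW_ALLOC % num_zero;

        for (i = 0; i < 4; i++)
                printf("tc%d -> hw bw %d%%\n", i, hw_bw[i]);
        /* tc0 33%, tc1 33%, tc2 34%, tc3 100%: tc0-tc2 share group #0,
         * tc3 sits in group #1, and getets reports tc0-tc2 back as 0%. */
        return 0;
}
```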
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -78,9 +78,11 @@ struct mlx5e_tc_flow {
 };
 
 struct mlx5e_tc_flow_parse_attr {
+	struct ip_tunnel_info tun_info;
 	struct mlx5_flow_spec spec;
 	int num_mod_hdr_actions;
 	void *mod_hdr_actions;
+	int mirred_ifindex;
 };
 
 enum {
@@ -322,6 +324,12 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
 			       struct mlx5e_tc_flow *flow);
 
+static int mlx5e_attach_encap(struct mlx5e_priv *priv,
+			      struct ip_tunnel_info *tun_info,
+			      struct net_device *mirred_dev,
+			      struct net_device **encap_dev,
+			      struct mlx5e_tc_flow *flow);
+
 static struct mlx5_flow_handle *
 mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 		      struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -329,9 +337,27 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
-	struct mlx5_flow_handle *rule;
+	struct net_device *out_dev, *encap_dev = NULL;
+	struct mlx5_flow_handle *rule = NULL;
+	struct mlx5e_rep_priv *rpriv;
+	struct mlx5e_priv *out_priv;
 	int err;
 
+	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
+		out_dev = __dev_get_by_index(dev_net(priv->netdev),
+					     attr->parse_attr->mirred_ifindex);
+		err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
+					 out_dev, &encap_dev, flow);
+		if (err) {
+			rule = ERR_PTR(err);
+			if (err != -EAGAIN)
+				goto err_attach_encap;
+		}
+		out_priv = netdev_priv(encap_dev);
+		rpriv = out_priv->ppriv;
+		attr->out_rep = rpriv->rep;
+	}
+
 	err = mlx5_eswitch_add_vlan_action(esw, attr);
 	if (err) {
 		rule = ERR_PTR(err);
@@ -347,10 +373,14 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 		}
 	}
 
-	rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
-	if (IS_ERR(rule))
-		goto err_add_rule;
-
+	/* we get here if (1) there's no error (rule being null) or when
+	 * (2) there's an encap action and we're on -EAGAIN (no valid neigh)
+	 */
+	if (rule != ERR_PTR(-EAGAIN)) {
+		rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
+		if (IS_ERR(rule))
+			goto err_add_rule;
+	}
 	return rule;
 
 err_add_rule:
@@ -361,6 +391,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 err_add_vlan:
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
 		mlx5e_detach_encap(priv, flow);
+err_attach_encap:
 	return rule;
 }
 
@@ -389,6 +420,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
 void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
 			      struct mlx5e_encap_entry *e)
 {
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	struct mlx5_esw_flow_attr *esw_attr;
 	struct mlx5e_tc_flow *flow;
 	int err;
 
@@ -404,10 +437,9 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
 	mlx5e_rep_queue_neigh_stats_work(priv);
 
 	list_for_each_entry(flow, &e->flows, encap) {
-		flow->esw_attr->encap_id = e->encap_id;
-		flow->rule = mlx5e_tc_add_fdb_flow(priv,
-						   flow->esw_attr->parse_attr,
-						   flow);
+		esw_attr = flow->esw_attr;
+		esw_attr->encap_id = e->encap_id;
+		flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
 		if (IS_ERR(flow->rule)) {
 			err = PTR_ERR(flow->rule);
 			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
@@ -421,15 +453,13 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
 void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
 			      struct mlx5e_encap_entry *e)
 {
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5e_tc_flow *flow;
-	struct mlx5_fc *counter;
 
 	list_for_each_entry(flow, &e->flows, encap) {
 		if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
 			flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
-			counter = mlx5_flow_rule_counter(flow->rule);
-			mlx5_del_flow_rules(flow->rule);
-			mlx5_fc_destroy(priv->mdev, counter);
+			mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
 		}
 	}
 
@@ -1942,7 +1972,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 		if (is_tcf_mirred_egress_redirect(a)) {
 			int ifindex = tcf_mirred_ifindex(a);
-			struct net_device *out_dev, *encap_dev = NULL;
+			struct net_device *out_dev;
 			struct mlx5e_priv *out_priv;
 
 			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
@@ -1955,17 +1985,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				rpriv = out_priv->ppriv;
 				attr->out_rep = rpriv->rep;
 			} else if (encap) {
-				err = mlx5e_attach_encap(priv, info,
-							 out_dev, &encap_dev, flow);
-				if (err && err != -EAGAIN)
-					return err;
+				parse_attr->mirred_ifindex = ifindex;
+				parse_attr->tun_info = *info;
+				attr->parse_attr = parse_attr;
 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
 					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
 					MLX5_FLOW_CONTEXT_ACTION_COUNT;
-				out_priv = netdev_priv(encap_dev);
-				rpriv = out_priv->ppriv;
-				attr->out_rep = rpriv->rep;
-				attr->parse_attr = parse_attr;
+				/* attr->out_rep is resolved when we handle encap */
 			} else {
 				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
 				       priv->netdev->name, out_dev->name);
@@ -2047,7 +2073,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
 	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
 		err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
 		if (err < 0)
-			goto err_handle_encap_flow;
+			goto err_free;
 		flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
 	} else {
 		err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
@@ -2058,10 +2084,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
 	if (IS_ERR(flow->rule)) {
 		err = PTR_ERR(flow->rule);
-		goto err_free;
+		if (err != -EAGAIN)
+			goto err_free;
 	}
 
-	flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+	if (err != -EAGAIN)
+		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+
 	err = rhashtable_insert_fast(&tc->ht, &flow->node,
 				     tc->ht_params);
 	if (err)
@@ -2075,16 +2104,6 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
 err_del_rule:
 	mlx5e_tc_del_flow(priv, flow);
 
-err_handle_encap_flow:
-	if (err == -EAGAIN) {
-		err = rhashtable_insert_fast(&tc->ht, &flow->node,
-					     tc->ht_params);
-		if (err)
-			mlx5e_tc_del_flow(priv, flow);
-		else
-			return 0;
-	}
-
 err_free:
 	kvfree(parse_attr);
 	kfree(flow);
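The en_tc.c changes restructure encap offload around a deferred path: when the tunnel neighbour is not yet resolved, mlx5e_attach_encap() returns -EAGAIN, the FDB rule is not installed, but the flow is still inserted into the rhashtable; the rule is installed later from mlx5e_tc_encap_flows_add() once the neigh update arrives. A toy model of that control flow, with invented names standing in for the driver's structures:

```c
/* Deferred-offload sketch: attach() may fail with EAGAIN when the tunnel
 * neighbour is unresolved; the flow is kept and offloaded later from the
 * neigh-update path. Illustrative only. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct flow {
        bool offloaded;
        bool neigh_valid;
};

static int attach_encap(struct flow *f)
{
        return f->neigh_valid ? 0 : -EAGAIN;    /* no valid neigh yet */
}

static int configure_flower(struct flow *f)
{
        int err = attach_encap(f);

        if (err && err != -EAGAIN)
                return err;             /* real failure: drop the flow */
        if (!err) {
                f->offloaded = true;    /* rule installed in hardware */
                printf("offloaded immediately\n");
        } else {
                printf("kept, waiting for neigh update\n");
        }
        return 0;                       /* flow stays in the table either way */
}

static void neigh_update_event(struct flow *f)
{
        if (!f->offloaded) {
                f->offloaded = true;    /* like mlx5e_tc_encap_flows_add() */
                printf("offloaded from neigh update\n");
        }
}

int main(void)
{
        struct flow f = { .neigh_valid = false };

        configure_flower(&f);           /* -EAGAIN path: deferred */
        f.neigh_valid = true;
        neigh_update_event(&f);         /* now installed */
        return 0;
}
```

This is why mlx5e_configure_flower() no longer needs the err_handle_encap_flow label: the -EAGAIN case simply falls through to the normal rhashtable insert without setting MLX5E_TC_FLOW_OFFLOADED.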
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -356,10 +356,11 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
 void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
 {
 	struct mlx5_core_health *health = &dev->priv.health;
+	unsigned long flags;
 
-	spin_lock(&health->wq_lock);
+	spin_lock_irqsave(&health->wq_lock, flags);
 	set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
-	spin_unlock(&health->wq_lock);
+	spin_unlock_irqrestore(&health->wq_lock, flags);
 	cancel_delayed_work_sync(&dev->priv.health.recover_work);
 }
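The health fix matters because wq_lock is also taken from the health poll timer, i.e. from IRQ context: if process context holds it with interrupts enabled, the timer can fire on the same CPU and spin on the lock forever. A single-CPU toy model of why spin_lock() had to become spin_lock_irqsave(); purely illustrative, not kernel code:

```c
/* 'irqs_disabled' stands for the local IRQ state that spin_lock_irqsave()
 * saves and disables. The timer firing is forced, to show the window. */
#include <stdbool.h>
#include <stdio.h>

static bool wq_lock_held;
static bool irqs_disabled;

/* models the health poll timer taking wq_lock in hard-IRQ context */
static void health_timer(void)
{
        if (wq_lock_held)
                printf("timer spins on wq_lock held by its own CPU: deadlock\n");
        else
                printf("timer takes and releases wq_lock\n");
}

static void drain_health_recovery(bool irq_safe)
{
        if (irq_safe)
                irqs_disabled = true;   /* spin_lock_irqsave() */
        wq_lock_held = true;            /* critical section begins */

        if (!irqs_disabled)
                health_timer();         /* timer may preempt us right here */

        wq_lock_held = false;
        if (irq_safe) {
                irqs_disabled = false;  /* spin_unlock_irqrestore() */
                health_timer();         /* deferred timer now runs safely */
        }
}

int main(void)
{
        drain_health_recovery(false);   /* old spin_lock(): deadlock window */
        drain_health_recovery(true);    /* fixed: window closed */
        return 0;
}
```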
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -677,6 +677,27 @@ int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
 
+int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+			     u8 tc, u8 *tc_group)
+{
+	u32 out[MLX5_ST_SZ_DW(qetc_reg)];
+	void *ets_tcn_conf;
+	int err;
+
+	err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out));
+	if (err)
+		return err;
+
+	ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out,
+				    tc_configuration[tc]);
+
+	*tc_group = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf,
+			     group);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_tc_group);
+
 int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
 {
 	u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
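mlx5_query_port_tc_group() follows the driver's usual register idiom: fetch the whole QETC register once, point at the per-TC sub-structure with MLX5_ADDR_OF(), then pull a single field out with MLX5_GET(). A userspace toy of that fetch-then-extract pattern; every offset below is invented for the demo and does not reflect the real qetc_reg layout (which lives in mlx5_ifc.h):

```c
/* Fetch-then-extract, in the spirit of MLX5_ADDR_OF()/MLX5_GET(). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TC_CONF_BASE   16       /* invented: start of tc_configuration[] */
#define TC_CONF_STRIDE  4       /* invented: bytes per entry */
#define GROUP_OFF       0       /* invented: 'group' byte within an entry */

static int query_reg(uint8_t *out, size_t len)
{
        memset(out, 0, len);                    /* pretend firmware reply */
        out[TC_CONF_BASE + 2 * TC_CONF_STRIDE + GROUP_OFF] = 5;
        return 0;                               /* 0 on success, like the driver */
}

static int query_port_tc_group(uint8_t tc, uint8_t *tc_group)
{
        uint8_t out[64];
        int err = query_reg(out, sizeof(out));  /* one register read ... */

        if (err)
                return err;
        /* ... then extract tc_configuration[tc].group from the blob */
        *tc_group = out[TC_CONF_BASE + tc * TC_CONF_STRIDE + GROUP_OFF];
        return 0;
}

int main(void)
{
        uint8_t group;

        if (!query_port_tc_group(2, &group))
                printf("tc2 -> group %u\n", group);     /* tc2 -> group 5 */
        return 0;
}
```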
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -157,6 +157,8 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
 int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
 			    u8 prio, u8 *tc);
 int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
+int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+			     u8 tc, u8 *tc_group);
 int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
 int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
 				u8 tc, u8 *bw_pct);