Commit c034ff2b authored by Jakub Kicinski

Merge tag 'mlx5-fixes-2022-05-31' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2022-05-31

This series provides bug fixes to the mlx5 driver.
Please pull and let me know if there is any problem.

* tag 'mlx5-fixes-2022-05-31' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5: Fix mlx5_get_next_dev() peer device matching
  net/mlx5e: Update netdev features after changing XDP state
  net/mlx5: correct ECE offset in query qp output
  net/mlx5e: Disable softirq in mlx5e_activate_rq to avoid race condition
  net/mlx5: CT: Fix header-rewrite re-use for tupels
  net/mlx5e: TC NIC mode, fix tc chains miss table
  net/mlx5: Don't use already freed action pointer
====================

Link: https://lore.kernel.org/r/20220531205447.99236-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 11049c9e 1c5de097
@@ -571,18 +571,32 @@ static int _next_phys_dev(struct mlx5_core_dev *mdev,
 	return 1;
 }
 
+static void *pci_get_other_drvdata(struct device *this, struct device *other)
+{
+	if (this->driver != other->driver)
+		return NULL;
+
+	return pci_get_drvdata(to_pci_dev(other));
+}
+
 static int next_phys_dev(struct device *dev, const void *data)
 {
-	struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
-	struct mlx5_core_dev *mdev = madev->mdev;
+	struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
+
+	mdev = pci_get_other_drvdata(this->device, dev);
+	if (!mdev)
+		return 0;
 
 	return _next_phys_dev(mdev, data);
 }
 
 static int next_phys_dev_lag(struct device *dev, const void *data)
 {
-	struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
-	struct mlx5_core_dev *mdev = madev->mdev;
+	struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
+
+	mdev = pci_get_other_drvdata(this->device, dev);
+	if (!mdev)
+		return 0;
 
 	if (!MLX5_CAP_GEN(mdev, vport_group_manager) ||
 	    !MLX5_CAP_GEN(mdev, lag_master) ||
@@ -596,19 +610,17 @@ static int next_phys_dev_lag(struct device *dev, const void *data)
 static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
 					       int (*match)(struct device *dev, const void *data))
 {
-	struct auxiliary_device *adev;
-	struct mlx5_adev *madev;
+	struct device *next;
 
 	if (!mlx5_core_is_pf(dev))
 		return NULL;
 
-	adev = auxiliary_find_device(NULL, dev, match);
-	if (!adev)
+	next = bus_find_device(&pci_bus_type, NULL, dev, match);
+	if (!next)
 		return NULL;
 
-	madev = container_of(adev, struct mlx5_adev, adev);
-	put_device(&adev->dev);
-	return madev->mdev;
+	put_device(next);
+	return pci_get_drvdata(to_pci_dev(next));
 }
 
 /* Must be called with intf_mutex held */
...
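For readers less familiar with the driver-model helpers used above: bus_find_device() walks every device on the given bus, calls the match callback for each one, and returns the first device for which the callback returns non-zero, with a reference taken on it. The driver comparison in pci_get_other_drvdata() is what makes it safe to treat the other device's drvdata as a struct mlx5_core_dev. Below is a generic sketch of that idiom; the function names are illustrative and not part of the patch, which applies further mlx5-specific checks in _next_phys_dev().

#include <linux/device.h>
#include <linux/pci.h>

/* Illustrative match callback: 'data' is the device doing the search. */
static int example_match_same_driver_peer(struct device *other, const void *data)
{
	const struct device *self = data;

	/* Skip ourselves and devices bound to a different driver, whose
	 * drvdata would be of an unknown type.
	 */
	if (other == self || other->driver != self->driver)
		return 0;
	return 1;
}

/* Illustrative caller: return the drvdata of the first same-driver PCI peer. */
static void *example_find_peer_drvdata(struct device *self)
{
	struct device *peer;
	void *drvdata;

	peer = bus_find_device(&pci_bus_type, NULL, self, example_match_same_driver_peer);
	if (!peer)
		return NULL;

	drvdata = pci_get_drvdata(to_pci_dev(peer));
	put_device(peer);	/* drop the reference bus_find_device() took */
	return drvdata;
}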
@@ -764,6 +764,7 @@ struct mlx5e_rq {
 	u8 wq_type;
 	u32 rqn;
 	struct mlx5_core_dev *mdev;
+	struct mlx5e_channel *channel;
 	u32 umr_mkey;
 	struct mlx5e_dma_info wqe_overflow;
@@ -1076,6 +1077,9 @@ void mlx5e_close_cq(struct mlx5e_cq *cq);
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
 
+void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c);
+void mlx5e_trigger_napi_sched(struct napi_struct *napi);
+
 int mlx5e_open_channels(struct mlx5e_priv *priv,
 			struct mlx5e_channels *chs);
 void mlx5e_close_channels(struct mlx5e_channels *chs);
...
@@ -12,6 +12,7 @@ struct mlx5e_post_act;
 enum {
 	MLX5E_TC_FT_LEVEL = 0,
 	MLX5E_TC_TTC_FT_LEVEL,
+	MLX5E_TC_MISS_LEVEL,
 };
 
 struct mlx5e_tc_table {
@@ -20,6 +21,7 @@ struct mlx5e_tc_table {
 	 */
 	struct mutex t_lock;
 	struct mlx5_flow_table *t;
+	struct mlx5_flow_table *miss_t;
 	struct mlx5_fs_chains *chains;
 	struct mlx5e_post_act *post_act;
 
...
@@ -736,6 +736,7 @@ void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
 	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
 		mlx5e_ptp_rx_set_fs(c->priv);
 		mlx5e_activate_rq(&c->rq);
+		mlx5e_trigger_napi_sched(&c->napi);
 	}
 }
 
...
@@ -123,6 +123,8 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
 		xskrq->stats->recover++;
 	}
 
+	mlx5e_trigger_napi_icosq(icosq->channel);
+
 	mutex_unlock(&icosq->channel->icosq_recovery_lock);
 
 	return 0;
@@ -166,6 +168,10 @@ static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx)
 	clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
 	mlx5e_activate_rq(rq);
 	rq->stats->recover++;
+	if (rq->channel)
+		mlx5e_trigger_napi_icosq(rq->channel);
+	else
+		mlx5e_trigger_napi_sched(rq->cq.napi);
 	return 0;
 out:
 	clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
...
@@ -715,7 +715,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
 				struct mlx5_flow_attr *attr,
 				struct flow_rule *flow_rule,
 				struct mlx5e_mod_hdr_handle **mh,
-				u8 zone_restore_id, bool nat)
+				u8 zone_restore_id, bool nat_table, bool has_nat)
 {
 	DECLARE_MOD_HDR_ACTS_ACTIONS(actions_arr, MLX5_CT_MIN_MOD_ACTS);
 	DECLARE_MOD_HDR_ACTS(mod_acts, actions_arr);
@@ -731,11 +731,12 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
 					&attr->ct_attr.ct_labels_id);
 	if (err)
 		return -EOPNOTSUPP;
-	if (nat) {
-		err = mlx5_tc_ct_entry_create_nat(ct_priv, flow_rule,
-						  &mod_acts);
-		if (err)
-			goto err_mapping;
+	if (nat_table) {
+		if (has_nat) {
+			err = mlx5_tc_ct_entry_create_nat(ct_priv, flow_rule, &mod_acts);
+			if (err)
+				goto err_mapping;
+		}
 
 		ct_state |= MLX5_CT_STATE_NAT_BIT;
 	}
@@ -750,7 +751,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
 	if (err)
 		goto err_mapping;
 
-	if (nat) {
+	if (nat_table && has_nat) {
 		attr->modify_hdr = mlx5_modify_header_alloc(ct_priv->dev, ct_priv->ns_type,
 							    mod_acts.num_actions,
 							    mod_acts.actions);
@@ -818,7 +819,9 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 	err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule,
 					      &zone_rule->mh,
-					      zone_restore_id, nat);
+					      zone_restore_id,
+					      nat,
+					      mlx5_tc_ct_entry_has_nat(entry));
 
 	if (err) {
 		ct_dbg("Failed to create ct entry mod hdr");
 		goto err_mod_hdr;
...
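The split of the old 'nat' flag into 'nat_table' and 'has_nat' is the core of this fix: which table the rule is destined for is now tracked separately from whether the tuple actually carries NAT mangle actions. A compact summary of the combinations implied by the hunks above (illustrative, not text from the patch):

/*
 *   nat_table  has_nat   resulting mod-header actions
 *   ---------  -------   -----------------------------------------------
 *   false      -         CT metadata restore only (plain CT table)
 *   true       false     CT metadata restore + NAT_BIT in ct_state,
 *                         but no NAT packet rewrites
 *   true       true      CT metadata restore + NAT_BIT in ct_state
 *                         + NAT rewrites, allocated per entry via
 *                         mlx5_modify_header_alloc()
 */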
@@ -179,6 +179,7 @@ static void mlx5e_activate_trap(struct mlx5e_trap *trap)
 {
 	napi_enable(&trap->napi);
 	mlx5e_activate_rq(&trap->rq);
+	mlx5e_trigger_napi_sched(&trap->napi);
 }
 
 void mlx5e_deactivate_trap(struct mlx5e_priv *priv)
...
@@ -117,6 +117,7 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
 		goto err_remove_pool;
 
 	mlx5e_activate_xsk(c);
+	mlx5e_trigger_napi_icosq(c);
 
 	/* Don't wait for WQEs, because the newer xdpsock sample doesn't provide
 	 * any Fill Ring entries at the setup stage.
...
@@ -64,6 +64,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
 	rq->clock = &mdev->clock;
 	rq->icosq = &c->icosq;
 	rq->ix = c->ix;
+	rq->channel = c;
 	rq->mdev = mdev;
 	rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
 	rq->xdpsq = &c->rq_xdpsq;
@@ -179,10 +180,6 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
 	mlx5e_reporter_icosq_resume_recovery(c);
 
 	/* TX queue is created active. */
-
-	spin_lock_bh(&c->async_icosq_lock);
-	mlx5e_trigger_irq(&c->async_icosq);
-	spin_unlock_bh(&c->async_icosq_lock);
 }
 
 void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
...
@@ -475,6 +475,7 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
 	rq->clock = &mdev->clock;
 	rq->icosq = &c->icosq;
 	rq->ix = c->ix;
+	rq->channel = c;
 	rq->mdev = mdev;
 	rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
 	rq->xdpsq = &c->rq_xdpsq;
@@ -1066,13 +1067,6 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
 void mlx5e_activate_rq(struct mlx5e_rq *rq)
 {
 	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
-	if (rq->icosq) {
-		mlx5e_trigger_irq(rq->icosq);
-	} else {
-		local_bh_disable();
-		napi_schedule(rq->cq.napi);
-		local_bh_enable();
-	}
 }
 
 void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
@@ -2227,6 +2221,20 @@ static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu)
 	return 0;
 }
 
+void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c)
+{
+	spin_lock_bh(&c->async_icosq_lock);
+	mlx5e_trigger_irq(&c->async_icosq);
+	spin_unlock_bh(&c->async_icosq_lock);
+}
+
+void mlx5e_trigger_napi_sched(struct napi_struct *napi)
+{
+	local_bh_disable();
+	napi_schedule(napi);
+	local_bh_enable();
+}
+
 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 			      struct mlx5e_params *params,
 			      struct mlx5e_channel_param *cparam,
@@ -2308,6 +2316,8 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
 
 	if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
 		mlx5e_activate_xsk(c);
+
+	mlx5e_trigger_napi_icosq(c);
 }
 
 static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -4559,6 +4569,11 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
 unlock:
 	mutex_unlock(&priv->state_lock);
+
+	/* Need to fix some features. */
+	if (!err)
+		netdev_update_features(netdev);
+
 	return err;
 }
...
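The two helpers added here centralize the activation kick used by the call sites in the earlier hunks. The local_bh_disable()/local_bh_enable() pair around napi_schedule() matters because napi_schedule() only raises NET_RX_SOFTIRQ; the subsequent local_bh_enable() runs the pending softirq immediately on this CPU rather than leaving the poll to happen at some later point. A hedged sketch of the calling convention the other hunks follow (the wrapper name is illustrative, not part of the patch):

/* Illustrative wrapper, mirroring how the reporter and activation paths now
 * combine the helpers: enable the RQ first, then kick NAPI so it gets polled.
 */
static void example_activate_and_kick(struct mlx5e_rq *rq, struct mlx5e_channel *c)
{
	mlx5e_activate_rq(rq);			/* now only sets MLX5E_RQ_STATE_ENABLED */

	if (c)
		mlx5e_trigger_napi_icosq(c);		/* kick via the channel's async ICOSQ */
	else
		mlx5e_trigger_napi_sched(rq->cq.napi);	/* plain NAPI reschedule */
}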
@@ -4714,6 +4714,33 @@ static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
 	return tc_tbl_size;
 }
 
+static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
+{
+	struct mlx5_flow_table **ft = &priv->fs.tc.miss_t;
+	struct mlx5_flow_table_attr ft_attr = {};
+	struct mlx5_flow_namespace *ns;
+	int err = 0;
+
+	ft_attr.max_fte = 1;
+	ft_attr.autogroup.max_num_groups = 1;
+	ft_attr.level = MLX5E_TC_MISS_LEVEL;
+	ft_attr.prio = 0;
+	ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
+
+	*ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+	if (IS_ERR(*ft)) {
+		err = PTR_ERR(*ft);
+		netdev_err(priv->netdev, "failed to create tc nic miss table err=%d\n", err);
+	}
+
+	return err;
+}
+
+static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
+{
+	mlx5_destroy_flow_table(priv->fs.tc.miss_t);
+}
+
 int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 {
 	struct mlx5e_tc_table *tc = &priv->fs.tc;
@@ -4746,19 +4773,23 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 	}
 	tc->mapping = chains_mapping;
 
+	err = mlx5e_tc_nic_create_miss_table(priv);
+	if (err)
+		goto err_chains;
+
 	if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
 		attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
 			MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
 	attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
 	attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
 	attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
-	attr.default_ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
+	attr.default_ft = priv->fs.tc.miss_t;
 	attr.mapping = chains_mapping;
 
 	tc->chains = mlx5_chains_create(dev, &attr);
 	if (IS_ERR(tc->chains)) {
 		err = PTR_ERR(tc->chains);
-		goto err_chains;
+		goto err_miss;
 	}
 
 	tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
@@ -4781,6 +4812,8 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 	mlx5_tc_ct_clean(tc->ct);
 	mlx5e_tc_post_act_destroy(tc->post_act);
 	mlx5_chains_destroy(tc->chains);
+err_miss:
+	mlx5e_tc_nic_destroy_miss_table(priv);
 err_chains:
 	mapping_destroy(chains_mapping);
 err_mapping:
@@ -4821,6 +4854,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
 	mlx5e_tc_post_act_destroy(tc->post_act);
 	mapping_destroy(tc->mapping);
 	mlx5_chains_destroy(tc->chains);
+	mlx5e_tc_nic_destroy_miss_table(priv);
 }
 
 int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
...
@@ -114,7 +114,7 @@
 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
 
 #define KERNEL_NIC_TC_NUM_PRIOS 1
-#define KERNEL_NIC_TC_NUM_LEVELS 2
+#define KERNEL_NIC_TC_NUM_LEVELS 3
 
 #define ANCHOR_NUM_LEVELS 1
 #define ANCHOR_NUM_PRIOS 1
...
@@ -44,11 +44,10 @@ static int set_miss_action(struct mlx5_flow_root_namespace *ns,
 	err = mlx5dr_table_set_miss_action(ft->fs_dr_table.dr_table, action);
 	if (err && action) {
 		err = mlx5dr_action_destroy(action);
-		if (err) {
-			action = NULL;
-			mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
-				      err);
-		}
+		if (err)
+			mlx5_core_err(ns->dev,
+				      "Failed to destroy action (%d)\n", err);
+		action = NULL;
 	}
 	ft->fs_dr_table.miss_action = action;
 	if (old_miss_action) {
...
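The fix moves action = NULL out of the error branch so that, whether or not mlx5dr_action_destroy() reports an error, the freed pointer is never stored in ft->fs_dr_table.miss_action. A stand-alone analogy of that rule (user-space C, purely illustrative, not driver code):

#include <stdio.h>
#include <stdlib.h>

struct cache { void *miss_action; };

static int destroy_action(void *action)
{
	free(action);
	return -1;		/* pretend destroy can fail even after freeing */
}

int main(void)
{
	struct cache c = { NULL };
	void *action = malloc(32);

	if (destroy_action(action))
		fprintf(stderr, "Failed to destroy action\n");
	action = NULL;			/* unconditional, as in the fix */
	c.miss_action = action;		/* caches NULL, never a dangling pointer */

	printf("miss_action=%p\n", c.miss_action);
	return 0;
}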
@@ -5176,12 +5176,11 @@ struct mlx5_ifc_query_qp_out_bits {
 
 	u8 syndrome[0x20];
 
-	u8 reserved_at_40[0x20];
-	u8 ece[0x20];
+	u8 reserved_at_40[0x40];
 
 	u8 opt_param_mask[0x20];
 
-	u8 reserved_at_a0[0x20];
+	u8 ece[0x20];
 
 	struct mlx5_ifc_qpc_bits qpc;
 
...
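To make the offset correction concrete, these are the bit offsets implied by the field widths above, counted from the start of query_qp_out (derived for illustration, not text from the patch):

/*
 *                before the fix                after the fix
 *   0x40..0x5f   reserved_at_40[0x20]          reserved_at_40[0x40]  (0x40..0x7f)
 *   0x60..0x7f   ece[0x20]                         "         "
 *   0x80..0x9f   opt_param_mask[0x20]          opt_param_mask[0x20]  (0x80..0x9f)
 *   0xa0..0xbf   reserved_at_a0[0x20]          ece[0x20]             (0xa0..0xbf)
 *
 * i.e. the ECE value is now read at bit offset 0xa0 of the query output
 * rather than 0x60, which is what the field reordering in this hunk does.
 */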