Commit 68e18626 authored by Saeed Mahameed

Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Misc updates from mlx5-next branch.

1) Eli improves handling of the QoS element type support
2) Gavi refactors and prepares mlx5 flow counters for bulk allocation support
3) Parav refactors and improves E-Switch load/unload flows
4) Saeed adds two misc cleanups

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parents a8e600e2 5896b972
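For context on item (2): the flow-counter bulk-query interface reworked below replaces the old struct mlx5_cmd_fc_bulk wrapper with a caller-supplied flat output buffer. The following is a minimal sketch of how a caller might drive the new helpers introduced in this merge; the function name, the chosen base_id/bulk_len handling and the error paths are illustrative only, and the real in-tree user is mlx5_fc_stats_query_counter_range() further down.

/* Sketch only: hypothetical caller of mlx5_cmd_fc_get_bulk_query_out_len()
 * and mlx5_cmd_fc_bulk_query() as introduced by this merge.  Counter-list
 * walking and locking are intentionally left out.
 */
static int example_query_one_bulk(struct mlx5_core_dev *dev, u32 base_id,
				  int bulk_len, u64 *packets, u64 *bytes)
{
	int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
	void *stats;
	u32 *out;
	int err;

	/* One flat FW output buffer replaces struct mlx5_cmd_fc_bulk. */
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	/* base_id must be 4-aligned and bulk_len a multiple of 4, as the
	 * caller in fs_counters.c enforces.
	 */
	err = mlx5_cmd_fc_bulk_query(dev, base_id, bulk_len, out);
	if (err)
		goto out_free;

	/* Parse the first counter of the block out of the raw FW layout. */
	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics[0]);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);

out_free:
	kvfree(out);
	return err;
}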
@@ -215,11 +215,7 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
	 */
	dma_rmb();
-	if (likely(eqe->type < MLX5_EVENT_TYPE_MAX))
-		atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
-	else
-		mlx5_core_warn_once(dev, "notifier_call_chain is not setup for eqe: %d\n", eqe->type);
+	atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
	atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);
	++eq->cons_index;
@@ -945,9 +941,6 @@ int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;
-	if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
-		return -EINVAL;
	return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_register);
@@ -956,9 +949,6 @@ int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	struct mlx5_eq_table *eqt = dev->priv.eq_table;
-	if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
-		return -EINVAL;
	return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
@@ -58,20 +58,9 @@ struct vport_addr {
	bool mc_promisc;
};

-enum {
-	UC_ADDR_CHANGE = BIT(0),
-	MC_ADDR_CHANGE = BIT(1),
-	PROMISC_CHANGE = BIT(3),
-};
static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);

-/* Vport context events */
-#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
-			    MC_ADDR_CHANGE | \
-			    PROMISC_CHANGE)
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
@@ -108,13 +97,13 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

-	if (events_mask & UC_ADDR_CHANGE)
+	if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
-	if (events_mask & MC_ADDR_CHANGE)
+	if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);
-	if (events_mask & PROMISC_CHANGE)
+	if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_promisc_change, 1);
@@ -463,6 +452,22 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
	return err;
}

+#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
+					MLX5_VPORT_MC_ADDR_CHANGE | \
+					MLX5_VPORT_PROMISC_CHANGE)
+
+static int esw_legacy_enable(struct mlx5_eswitch *esw)
+{
+	int ret;
+
+	ret = esw_create_legacy_table(esw);
+	if (ret)
+		return ret;
+
+	mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
+	return 0;
+}
+
static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
{
	esw_cleanup_vepa_rules(esw);
@@ -470,6 +475,19 @@ static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
	esw_destroy_legacy_vepa_table(esw);
}

+static void esw_legacy_disable(struct mlx5_eswitch *esw)
+{
+	struct esw_mc_addr *mc_promisc;
+
+	mlx5_eswitch_disable_pf_vf_vports(esw);
+
+	mc_promisc = &esw->mc_promisc;
+	if (mc_promisc->uplink_rule)
+		mlx5_del_flow_rules(mc_promisc->uplink_rule);
+
+	esw_destroy_legacy_table(esw);
+}
+
/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);
@@ -901,21 +919,21 @@ static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

-	if (vport->enabled_events & UC_ADDR_CHANGE) {
+	if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
	}

-	if (vport->enabled_events & MC_ADDR_CHANGE)
+	if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

-	if (vport->enabled_events & PROMISC_CHANGE) {
+	if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
		esw_update_vport_rx_mode(esw, vport);
		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
			esw_update_vport_mc_promisc(esw, vport);
	}

-	if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE))
+	if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
@@ -1393,18 +1411,49 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
	return err;
}

+static bool element_type_supported(struct mlx5_eswitch *esw, int type)
+{
+	struct mlx5_core_dev *dev = esw->dev;
+
+	switch (type) {
+	case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
+		return MLX5_CAP_QOS(dev, esw_element_type) &
+		       ELEMENT_TYPE_CAP_MASK_TASR;
+	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
+		return MLX5_CAP_QOS(dev, esw_element_type) &
+		       ELEMENT_TYPE_CAP_MASK_VPORT;
+	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
+		return MLX5_CAP_QOS(dev, esw_element_type) &
+		       ELEMENT_TYPE_CAP_MASK_VPORT_TC;
+	case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
+		return MLX5_CAP_QOS(dev, esw_element_type) &
+		       ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
+	}
+	return false;
+}
+
/* Vport QoS management */
-static int esw_create_tsar(struct mlx5_eswitch *esw)
+static void esw_create_tsar(struct mlx5_eswitch *esw)
{
	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_core_dev *dev = esw->dev;
+	__be32 *attr;
	int err;

	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
-		return 0;
+		return;
+
+	if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
+		return;

	if (esw->qos.enabled)
-		return -EEXIST;
+		return;
+
+	MLX5_SET(scheduling_context, tsar_ctx, element_type,
+		 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+
+	attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
+	*attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);

	err = mlx5_create_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
@@ -1412,11 +1461,10 @@ static int esw_create_tsar(struct mlx5_eswitch *esw)
						 &esw->qos.root_tsar_id);
	if (err) {
		esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
-		return err;
+		return;
	}

	esw->qos.enabled = true;
-	return 0;
}

static void esw_destroy_tsar(struct mlx5_eswitch *esw)
@@ -1619,7 +1667,7 @@ static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
}

static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
-			     int enable_events)
+			     enum mlx5_eswitch_vport_event enabled_events)
{
	u16 vport_num = vport->vport;
@@ -1641,7 +1689,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
		esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);

	/* Sync with current vport context */
-	vport->enabled_events = enable_events;
+	vport->enabled_events = enabled_events;
	vport->enabled = true;

	/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
@@ -1770,11 +1818,46 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))

-int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
+/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
+ * whichever are present on the eswitch.
+ */
+void
+mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
+				 enum mlx5_eswitch_vport_event enabled_events)
+{
+	struct mlx5_vport *vport;
+	int i;
+
+	/* Enable PF vport */
+	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+	esw_enable_vport(esw, vport, enabled_events);
+
+	/* Enable ECPF vports */
+	if (mlx5_ecpf_vport_exists(esw->dev)) {
+		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+		esw_enable_vport(esw, vport, enabled_events);
+	}
+
+	/* Enable VF vports */
+	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+		esw_enable_vport(esw, vport, enabled_events);
+}
+
+/* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs
+ * whichever are previously enabled on the eswitch.
+ */
+void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
+	int i;
+
+	mlx5_esw_for_all_vports_reverse(esw, i, vport)
+		esw_disable_vport(esw, vport);
+}
+
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
+{
	int err;
-	int i, enabled_events;

	if (!ESW_ALLOWED(esw) ||
	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
@@ -1788,44 +1871,23 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
		esw_warn(esw->dev, "engress ACL is not supported by FW\n");

+	esw_create_tsar(esw);
+
	esw->mode = mode;

	mlx5_lag_update(esw->dev);

	if (mode == MLX5_ESWITCH_LEGACY) {
-		err = esw_create_legacy_table(esw);
-		if (err)
-			goto abort;
+		err = esw_legacy_enable(esw);
	} else {
		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-		err = esw_offloads_init(esw);
+		err = esw_offloads_enable(esw);
	}

	if (err)
		goto abort;

-	err = esw_create_tsar(esw);
-	if (err)
-		esw_warn(esw->dev, "Failed to create eswitch TSAR");
-
-	enabled_events = (mode == MLX5_ESWITCH_LEGACY) ? SRIOV_VPORT_EVENTS :
-		UC_ADDR_CHANGE;
-
-	/* Enable PF vport */
-	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
-	esw_enable_vport(esw, vport, enabled_events);
-
-	/* Enable ECPF vports */
-	if (mlx5_ecpf_vport_exists(esw->dev)) {
-		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
-		esw_enable_vport(esw, vport, enabled_events);
-	}
-
-	/* Enable VF vports */
-	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
-		esw_enable_vport(esw, vport, enabled_events);
-
	mlx5_eswitch_event_handlers_register(esw);

	esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
@@ -1847,10 +1909,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
{
-	struct esw_mc_addr *mc_promisc;
-	struct mlx5_vport *vport;
	int old_mode;
-	int i;

	if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE)
		return;
@@ -1859,21 +1918,14 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
		 esw->esw_funcs.num_vfs, esw->enabled_vports);

-	mc_promisc = &esw->mc_promisc;
	mlx5_eswitch_event_handlers_unregister(esw);

-	mlx5_esw_for_all_vports(esw, i, vport)
-		esw_disable_vport(esw, vport);
-
-	if (mc_promisc && mc_promisc->uplink_rule)
-		mlx5_del_flow_rules(mc_promisc->uplink_rule);
-
-	esw_destroy_tsar(esw);
-
	if (esw->mode == MLX5_ESWITCH_LEGACY)
-		esw_destroy_legacy_table(esw);
+		esw_legacy_disable(esw);
	else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
-		esw_offloads_cleanup(esw);
+		esw_offloads_disable(esw);
+
+	esw_destroy_tsar(esw);

	old_mode = esw->mode;
	esw->mode = MLX5_ESWITCH_NONE;

...
@@ -102,6 +102,13 @@ struct mlx5_vport_info {
	bool trusted;
};

+/* Vport context events */
+enum mlx5_eswitch_vport_event {
+	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
+	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
+	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
+};
+
struct mlx5_vport {
	struct mlx5_core_dev *dev;
	int vport;
@@ -123,7 +130,7 @@ struct mlx5_vport {
	} qos;

	bool enabled;
-	u16 enabled_events;
+	enum mlx5_eswitch_vport_event enabled_events;
};

enum offloads_fdb_flags {
@@ -208,8 +215,11 @@ enum {
struct mlx5_eswitch {
	struct mlx5_core_dev *dev;
	struct mlx5_nb nb;
+	/* legacy data structures */
	struct mlx5_eswitch_fdb fdb_table;
	struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
+	struct esw_mc_addr mc_promisc;
+	/* end of legacy */
	struct workqueue_struct *work_queue;
	struct mlx5_vport *vports;
	u32 flags;
@@ -219,7 +229,6 @@ struct mlx5_eswitch {
	 * and async SRIOV admin state changes
	 */
	struct mutex state_lock;
-	struct esw_mc_addr mc_promisc;

	struct {
		bool enabled;
@@ -234,8 +243,8 @@ struct mlx5_eswitch {
	struct mlx5_esw_functions esw_funcs;
};

-void esw_offloads_cleanup(struct mlx5_eswitch *esw);
-int esw_offloads_init(struct mlx5_eswitch *esw);
+void esw_offloads_disable(struct mlx5_eswitch *esw);
+int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -514,6 +523,11 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
	     (vport) = &(esw)->vports[i],	\
	     (i) < (esw)->total_vports; (i)++)

+#define mlx5_esw_for_all_vports_reverse(esw, i, vport)	\
+	for ((i) = (esw)->total_vports - 1;		\
+	     (vport) = &(esw)->vports[i],		\
+	     (i) >= MLX5_VPORT_PF; (i)--)
+
#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs)	\
	for ((i) = MLX5_VPORT_FIRST_VF;			\
	     (vport) = &(esw)->vports[(i)],		\
@@ -575,6 +589,11 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

+void
+mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
+				 enum mlx5_eswitch_vport_event enabled_events);
+void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
+
#else  /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }

...
@@ -594,38 +594,15 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
	mlx5_del_flow_rules(rule);
}

-static int mlx5_eswitch_enable_passing_vport_metadata(struct mlx5_eswitch *esw)
+static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u8 fdb_to_vport_reg_c_id;
	int err;

-	err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
-						   out, sizeof(out));
-	if (err)
-		return err;
-	fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
-					 esw_vport_context.fdb_to_vport_reg_c_id);
-	fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
-	MLX5_SET(modify_esw_vport_context_in, in,
-		 esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
-	MLX5_SET(modify_esw_vport_context_in, in,
-		 field_select.fdb_to_vport_reg_c_id, 1);
-	return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
-						     in, sizeof(in));
-}
-
-static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw)
-{
-	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
-	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
-	u8 fdb_to_vport_reg_c_id;
-	int err;
+	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
+		return 0;

	err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
						   out, sizeof(out));
@@ -635,6 +612,9 @@ static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw)
	fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
					 esw_vport_context.fdb_to_vport_reg_c_id);

+	if (enable)
+		fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
+	else
		fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;

	MLX5_SET(modify_esw_vport_context_in, in,
@@ -2131,7 +2111,7 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type
	return NOTIFY_OK;
}

-int esw_offloads_init(struct mlx5_eswitch *esw)
+int esw_offloads_enable(struct mlx5_eswitch *esw)
{
	int err;
@@ -2145,11 +2125,11 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
	if (err)
		return err;

-	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
-		err = mlx5_eswitch_enable_passing_vport_metadata(esw);
-		if (err)
-			goto err_vport_metadata;
-	}
+	err = esw_set_passing_vport_metadata(esw, true);
+	if (err)
+		goto err_vport_metadata;
+
+	mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);

	err = esw_offloads_load_all_reps(esw);
	if (err)
@@ -2163,8 +2143,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
	return 0;

err_reps:
-	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
-		mlx5_eswitch_disable_passing_vport_metadata(esw);
+	mlx5_eswitch_disable_pf_vf_vports(esw);
+	esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
	esw_offloads_steering_cleanup(esw);
	return err;
@@ -2189,13 +2169,13 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
	return err;
}

-void esw_offloads_cleanup(struct mlx5_eswitch *esw)
+void esw_offloads_disable(struct mlx5_eswitch *esw)
{
	mlx5_rdma_disable_roce(esw->dev);
	esw_offloads_devcom_cleanup(esw);
	esw_offloads_unload_all_reps(esw);
-	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
-		mlx5_eswitch_disable_passing_vport_metadata(esw);
+	mlx5_eswitch_disable_pf_vf_vports(esw);
+	esw_set_passing_vport_metadata(esw, false);
	esw_offloads_steering_cleanup(esw);
	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}

...
@@ -566,7 +566,9 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

-int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
+int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
+			   enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
+			   u32 *id)
{
	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
@@ -574,6 +576,7 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
+	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
@@ -581,6 +584,11 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
	return err;
}

+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
+{
+	return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
+}
+
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
@@ -615,67 +623,24 @@ int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
	return 0;
}

-struct mlx5_cmd_fc_bulk {
-	u32 id;
-	int num;
-	int outlen;
-	u32 out[0];
-};
-
-struct mlx5_cmd_fc_bulk *
-mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num)
+int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
{
-	struct mlx5_cmd_fc_bulk *b;
-	int outlen =
-		MLX5_ST_SZ_BYTES(query_flow_counter_out) +
-		MLX5_ST_SZ_BYTES(traffic_counter) * num;
-
-	b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
-	if (!b)
-		return NULL;
-
-	b->id = id;
-	b->num = num;
-	b->outlen = outlen;
-	return b;
+	return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+		MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
}

-void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
-{
-	kfree(b);
-}
-
-int
-mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
+int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
+			   u32 *out)
{
+	int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
-	MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
-	MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
-	return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
-}
-
-void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
-			  struct mlx5_cmd_fc_bulk *b, u32 id,
-			  u64 *packets, u64 *bytes)
-{
-	int index = id - b->id;
-	void *stats;
-
-	if (index < 0 || index >= b->num) {
-		mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
-			       id, b->id, b->id + b->num - 1);
-		return;
-	}
-
-	stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
-			     flow_statistics[index]);
-	*packets = MLX5_GET64(traffic_counter, stats, packets);
-	*bytes = MLX5_GET64(traffic_counter, stats, octets);
+	MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
+	MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}

int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,

...
@@ -78,20 +78,16 @@ struct mlx5_flow_cmds {
};

int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
+int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
+			   enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
+			   u32 *id);
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id);
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
		      u64 *packets, u64 *bytes);
-
-struct mlx5_cmd_fc_bulk;
-
-struct mlx5_cmd_fc_bulk *
-mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num);
-void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b);
-int
-mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b);
-void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
-			  struct mlx5_cmd_fc_bulk *b, u32 id,
-			  u64 *packets, u64 *bytes);
+int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len);
+int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
+			   u32 *out);

const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type);

...
@@ -75,7 +75,7 @@ struct mlx5_fc {
 * access to counter list:
 * - create (user context)
 *   - mlx5_fc_create() only adds to an addlist to be used by
-*     mlx5_fc_stats_query_work(). addlist is a lockless single linked list
+*     mlx5_fc_stats_work(). addlist is a lockless single linked list
 *     that doesn't require any additional synchronization when adding single
 *     node.
 *   - spawn thread to do the actual destroy
@@ -136,72 +136,69 @@ static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
	spin_unlock(&fc_stats->counters_idr_lock);
}

-/* The function returns the last counter that was queried so the caller
- * function can continue calling it till all counters are queried.
- */
-static struct mlx5_fc *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
-					   struct mlx5_fc *first,
-					   u32 last_id)
+static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
+{
+	return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
+		     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
+}
+
+static void update_counter_cache(int index, u32 *bulk_raw_data,
+				 struct mlx5_fc_cache *cache)
+{
+	void *stats = MLX5_ADDR_OF(query_flow_counter_out, bulk_raw_data,
+				   flow_statistics[index]);
+	u64 packets = MLX5_GET64(traffic_counter, stats, packets);
+	u64 bytes = MLX5_GET64(traffic_counter, stats, octets);
+
+	if (cache->packets == packets)
+		return;
+
+	cache->packets = packets;
+	cache->bytes = bytes;
+	cache->lastuse = jiffies;
+}
+
+static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
+					      struct mlx5_fc *first,
+					      u32 last_id)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
-	struct mlx5_fc *counter = NULL;
-	struct mlx5_cmd_fc_bulk *b;
-	bool more = false;
-	u32 afirst_id;
-	int num;
+	bool query_more_counters = (first->id <= last_id);
+	int max_bulk_len = get_max_bulk_query_len(dev);
+	u32 *data = fc_stats->bulk_query_out;
+	struct mlx5_fc *counter = first;
+	u32 bulk_base_id;
+	int bulk_len;
	int err;

-	int max_bulk = min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
-			     (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
-
-	/* first id must be aligned to 4 when using bulk query */
-	afirst_id = first->id & ~0x3;
-
-	/* number of counters to query inc. the last counter */
-	num = ALIGN(last_id - afirst_id + 1, 4);
-	if (num > max_bulk) {
-		num = max_bulk;
-		last_id = afirst_id + num - 1;
-	}
-
-	b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
-	if (!b) {
-		mlx5_core_err(dev, "Error allocating resources for bulk query\n");
-		return NULL;
-	}
-
-	err = mlx5_cmd_fc_bulk_query(dev, b);
-	if (err) {
-		mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
-		goto out;
-	}
-
-	counter = first;
-	list_for_each_entry_from(counter, &fc_stats->counters, list) {
-		struct mlx5_fc_cache *c = &counter->cache;
-		u64 packets;
-		u64 bytes;
-
-		if (counter->id > last_id) {
-			more = true;
-			break;
-		}
-
-		mlx5_cmd_fc_bulk_get(dev, b,
-				     counter->id, &packets, &bytes);
-		if (c->packets == packets)
-			continue;
-
-		c->packets = packets;
-		c->bytes = bytes;
-		c->lastuse = jiffies;
-	}
-
-out:
-	mlx5_cmd_fc_bulk_free(b);
-	return more ? counter : NULL;
+	while (query_more_counters) {
+		/* first id must be aligned to 4 when using bulk query */
+		bulk_base_id = counter->id & ~0x3;
+
+		/* number of counters to query inc. the last counter */
+		bulk_len = min_t(int, max_bulk_len,
+				 ALIGN(last_id - bulk_base_id + 1, 4));
+
+		err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len,
+					     data);
+		if (err) {
+			mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
+			return;
+		}
+		query_more_counters = false;
+
+		list_for_each_entry_from(counter, &fc_stats->counters, list) {
+			int counter_index = counter->id - bulk_base_id;
+			struct mlx5_fc_cache *cache = &counter->cache;
+
+			if (counter->id >= bulk_base_id + bulk_len) {
+				query_more_counters = true;
+				break;
+			}
+
+			update_counter_cache(counter_index, data, cache);
+		}
+	}
}

static void mlx5_free_fc(struct mlx5_core_dev *dev,
@@ -244,8 +241,8 @@ static void mlx5_fc_stats_work(struct work_struct *work)
	counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
				   list);

-	while (counter)
-		counter = mlx5_fc_stats_query(dev, counter, last->id);
+	if (counter)
+		mlx5_fc_stats_query_counter_range(dev, counter, last->id);

	fc_stats->next_query = now + fc_stats->sampling_interval;
}
@@ -324,6 +321,8 @@ EXPORT_SYMBOL(mlx5_fc_destroy);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+	int max_bulk_len;
+	int max_out_len;

	spin_lock_init(&fc_stats->counters_idr_lock);
	idr_init(&fc_stats->counters_idr);
@@ -331,14 +330,24 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
	init_llist_head(&fc_stats->addlist);
	init_llist_head(&fc_stats->dellist);

+	max_bulk_len = get_max_bulk_query_len(dev);
+	max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
+	fc_stats->bulk_query_out = kzalloc(max_out_len, GFP_KERNEL);
+	if (!fc_stats->bulk_query_out)
+		return -ENOMEM;
+
	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
	if (!fc_stats->wq)
-		return -ENOMEM;
+		goto err_wq_create;

	fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);

	return 0;
+
+err_wq_create:
+	kfree(fc_stats->bulk_query_out);
+	return -ENOMEM;
}

void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
@@ -352,6 +361,8 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
	destroy_workqueue(dev->priv.fc_stats.wq);
	dev->priv.fc_stats.wq = NULL;

+	kfree(fc_stats->bulk_query_out);
+
	idr_destroy(&fc_stats->counters_idr);

	tmplist = llist_del_all(&fc_stats->addlist);

...
@@ -1217,8 +1217,10 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
{
	int err = 0;

-	if (cleanup)
+	if (cleanup) {
+		mlx5_unregister_device(dev);
		mlx5_drain_health_wq(dev);
+	}

	mutex_lock(&dev->intf_state_mutex);
	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
@@ -1369,7 +1371,6 @@ static void remove_one(struct pci_dev *pdev)
	mlx5_crdump_disable(dev);
	mlx5_devlink_unregister(devlink);

-	mlx5_unregister_device(dev);
	if (mlx5_unload_one(dev, true)) {
		mlx5_core_err(dev, "mlx5_unload_one failed\n");

...
@@ -488,6 +488,7 @@ struct mlx5_fc_stats {
	struct delayed_work work;
	unsigned long next_query;
	unsigned long sampling_interval; /* jiffies */
+	u32 *bulk_query_out;
};

struct mlx5_events;

...
@@ -1040,6 +1040,21 @@ enum {
	MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1,
};

+#define MLX5_FC_BULK_SIZE_FACTOR 128
+
+enum mlx5_fc_bulk_alloc_bitmask {
+	MLX5_FC_BULK_128   = (1 << 0),
+	MLX5_FC_BULK_256   = (1 << 1),
+	MLX5_FC_BULK_512   = (1 << 2),
+	MLX5_FC_BULK_1024  = (1 << 3),
+	MLX5_FC_BULK_2048  = (1 << 4),
+	MLX5_FC_BULK_4096  = (1 << 5),
+	MLX5_FC_BULK_8192  = (1 << 6),
+	MLX5_FC_BULK_16384 = (1 << 7),
+};
+
+#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
+
struct mlx5_ifc_cmd_hca_cap_bits {
	u8 reserved_at_0[0x30];
	u8 vhca_id[0x10];
@@ -1244,7 +1259,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
	u8 reserved_at_2e0[0x7];
	u8 max_qp_mcg[0x19];

-	u8 reserved_at_300[0x18];
+	u8 reserved_at_300[0x10];
+	u8 flow_counter_bulk_alloc[0x8];
	u8 log_max_mcg[0x8];

	u8 reserved_at_320[0x3];
@@ -2766,7 +2782,7 @@ struct mlx5_ifc_traffic_counter_bits {
struct mlx5_ifc_tisc_bits {
	u8 strict_lag_tx_port_affinity[0x1];
	u8 tls_en[0x1];
-	u8 reserved_at_1[0x2];
+	u8 reserved_at_2[0x2];
	u8 lag_tx_port_affinity[0x04];

	u8 reserved_at_8[0x4];
@@ -2941,6 +2957,13 @@ enum {
	SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
};

+enum {
+	ELEMENT_TYPE_CAP_MASK_TASR		= 1 << 0,
+	ELEMENT_TYPE_CAP_MASK_VPORT		= 1 << 1,
+	ELEMENT_TYPE_CAP_MASK_VPORT_TC		= 1 << 2,
+	ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC	= 1 << 3,
+};
+
struct mlx5_ifc_scheduling_context_bits {
	u8 element_type[0x8];
	u8 reserved_at_8[0x18];
@@ -7815,7 +7838,8 @@ struct mlx5_ifc_alloc_flow_counter_in_bits {
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];

-	u8 reserved_at_40[0x40];
+	u8 reserved_at_40[0x38];
+	u8 flow_counter_bulk[0x8];
};

struct mlx5_ifc_add_vxlan_udp_dport_out_bits {

...