Commit ed50edfb authored by Jason Gunthorpe

Merge branch 'mlx5-next' into rdma.git

From git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

mlx5 updates taken for dependencies on the following patches.

* branch 'mlx5-next': (23 commits)
  IB/mlx5: Introduce uid as part of alloc/dealloc transport domain
  net/mlx5: Add shared Q counter bits
  net/mlx5: Continue driver initialization despite debugfs failure
  net/mlx5: Fold the modify lag code into function
  net/mlx5: Add lag affinity info to log
  net/mlx5: Split the activate lag function into two routines
  net/mlx5: E-Switch, Introduce flow counter affinity
  IB/mlx5: Unify e-switch representors load approach between uplink and VFs
  net/mlx5: Use lowercase 'X' for hex values
  net/mlx5: Remove duplicated include from eswitch.c
  net/mlx5: Remove the get protocol device interface entry
  net/mlx5: Support extended destination format in flow steering command
  net/mlx5: E-Switch, Change vhca id valid bool field to bit flag
  net/mlx5: Introduce extended destination fields
  net/mlx5: Revise gre and nvgre key formats
  net/mlx5: Add monitor commands layout and event data
  net/mlx5: Add support for plugged-disabled cable status in PME
  net/mlx5: Add support for PCIe power slot exceeded error in PME
  net/mlx5: Rework handling of port module events
  net/mlx5: Move flow counters data structures from flow steering header
  ...
parents bd1c24cc 71bef2fd
@@ -82,7 +82,7 @@ static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
 	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
 
-	if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
+	if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
 	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
 		return cqe;
 	} else {
@@ -197,7 +197,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 	}
 	wc->byte_len = be32_to_cpu(cqe->byte_cnt);
 
-	switch (cqe->op_own >> 4) {
+	switch (get_cqe_opcode(cqe)) {
 	case MLX5_CQE_RESP_WR_IMM:
 		wc->opcode	= IB_WC_RECV_RDMA_WITH_IMM;
 		wc->wc_flags	= IB_WC_WITH_IMM;
@@ -446,7 +446,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 	 */
 	rmb();
 
-	opcode = cqe64->op_own >> 4;
+	opcode = get_cqe_opcode(cqe64);
 	if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
 		if (likely(cq->resize_buf)) {
 			free_cq_buf(dev, &cq->buf);
@@ -1203,7 +1203,7 @@ static int copy_resize_cqes(struct mlx5_ib_cq *cq)
 		return -EINVAL;
 	}
 
-	while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
+	while (get_cqe_opcode(scqe64) != MLX5_CQE_RESIZE_CQ) {
 		dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
 					     (i + 1) & cq->resize_buf->nent);
 		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
...
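Note: the recurring change in these hunks replaces the open-coded `op_own >> 4` shift with a named helper. For reference, the helper this series introduces (its definition also appears in the device header hunk further down) is simply:

    static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
    {
    	/* the CQE opcode lives in the upper four bits of op_own */
    	return cqe->op_own >> 4;
    }
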
@@ -48,13 +48,21 @@ static const struct mlx5_ib_profile rep_profile = {
 static int
 mlx5_ib_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
+	struct mlx5_ib_dev *ibdev;
+
+	ibdev = mlx5_ib_rep_to_dev(rep);
+	if (!__mlx5_ib_add(ibdev, ibdev->profile))
+		return -EINVAL;
+
 	return 0;
 }
 
 static void
 mlx5_ib_nic_rep_unload(struct mlx5_eswitch_rep *rep)
 {
-	rep->rep_if[REP_IB].priv = NULL;
+	struct mlx5_ib_dev *ibdev;
+
+	ibdev = mlx5_ib_rep_to_dev(rep);
+	__mlx5_ib_remove(ibdev, ibdev->profile, MLX5_IB_STAGE_MAX);
 }
 
 static int
@@ -89,6 +97,7 @@ mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 	dev = mlx5_ib_rep_to_dev(rep);
 	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
 	rep->rep_if[REP_IB].priv = NULL;
+	ib_dealloc_device(&dev->ib_dev);
 }
 
 static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
...
@@ -2681,11 +2681,11 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
 			 ntohs(ib_spec->gre.val.protocol));
 
 		memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
-				    gre_key_h),
+				    gre_key.nvgre.hi),
 		       &ib_spec->gre.mask.key,
 		       sizeof(ib_spec->gre.mask.key));
 		memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v,
-				    gre_key_h),
+				    gre_key.nvgre.hi),
 		       &ib_spec->gre.val.key,
 		       sizeof(ib_spec->gre.val.key));
 		break;
@@ -6250,18 +6250,6 @@ static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
 	cancel_delay_drop(dev);
 }
 
-static int mlx5_ib_stage_rep_reg_init(struct mlx5_ib_dev *dev)
-{
-	mlx5_ib_register_vport_reps(dev);
-	return 0;
-}
-
-static void mlx5_ib_stage_rep_reg_cleanup(struct mlx5_ib_dev *dev)
-{
-	mlx5_ib_unregister_vport_reps(dev);
-}
-
 static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
 {
 	dev->mdev_events.notifier_call = mlx5_ib_event;
@@ -6300,8 +6288,6 @@ void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
 		if (profile->stage[stage].cleanup)
 			profile->stage[stage].cleanup(dev);
 	}
-
-	ib_dealloc_device((struct ib_device *)dev);
 }
 
 void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
@@ -6429,9 +6415,6 @@ static const struct mlx5_ib_profile nic_rep_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
 		     mlx5_ib_stage_post_ib_reg_umr_init,
 		     NULL),
-	STAGE_CREATE(MLX5_IB_STAGE_REP_REG,
-		     mlx5_ib_stage_rep_reg_init,
-		     mlx5_ib_stage_rep_reg_cleanup),
 };
 
 static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
@@ -6499,8 +6482,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	if (MLX5_ESWITCH_MANAGER(mdev) &&
 	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
 		dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
-
-		return __mlx5_ib_add(dev, &nic_rep_profile);
+		dev->profile = &nic_rep_profile;
+		mlx5_ib_register_vport_reps(dev);
+		return dev;
 	}
 
 	return __mlx5_ib_add(dev, &pf_profile);
@@ -6522,7 +6506,12 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
 	}
 
 	dev = context;
-	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+	if (dev->profile == &nic_rep_profile)
+		mlx5_ib_unregister_vport_reps(dev);
+	else
+		__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+
+	ib_dealloc_device((struct ib_device *)dev);
 }
 
 static struct mlx5_interface mlx5_ib_interface = {
...
@@ -789,7 +789,6 @@ enum mlx5_ib_stages {
 	MLX5_IB_STAGE_POST_IB_REG_UMR,
 	MLX5_IB_STAGE_DELAY_DROP,
 	MLX5_IB_STAGE_CLASS_ATTR,
-	MLX5_IB_STAGE_REP_REG,
 	MLX5_IB_STAGE_MAX,
 };
...
@@ -373,6 +373,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
 	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
 	case MLX5_CMD_OP_QUERY_Q_COUNTER:
+	case MLX5_CMD_OP_SET_MONITOR_COUNTER:
+	case MLX5_CMD_OP_ARM_MONITOR_COUNTER:
 	case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
 	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
 	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
@@ -522,6 +524,8 @@ const char *mlx5_command_str(int command)
 	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
 	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
 	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
+	MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER);
+	MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER);
 	MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
 	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
 	MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
...
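For context, MLX5_COMMAND_STR_CASE is the stringifying macro used throughout mlx5_command_str(); to the best of our reading its definition in cmd.c is the one-liner below, so each added line maps an opcode to its printable name:

    #define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

    /* e.g. MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER) expands to:
     *     case MLX5_CMD_OP_SET_MONITOR_COUNTER: return "SET_MONITOR_COUNTER";
     */
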
@@ -256,28 +256,6 @@ void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol)
 	mutex_unlock(&mlx5_intf_mutex);
 }
 
-void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
-{
-	struct mlx5_priv *priv = &mdev->priv;
-	struct mlx5_device_context *dev_ctx;
-	unsigned long flags;
-	void *result = NULL;
-
-	spin_lock_irqsave(&priv->ctx_lock, flags);
-
-	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
-		if ((dev_ctx->intf->protocol == protocol) &&
-		     dev_ctx->intf->get_dev) {
-			result = dev_ctx->intf->get_dev(dev_ctx->context);
-			break;
-		}
-
-	spin_unlock_irqrestore(&priv->ctx_lock, flags);
-
-	return result;
-}
-EXPORT_SYMBOL(mlx5_get_protocol_dev);
-
 /* Must be called with intf_mutex held */
 void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
 {
...
@@ -161,10 +161,10 @@ static void print_misc_parameters_hdrs(struct trace_seq *p,
 	PRINT_MASKED_VAL(name, p, format);	   \
 }
 	DECLARE_MASK_VAL(u64, gre_key) = {
-		.m = MLX5_GET(fte_match_set_misc, mask, gre_key_h) << 8 |
-		     MLX5_GET(fte_match_set_misc, mask, gre_key_l),
-		.v = MLX5_GET(fte_match_set_misc, value, gre_key_h) << 8 |
-		     MLX5_GET(fte_match_set_misc, value, gre_key_l)};
+		.m = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.hi) << 8 |
+		     MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.lo),
+		.v = MLX5_GET(fte_match_set_misc, value, gre_key.nvgre.hi) << 8 |
+		     MLX5_GET(fte_match_set_misc, value, gre_key.nvgre.lo)};
 
 	PRINT_MASKED_VAL(gre_key, p, "%llu");
 	PRINT_MASKED_VAL_MISC(u32, source_sqn, source_sqn, p, "%u");
...
@@ -5185,20 +5185,12 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
 	kfree(ppriv);
 }
 
-static void *mlx5e_get_netdev(void *vpriv)
-{
-	struct mlx5e_priv *priv = vpriv;
-
-	return priv->netdev;
-}
-
 static struct mlx5_interface mlx5e_interface = {
 	.add       = mlx5e_add,
 	.remove    = mlx5e_remove,
 	.attach    = mlx5e_attach,
 	.detach    = mlx5e_detach,
 	.protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
-	.get_dev   = mlx5e_get_netdev,
 };
 
 void mlx5e_init(void)
...
@@ -554,9 +554,9 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
 	mlx5_cqwq_pop(&cq->wq);
 
-	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
+	if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
 		netdev_WARN_ONCE(cq->channel->netdev,
-				 "Bad OP in ICOSQ CQE: 0x%x\n", cqe->op_own);
+				 "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
 		return;
 	}
@@ -898,7 +898,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	prefetchw(va); /* xdp_frame data area */
 	prefetch(data);
 
-	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+	if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
 		rq->stats->wqe_err++;
 		return NULL;
 	}
@@ -930,7 +930,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	u16 byte_cnt     = cqe_bcnt - headlen;
 	struct sk_buff *skb;
 
-	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+	if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
 		rq->stats->wqe_err++;
 		return NULL;
 	}
@@ -1154,7 +1154,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	wi->consumed_strides += cstrides;
 
-	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+	if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
 		rq->stats->wqe_err++;
 		goto mpwrq_cqe_out;
 	}
...
@@ -1089,13 +1089,13 @@ static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
 }
 
 static const struct counter_desc mlx5e_pme_status_desc[] = {
-	{ "module_unplug", 8 },
+	{ "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
 };
 
 static const struct counter_desc mlx5e_pme_error_desc[] = {
-	{ "module_bus_stuck", 16 },       /* bus stuck (I2C or data shorted) */
-	{ "module_high_temp", 48 },       /* high temperature */
-	{ "module_bad_shorted", 56 },     /* bad or shorted cable/module */
+	{ "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
+	{ "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
+	{ "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
 };
 
 #define NUM_PME_STATUS_STATS		ARRAY_SIZE(mlx5e_pme_status_desc)
...
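The magic byte offsets (8, 16, 48, 56) become self-describing here: the PME statistics are kept as a flat array of u64 counters indexed by the status/error enums, so each descriptor's offset is sizeof(u64) times the enum value. A hedged sketch of how such a descriptor resolves to a value (illustrative reader, not the driver's exact helper):

    /* Illustrative only: fetch one named PME counter out of a stats blob,
     * assuming counter_desc carries a name plus a byte offset as above.
     */
    static u64 read_pme_stat(const void *stats, const struct counter_desc *desc)
    {
    	return *(const u64 *)((const char *)stats + desc->offset);
    }
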
@@ -507,7 +507,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
 
-		if (unlikely(cqe->op_own >> 4 == MLX5_CQE_REQ_ERR)) {
+		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
 			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
 					      &sq->state)) {
 				mlx5e_dump_error_cqe(sq,
...
@@ -527,6 +527,9 @@ static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
 	if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
 		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);
 
+	if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
+		async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);
+
 	return async_event_mask;
 }
...
@@ -39,7 +39,6 @@
 #include "lib/eq.h"
 #include "eswitch.h"
 #include "fs_core.h"
-#include "lib/eq.h"
 
 #define UPLINK_VPORT 0xFFFF
...
@@ -125,8 +125,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 			dest[i].vport.num = attr->out_rep[j]->vport;
 			dest[i].vport.vhca_id =
 				MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
-			dest[i].vport.vhca_id_valid =
-				!!MLX5_CAP_ESW(esw->dev, merged_eswitch);
+			if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+				dest[i].vport.flags |=
+					MLX5_FLOW_DEST_VPORT_VHCA_ID;
 			i++;
 		}
 	}
@@ -220,7 +221,8 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 		dest[i].vport.num = attr->out_rep[i]->vport;
 		dest[i].vport.vhca_id =
 			MLX5_CAP_GEN(attr->out_mdev[i], vhca_id);
-		dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
+		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
 	}
 	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 	dest[i].ft = fwd_fdb,
...
@@ -117,6 +117,8 @@ static const char *eqe_type_str(u8 type)
 		return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
 	case MLX5_EVENT_TYPE_GENERAL_EVENT:
 		return "MLX5_EVENT_TYPE_GENERAL_EVENT";
+	case MLX5_EVENT_TYPE_MONITOR_COUNTER:
+		return "MLX5_EVENT_TYPE_MONITOR_COUNTER";
 	case MLX5_EVENT_TYPE_DEVICE_TRACER:
 		return "MLX5_EVENT_TYPE_DEVICE_TRACER";
 	default:
@@ -157,23 +159,47 @@ static int temp_warn(struct notifier_block *nb, unsigned long type, void *data)
 }
 
 /* MLX5_EVENT_TYPE_PORT_MODULE_EVENT */
-static const char *mlx5_pme_status[MLX5_MODULE_STATUS_NUM] = {
-	"Cable plugged",   /* MLX5_MODULE_STATUS_PLUGGED    = 0x1 */
-	"Cable unplugged", /* MLX5_MODULE_STATUS_UNPLUGGED  = 0x2 */
-	"Cable error",     /* MLX5_MODULE_STATUS_ERROR      = 0x3 */
-};
+static const char *mlx5_pme_status_to_string(enum port_module_event_status_type status)
+{
+	switch (status) {
+	case MLX5_MODULE_STATUS_PLUGGED:
+		return "Cable plugged";
+	case MLX5_MODULE_STATUS_UNPLUGGED:
+		return "Cable unplugged";
+	case MLX5_MODULE_STATUS_ERROR:
+		return "Cable error";
+	case MLX5_MODULE_STATUS_DISABLED:
+		return "Cable disabled";
+	default:
+		return "Unknown status";
+	}
+}
 
-static const char *mlx5_pme_error[MLX5_MODULE_EVENT_ERROR_NUM] = {
-	"Power budget exceeded",
-	"Long Range for non MLNX cable",
-	"Bus stuck(I2C or data shorted)",
-	"No EEPROM/retry timeout",
-	"Enforce part number list",
-	"Unknown identifier",
-	"High Temperature",
-	"Bad or shorted cable/module",
-	"Unknown status",
-};
+static const char *mlx5_pme_error_to_string(enum port_module_event_error_type error)
+{
+	switch (error) {
+	case MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED:
+		return "Power budget exceeded";
+	case MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX:
+		return "Long Range for non MLNX cable";
+	case MLX5_MODULE_EVENT_ERROR_BUS_STUCK:
+		return "Bus stuck (I2C or data shorted)";
+	case MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT:
+		return "No EEPROM/retry timeout";
+	case MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST:
+		return "Enforce part number list";
+	case MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER:
+		return "Unknown identifier";
+	case MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE:
+		return "High Temperature";
+	case MLX5_MODULE_EVENT_ERROR_BAD_CABLE:
+		return "Bad or shorted cable/module";
+	case MLX5_MODULE_EVENT_ERROR_PCIE_POWER_SLOT_EXCEEDED:
+		return "One or more network ports have been powered down due to insufficient/unadvertised power on the PCIe slot";
+	default:
+		return "Unknown error";
+	}
+}
 
 /* type == MLX5_EVENT_TYPE_PORT_MODULE_EVENT */
 static int port_module(struct notifier_block *nb, unsigned long type, void *data)
@@ -185,6 +211,7 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data)
 	enum port_module_event_status_type module_status;
 	enum port_module_event_error_type error_type;
 	struct mlx5_eqe_port_module *module_event_eqe;
+	const char *status_str, *error_str;
 	u8 module_num;
 
 	module_event_eqe = &eqe->data.port_module;
@@ -193,28 +220,28 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data)
 			PORT_MODULE_EVENT_MODULE_STATUS_MASK;
 	error_type = module_event_eqe->error_type &
 		     PORT_MODULE_EVENT_ERROR_TYPE_MASK;
-	if (module_status < MLX5_MODULE_STATUS_ERROR) {
-		events->pme_stats.status_counters[module_status - 1]++;
-	} else if (module_status == MLX5_MODULE_STATUS_ERROR) {
-		if (error_type >= MLX5_MODULE_EVENT_ERROR_UNKNOWN)
-			/* Unknown error type */
-			error_type = MLX5_MODULE_EVENT_ERROR_UNKNOWN;
-		events->pme_stats.error_counters[error_type]++;
+
+	if (module_status < MLX5_MODULE_STATUS_NUM)
+		events->pme_stats.status_counters[module_status]++;
+	status_str = mlx5_pme_status_to_string(module_status);
+
+	if (module_status == MLX5_MODULE_STATUS_ERROR) {
+		if (error_type < MLX5_MODULE_EVENT_ERROR_NUM)
+			events->pme_stats.error_counters[error_type]++;
+		error_str = mlx5_pme_error_to_string(error_type);
 	}
 
 	if (!printk_ratelimit())
 		return NOTIFY_OK;
 
-	if (module_status < MLX5_MODULE_STATUS_ERROR)
+	if (module_status == MLX5_MODULE_STATUS_ERROR)
+		mlx5_core_err(events->dev,
+			      "Port module event[error]: module %u, %s, %s\n",
+			      module_num, status_str, error_str);
+	else
 		mlx5_core_info(events->dev,
 			       "Port module event: module %u, %s\n",
-			       module_num, mlx5_pme_status[module_status - 1]);
-	else if (module_status == MLX5_MODULE_STATUS_ERROR)
-		mlx5_core_info(events->dev,
-			       "Port module event[error]: module %u, %s, %s\n",
-			       module_num, mlx5_pme_status[module_status - 1],
-			       mlx5_pme_error[error_type]);
+			       module_num, status_str);
 
 	return NOTIFY_OK;
 }
...
@@ -334,7 +334,7 @@ static void mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn,
 {
 	u8 opcode, status = 0;
 
-	opcode = cqe->op_own >> 4;
+	opcode = get_cqe_opcode(cqe);
 
 	switch (opcode) {
 	case MLX5_CQE_REQ_ERR:
...
@@ -308,22 +308,68 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
+static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
+				  struct fs_fte *fte, bool *extended_dest)
+{
+	int fw_log_max_fdb_encap_uplink =
+		MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
+	int num_fwd_destinations = 0;
+	struct mlx5_flow_rule *dst;
+	int num_encap = 0;
+
+	*extended_dest = false;
+	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+		return 0;
+
+	list_for_each_entry(dst, &fte->node.children, node.list) {
+		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+			continue;
+		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+		    dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
+			num_encap++;
+		num_fwd_destinations++;
+	}
+	if (num_fwd_destinations > 1 && num_encap > 0)
+		*extended_dest = true;
+
+	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
+		mlx5_core_warn(dev, "FW does not support extended destination");
+		return -EOPNOTSUPP;
+	}
+	if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
+		mlx5_core_warn(dev, "FW does not support more than %d encaps",
+			       1 << fw_log_max_fdb_encap_uplink);
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
 static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 			    int opmod, int modify_mask,
 			    struct mlx5_flow_table *ft,
 			    unsigned group_id,
 			    struct fs_fte *fte)
 {
-	unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
-		fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
 	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
+	bool extended_dest = false;
 	struct mlx5_flow_rule *dst;
 	void *in_flow_context, *vlan;
 	void *in_match_value;
+	unsigned int inlen;
+	int dst_cnt_size;
 	void *in_dests;
 	u32 *in;
 	int err;
 
+	if (mlx5_set_extended_dest(dev, fte, &extended_dest))
+		return -EOPNOTSUPP;
+
+	if (!extended_dest)
+		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
+	else
+		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);
+
+	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
@@ -343,9 +389,20 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
 
 	MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag);
-	MLX5_SET(flow_context, in_flow_context, action, fte->action.action);
-	MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
-		 fte->action.reformat_id);
+	MLX5_SET(flow_context, in_flow_context, extended_destination,
+		 extended_dest);
+	if (extended_dest) {
+		u32 action;
+
+		action = fte->action.action &
+			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+		MLX5_SET(flow_context, in_flow_context, action, action);
+	} else {
+		MLX5_SET(flow_context, in_flow_context, action,
+			 fte->action.action);
+		MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
+			 fte->action.reformat_id);
+	}
 	MLX5_SET(flow_context, in_flow_context, modify_header_id,
 		 fte->action.modify_id);
@@ -387,10 +444,20 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 				id = dst->dest_attr.vport.num;
 				MLX5_SET(dest_format_struct, in_dests,
 					 destination_eswitch_owner_vhca_id_valid,
-					 dst->dest_attr.vport.vhca_id_valid);
+					 !!(dst->dest_attr.vport.flags &
+					    MLX5_FLOW_DEST_VPORT_VHCA_ID));
 				MLX5_SET(dest_format_struct, in_dests,
 					 destination_eswitch_owner_vhca_id,
 					 dst->dest_attr.vport.vhca_id);
+				if (extended_dest) {
+					MLX5_SET(dest_format_struct, in_dests,
+						 packet_reformat,
+						 !!(dst->dest_attr.vport.flags &
+						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
+					MLX5_SET(extended_dest_format, in_dests,
+						 packet_reformat_id,
+						 dst->dest_attr.vport.reformat_id);
+				}
 				break;
 			default:
 				id = dst->dest_attr.tir_num;
@@ -399,7 +466,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 			MLX5_SET(dest_format_struct, in_dests, destination_type,
 				 type);
 			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
-			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+			in_dests += dst_cnt_size;
 			list_size++;
 		}
@@ -420,7 +487,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
 				 dst->dest_attr.counter_id);
-			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+			in_dests += dst_cnt_size;
 			list_size++;
 		}
 
 		if (list_size > max_list_size) {
...
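To summarize the layout logic above: the extended destination format is engaged only when a flow entry forwards to more than one destination and at least one of them requests packet reformat; every destination entry in the command then grows to the extended stride, and packet_reformat_id moves from the flow context into the per-destination entry. A condensed sketch of the sizing decision, using the names from the hunk:

    /* dst_cnt_size selects the per-entry stride for the destination list */
    dst_cnt_size = extended_dest ? MLX5_ST_SZ_BYTES(extended_dest_format)
    			       : MLX5_ST_SZ_BYTES(dest_format_struct);
    inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
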
@@ -145,29 +145,6 @@ struct mlx5_flow_table {
 	struct rhltable			fgs_hash;
 };
 
-struct mlx5_fc_cache {
-	u64 packets;
-	u64 bytes;
-	u64 lastuse;
-};
-
-struct mlx5_fc {
-	struct list_head list;
-	struct llist_node addlist;
-	struct llist_node dellist;
-
-	/* last{packets,bytes} members are used when calculating the delta since
-	 * last reading
-	 */
-	u64 lastpackets;
-	u64 lastbytes;
-
-	u32 id;
-	bool aging;
-
-	struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
-};
-
 struct mlx5_ft_underlay_qp {
 	struct list_head list;
 	u32 qpn;
...
@@ -41,6 +41,29 @@
 /* Max number of counters to query in bulk read is 32K */
 #define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
 
+struct mlx5_fc_cache {
+	u64 packets;
+	u64 bytes;
+	u64 lastuse;
+};
+
+struct mlx5_fc {
+	struct list_head list;
+	struct llist_node addlist;
+	struct llist_node dellist;
+
+	/* last{packets,bytes} members are used when calculating the delta since
+	 * last reading
+	 */
+	u64 lastpackets;
+	u64 lastbytes;
+
+	u32 id;
+	bool aging;
+
+	struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
+};
+
 /* locking scheme:
  *
  * It is the responsibility of the user to prevent concurrent calls or bad
...
@@ -186,22 +186,57 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
 		*port2 = 1;
 }
 
-static void mlx5_activate_lag(struct mlx5_lag *ldev,
-			      struct lag_tracker *tracker)
+static void mlx5_modify_lag(struct mlx5_lag *ldev,
+			    struct lag_tracker *tracker)
 {
 	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+	u8 v2p_port1, v2p_port2;
 	int err;
 
-	ldev->flags |= MLX5_LAG_FLAG_BONDED;
+	mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1,
+				       &v2p_port2);
+
+	if (v2p_port1 != ldev->v2p_map[0] ||
+	    v2p_port2 != ldev->v2p_map[1]) {
+		ldev->v2p_map[0] = v2p_port1;
+		ldev->v2p_map[1] = v2p_port2;
+
+		mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
+			       ldev->v2p_map[0], ldev->v2p_map[1]);
+
+		err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
+		if (err)
+			mlx5_core_err(dev0,
+				      "Failed to modify LAG (%d)\n",
+				      err);
+	}
+}
+
+static int mlx5_create_lag(struct mlx5_lag *ldev,
+			   struct lag_tracker *tracker)
+{
+	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+	int err;
 
 	mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0],
 				       &ldev->v2p_map[1]);
 
+	mlx5_core_info(dev0, "lag map port 1:%d port 2:%d",
+		       ldev->v2p_map[0], ldev->v2p_map[1]);
+
 	err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]);
 	if (err)
 		mlx5_core_err(dev0,
 			      "Failed to create LAG (%d)\n",
 			      err);
+	return err;
+}
+
+static void mlx5_activate_lag(struct mlx5_lag *ldev,
+			      struct lag_tracker *tracker)
+{
+	ldev->flags |= MLX5_LAG_FLAG_BONDED;
+
+	mlx5_create_lag(ldev, tracker);
 }
 
 static void mlx5_deactivate_lag(struct mlx5_lag *ldev)
@@ -223,8 +258,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
 	struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
 	struct lag_tracker tracker;
-	u8 v2p_port1, v2p_port2;
-	int i, err;
+	int i;
 	bool do_bond;
 
 	if (!dev0 || !dev1)
@@ -246,20 +280,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 		mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
 		mlx5_nic_vport_enable_roce(dev1);
 	} else if (do_bond && mlx5_lag_is_bonded(ldev)) {
-		mlx5_infer_tx_affinity_mapping(&tracker, &v2p_port1,
-					       &v2p_port2);
-
-		if ((v2p_port1 != ldev->v2p_map[0]) ||
-		    (v2p_port2 != ldev->v2p_map[1])) {
-			ldev->v2p_map[0] = v2p_port1;
-			ldev->v2p_map[1] = v2p_port2;
-
-			err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
-			if (err)
-				mlx5_core_err(dev0,
-					      "Failed to modify LAG (%d)\n",
-					      err);
-		}
+		mlx5_modify_lag(ldev, &tracker);
 	} else if (!do_bond && mlx5_lag_is_bonded(ldev)) {
 		mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
 		mlx5_nic_vport_disable_roce(dev1);
...
@@ -51,19 +51,20 @@ enum port_module_event_status_type {
 	MLX5_MODULE_STATUS_PLUGGED   = 0x1,
 	MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
 	MLX5_MODULE_STATUS_ERROR     = 0x3,
-	MLX5_MODULE_STATUS_NUM       = 0x3,
+	MLX5_MODULE_STATUS_DISABLED  = 0x4,
+	MLX5_MODULE_STATUS_NUM,
 };
 
 enum port_module_event_error_type {
-	MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
-	MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
-	MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
-	MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
-	MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
-	MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
-	MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
-	MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
-	MLX5_MODULE_EVENT_ERROR_UNKNOWN,
+	MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED    = 0x0,
+	MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX  = 0x1,
+	MLX5_MODULE_EVENT_ERROR_BUS_STUCK                = 0x2,
+	MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT  = 0x3,
+	MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST = 0x4,
+	MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER       = 0x5,
+	MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE         = 0x6,
+	MLX5_MODULE_EVENT_ERROR_BAD_CABLE                = 0x7,
+	MLX5_MODULE_EVENT_ERROR_PCIE_POWER_SLOT_EXCEEDED = 0xc,
 	MLX5_MODULE_EVENT_ERROR_NUM,
 };
...
@@ -661,11 +661,9 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	priv->numa_node = dev_to_node(&dev->pdev->dev);
 
-	priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
-	if (!priv->dbg_root) {
-		dev_err(&pdev->dev, "Cannot create debugfs dir, aborting\n");
-		return -ENOMEM;
-	}
+	if (mlx5_debugfs_root)
+		priv->dbg_root =
+			debugfs_create_dir(pci_name(pdev), mlx5_debugfs_root);
 
 	err = mlx5_pci_enable_device(dev);
 	if (err) {
...
@@ -177,7 +177,7 @@ static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
 	return mlx5_cqwq_ctr2ix(wq, wq->cc);
 }
 
-static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
+static inline struct mlx5_cqe64 *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
 {
 	return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
 }
...
@@ -330,6 +330,7 @@ enum mlx5_event {
 	MLX5_EVENT_TYPE_TEMP_WARN_EVENT    = 0x17,
 	MLX5_EVENT_TYPE_REMOTE_CONFIG      = 0x19,
 	MLX5_EVENT_TYPE_GENERAL_EVENT      = 0x22,
+	MLX5_EVENT_TYPE_MONITOR_COUNTER    = 0x24,
 	MLX5_EVENT_TYPE_PPS_EVENT          = 0x25,
 
 	MLX5_EVENT_TYPE_DB_BF_CONGESTION   = 0x1a,
@@ -781,6 +782,11 @@ static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
 	return (cqe->op_own >> 2) & 0x3;
 }
 
+static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
+{
+	return cqe->op_own >> 4;
+}
+
 static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
 {
 	return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
...
@@ -1004,12 +1004,10 @@ struct mlx5_interface {
 	void			(*remove)(struct mlx5_core_dev *dev, void *context);
 	int			(*attach)(struct mlx5_core_dev *dev, void *context);
 	void			(*detach)(struct mlx5_core_dev *dev, void *context);
-	void *			(*get_dev)(void *context);
 	int			protocol;
 	struct list_head	list;
 };
 
-void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
 int mlx5_register_interface(struct mlx5_interface *intf);
 void mlx5_unregister_interface(struct mlx5_interface *intf);
 int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
...
@@ -86,6 +86,11 @@ struct mlx5_flow_spec {
 	u32   match_value[MLX5_ST_SZ_DW(fte_match_param)];
 };
 
+enum {
+	MLX5_FLOW_DEST_VPORT_VHCA_ID      = BIT(0),
+	MLX5_FLOW_DEST_VPORT_REFORMAT_ID  = BIT(1),
+};
+
 struct mlx5_flow_destination {
 	enum mlx5_flow_destination_type	type;
 	union {
@@ -96,7 +101,8 @@ struct mlx5_flow_destination {
 		struct {
 			u16		num;
 			u16		vhca_id;
-			bool		vhca_id_valid;
+			u32		reformat_id;
+			u8		flags;
 		} vport;
 	};
 };
...
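A hypothetical caller illustrating the new flags (vport_num, vhca_id and reformat_id are illustrative stand-ins, not values from this diff):

    struct mlx5_flow_destination dest = {
    	.type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
    	.vport = {
    		.num         = vport_num,
    		.vhca_id     = vhca_id,     /* consumed only with ..._VHCA_ID */
    		.reformat_id = reformat_id, /* consumed only with ..._REFORMAT_ID */
    		.flags       = MLX5_FLOW_DEST_VPORT_VHCA_ID |
    			       MLX5_FLOW_DEST_VPORT_REFORMAT_ID,
    	},
    };
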
@@ -85,6 +85,10 @@ enum {
 	MLX5_OBJ_TYPE_UMEM = 0x0005,
 };
 
+enum {
+	MLX5_SHARED_RESOURCE_UID = 0xffff,
+};
+
 enum {
 	MLX5_CMD_OP_QUERY_HCA_CAP                 = 0x100,
 	MLX5_CMD_OP_QUERY_ADAPTER                 = 0x101,
@@ -164,6 +168,8 @@ enum {
 	MLX5_CMD_OP_ALLOC_Q_COUNTER               = 0x771,
 	MLX5_CMD_OP_DEALLOC_Q_COUNTER             = 0x772,
 	MLX5_CMD_OP_QUERY_Q_COUNTER               = 0x773,
+	MLX5_CMD_OP_SET_MONITOR_COUNTER           = 0x774,
+	MLX5_CMD_OP_ARM_MONITOR_COUNTER           = 0x775,
 	MLX5_CMD_OP_SET_PP_RATE_LIMIT             = 0x780,
 	MLX5_CMD_OP_QUERY_RATE_LIMIT              = 0x781,
 	MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT     = 0x782,
@@ -431,6 +437,16 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
 	union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
 };
 
+struct mlx5_ifc_nvgre_key_bits {
+	u8 hi[0x18];
+	u8 lo[0x8];
+};
+
+union mlx5_ifc_gre_key_bits {
+	struct mlx5_ifc_nvgre_key_bits nvgre;
+	u8 key[0x20];
+};
+
 struct mlx5_ifc_fte_match_set_misc_bits {
 	u8         reserved_at_0[0x8];
 	u8         source_sqn[0x18];
@@ -452,8 +468,7 @@ struct mlx5_ifc_fte_match_set_misc_bits {
 	u8         reserved_at_64[0xc];
 	u8         gre_protocol[0x10];
 
-	u8         gre_key_h[0x18];
-	u8         gre_key_l[0x8];
+	union mlx5_ifc_gre_key_bits gre_key;
 
 	u8         vxlan_vni[0x18];
 	u8         reserved_at_b8[0x8];
@@ -607,20 +622,28 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
 	u8      reserved_at_800[0x7800];
 };
 
+enum {
+	MLX5_COUNTER_SOURCE_ESWITCH = 0x0,
+	MLX5_COUNTER_FLOW_ESWITCH   = 0x1,
+};
+
 struct mlx5_ifc_e_switch_cap_bits {
 	u8         vport_svlan_strip[0x1];
 	u8         vport_cvlan_strip[0x1];
 	u8         vport_svlan_insert[0x1];
 	u8         vport_cvlan_insert_if_not_exist[0x1];
 	u8         vport_cvlan_insert_overwrite[0x1];
-	u8         reserved_at_5[0x18];
+	u8         reserved_at_5[0x17];
+	u8         counter_eswitch_affinity[0x1];
 	u8         merged_eswitch[0x1];
 	u8         nic_vport_node_guid_modify[0x1];
 	u8         nic_vport_port_guid_modify[0x1];
 
 	u8         vxlan_encap_decap[0x1];
 	u8         nvgre_encap_decap[0x1];
-	u8         reserved_at_22[0x9];
+	u8         reserved_at_22[0x1];
+	u8         log_max_fdb_encap_uplink[0x5];
+	u8         reserved_at_21[0x3];
 	u8         log_max_packet_reformat_context[0x5];
 	u8         reserved_2b[0x6];
 	u8         max_encap_header_size[0xa];
@@ -1210,7 +1233,13 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         sw_owner_id[0x1];
 	u8         reserved_at_61f[0x1];
 
-	u8         reserved_at_620[0x80];
+	u8         max_num_of_monitor_counters[0x10];
+	u8         num_ppcnt_monitor_counters[0x10];
+
+	u8         reserved_at_640[0x10];
+	u8         num_q_monitor_counters[0x10];
+
+	u8         reserved_at_660[0x40];
 
 	u8         uctx_cap[0x20];
@@ -1230,8 +1259,10 @@ enum mlx5_flow_destination_type {
 struct mlx5_ifc_dest_format_struct_bits {
 	u8         destination_type[0x8];
 	u8         destination_id[0x18];
 	u8         destination_eswitch_owner_vhca_id_valid[0x1];
-	u8         reserved_at_21[0xf];
+	u8         packet_reformat[0x1];
+	u8         reserved_at_22[0xe];
 	u8         destination_eswitch_owner_vhca_id[0x10];
 };
 
@@ -1241,6 +1272,14 @@ struct mlx5_ifc_flow_counter_list_bits {
 	u8         reserved_at_20[0x20];
 };
 
+struct mlx5_ifc_extended_dest_format_bits {
+	struct mlx5_ifc_dest_format_struct_bits destination_entry;
+
+	u8         packet_reformat_id[0x20];
+
+	u8         reserved_at_60[0x20];
+};
+
 union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
 	struct mlx5_ifc_dest_format_struct_bits dest_format_struct;
 	struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
@@ -2462,7 +2501,8 @@ struct mlx5_ifc_flow_context_bits {
 	u8         reserved_at_60[0x10];
 	u8         action[0x10];
 
-	u8         reserved_at_80[0x8];
+	u8         extended_destination[0x1];
+	u8         reserved_at_80[0x7];
 	u8         destination_list_size[0x18];
 
 	u8         reserved_at_a0[0x8];
@@ -3818,6 +3858,83 @@ enum {
 	MLX5_VPORT_STATE_OP_MOD_ESW_VPORT  = 0x1,
 };
 
+struct mlx5_ifc_arm_monitor_counter_in_bits {
+	u8         opcode[0x10];
+	u8         uid[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x20];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_arm_monitor_counter_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+enum {
+	MLX5_QUERY_MONITOR_CNT_TYPE_PPCNT     = 0x0,
+	MLX5_QUERY_MONITOR_CNT_TYPE_Q_COUNTER = 0x1,
+};
+
+enum mlx5_monitor_counter_ppcnt {
+	MLX5_QUERY_MONITOR_PPCNT_IN_RANGE_LENGTH_ERRORS      = 0x0,
+	MLX5_QUERY_MONITOR_PPCNT_OUT_OF_RANGE_LENGTH_FIELD   = 0x1,
+	MLX5_QUERY_MONITOR_PPCNT_FRAME_TOO_LONG_ERRORS       = 0x2,
+	MLX5_QUERY_MONITOR_PPCNT_FRAME_CHECK_SEQUENCE_ERRORS = 0x3,
+	MLX5_QUERY_MONITOR_PPCNT_ALIGNMENT_ERRORS            = 0x4,
+	MLX5_QUERY_MONITOR_PPCNT_IF_OUT_DISCARDS             = 0x5,
+};
+
+enum {
+	MLX5_QUERY_MONITOR_Q_COUNTER_RX_OUT_OF_BUFFER = 0x4,
+};
+
+struct mlx5_ifc_monitor_counter_output_bits {
+	u8         reserved_at_0[0x4];
+	u8         type[0x4];
+	u8         reserved_at_8[0x8];
+	u8         counter[0x10];
+
+	u8         counter_group_id[0x20];
+};
+
+#define MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 (6)
+#define MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1    (1)
+#define MLX5_CMD_SET_MONITOR_NUM_COUNTER            (MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 +\
+						     MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1)
+
+struct mlx5_ifc_set_monitor_counter_in_bits {
+	u8         opcode[0x10];
+	u8         uid[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x10];
+	u8         num_of_counters[0x10];
+
+	u8         reserved_at_60[0x20];
+
+	struct mlx5_ifc_monitor_counter_output_bits monitor_counter[MLX5_CMD_SET_MONITOR_NUM_COUNTER];
+};
+
+struct mlx5_ifc_set_monitor_counter_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
 struct mlx5_ifc_query_vport_state_in_bits {
 	u8         opcode[0x10];
 	u8         reserved_at_10[0x10];
@@ -4683,7 +4800,7 @@ enum {
 	MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS    = 0x0,
 	MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS  = 0x1,
 	MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS    = 0x2,
-	MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0X3,
+	MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
 };
 
 struct mlx5_ifc_query_flow_group_out_bits {
@@ -6589,7 +6706,7 @@ struct mlx5_ifc_dealloc_transport_domain_out_bits {
 
 struct mlx5_ifc_dealloc_transport_domain_in_bits {
 	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
+	u8         uid[0x10];
 
 	u8         reserved_at_20[0x10];
 	u8         op_mod[0x10];
@@ -7442,7 +7559,7 @@ struct mlx5_ifc_alloc_transport_domain_out_bits {
 
 struct mlx5_ifc_alloc_transport_domain_in_bits {
 	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
+	u8         uid[0x10];
 
 	u8         reserved_at_20[0x10];
 	u8         op_mod[0x10];
@@ -7464,7 +7581,7 @@ struct mlx5_ifc_alloc_q_counter_out_bits {
 
 struct mlx5_ifc_alloc_q_counter_in_bits {
 	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
+	u8         uid[0x10];
 
 	u8         reserved_at_20[0x10];
 	u8         op_mod[0x10];
...
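The reserved_at_10-to-uid renames above tie into the transport-domain uid patch at the top of this merge: callers can now carry a user-context id through these commands. A hedged sketch of how a caller would populate the field (buffer setup only, error handling omitted; uid value is whatever context the caller owns, 0 for kernel-owned):

    u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};

    MLX5_SET(alloc_transport_domain_in, in, opcode,
    	 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
    MLX5_SET(alloc_transport_domain_in, in, uid, uid);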