Commit 95302c39 authored by David S. Miller

Merge tag 'mlx5e-updates-2018-12-11' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5e-updates-2018-12-11

From Eli Britstein,
Patches 1-10 add remote mirroring support.
Patches 1-4 refactor encap-related code as pre-steps for using
per-destination encapsulation properties.
Patches 5-7 use the extended destination feature for single/multi
destination scenarios that have a single encap destination.
Patches 8-10 enable multiple encap destinations for a TC flow.

From Daniel Jurgens,
Patch 11, use CQE padding for Ethernet CQs; on PPC this showed up to
a 24% improvement in small-packet throughput.

From Eyal Davidovich,
Patches 12-14, FW monitor counter support.
The FW monitor counters feature addresses the delayed reporting of FW
stats in the atomic get_stats64 ndo: since the FW cannot be accessed
at that stage, the feature enables immediate FW stats updates in the
driver via FW events on specific stats updates.

Patch 12, cleanup to avoid querying a FW counter when it is not
supported.
Patch 13, monitor counters FW commands support.
Patch 14, use monitor counters in the Ethernet netdevice to update FW
stats reported in the atomic get_stats64 ndo.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 43d4b297 5c7e8bbb
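For orientation before the diff: the monitor-counter machinery below is an
event-driven loop. The driver tells FW which counters to watch, arms the
monitor, and on each MONITOR_COUNTER event refreshes its cached stats from a
workqueue and re-arms; the atomic get_stats64 ndo then only reads the cache.
A minimal standalone sketch of that loop, in plain C with hypothetical
stand-in names rather than the driver's functions:

#include <stdio.h>

static unsigned long cached_stats;     /* what get_stats64() reads */

static void query_fw_counters(void)    /* stand-in for the FW stats query */
{
	cached_stats++;                /* pretend FW reported growth */
}

static void rearm_monitor(void)        /* stand-in for ARM_MONITOR_COUNTER */
{
	printf("monitor re-armed\n");
}

/* Runs from a workqueue, where FW access is allowed. */
static void monitor_counters_work(void)
{
	query_fw_counters();
	rearm_monitor();
}

/* Atomic context: may only read the cache, never touch FW. */
static void get_stats64(void)
{
	printf("stats = %lu\n", cached_stats);
}

int main(void)
{
	monitor_counters_work();       /* as if a FW event just fired */
	get_stats64();                 /* the ndo sees fresh numbers */
	return 0;
}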
@@ -22,7 +22,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
#
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
-	en_selftest.o en/port.o
+	en_selftest.o en/port.o en/monitor_stats.o
#
# Netdev extra
......
@@ -685,6 +685,8 @@ struct mlx5e_priv {
struct work_struct set_rx_mode_work;
struct work_struct tx_timeout_work;
struct work_struct update_stats_work;
+ struct work_struct monitor_counters_work;
+ struct mlx5_nb monitor_counters_nb;
struct mlx5_core_dev *mdev;
struct net_device *netdev;
@@ -940,6 +942,7 @@ int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
+ void mlx5e_update_ndo_stats(struct mlx5e_priv *priv);
void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
int mlx5e_bits_invert(unsigned long a, int size);
......
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Mellanox Technologies. */
#include "en.h"
#include "monitor_stats.h"
#include "lib/eq.h"
/* Driver will set the following watch counters list:
* Ppcnt.802_3:
* a_in_range_length_errors Type: 0x0, Counter: 0x0, group_id = N/A
* a_out_of_range_length_field Type: 0x0, Counter: 0x1, group_id = N/A
* a_frame_too_long_errors Type: 0x0, Counter: 0x2, group_id = N/A
* a_frame_check_sequence_errors Type: 0x0, Counter: 0x3, group_id = N/A
* a_alignment_errors Type: 0x0, Counter: 0x4, group_id = N/A
* if_out_discards Type: 0x0, Counter: 0x5, group_id = N/A
* Q_Counters:
* Q[index].rx_out_of_buffer Type: 0x1, Counter: 0x4, group_id = counter_ix
*/
#define NUM_REQ_PPCNT_COUNTER_S1 MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1
#define NUM_REQ_Q_COUNTERS_S1 MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1
int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
if (!MLX5_CAP_GEN(mdev, max_num_of_monitor_counters))
return false;
if (MLX5_CAP_PCAM_REG(mdev, ppcnt) &&
MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters) <
NUM_REQ_PPCNT_COUNTER_S1)
return false;
if (MLX5_CAP_GEN(mdev, num_q_monitor_counters) <
NUM_REQ_Q_COUNTERS_S1)
return false;
return true;
}
void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv)
{
u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {};
u32 out[MLX5_ST_SZ_DW(arm_monitor_counter_out)] = {};
MLX5_SET(arm_monitor_counter_in, in, opcode,
MLX5_CMD_OP_ARM_MONITOR_COUNTER);
mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
}
static void mlx5e_monitor_counters_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
monitor_counters_work);
mutex_lock(&priv->state_lock);
mlx5e_update_ndo_stats(priv);
mutex_unlock(&priv->state_lock);
mlx5e_monitor_counter_arm(priv);
}
static int mlx5e_monitor_event_handler(struct notifier_block *nb,
unsigned long event, void *eqe)
{
struct mlx5e_priv *priv = mlx5_nb_cof(nb, struct mlx5e_priv,
monitor_counters_nb);
queue_work(priv->wq, &priv->monitor_counters_work);
return NOTIFY_OK;
}
void mlx5e_monitor_counter_start(struct mlx5e_priv *priv)
{
MLX5_NB_INIT(&priv->monitor_counters_nb, mlx5e_monitor_event_handler,
MONITOR_COUNTER);
mlx5_eq_notifier_register(priv->mdev, &priv->monitor_counters_nb);
}
static void mlx5e_monitor_counter_stop(struct mlx5e_priv *priv)
{
mlx5_eq_notifier_unregister(priv->mdev, &priv->monitor_counters_nb);
cancel_work_sync(&priv->monitor_counters_work);
}
static int fill_monitor_counter_ppcnt_set1(int cnt, u32 *in)
{
enum mlx5_monitor_counter_ppcnt ppcnt_cnt;
for (ppcnt_cnt = 0;
ppcnt_cnt < NUM_REQ_PPCNT_COUNTER_S1;
ppcnt_cnt++, cnt++) {
MLX5_SET(set_monitor_counter_in, in,
monitor_counter[cnt].type,
MLX5_QUERY_MONITOR_CNT_TYPE_PPCNT);
MLX5_SET(set_monitor_counter_in, in,
monitor_counter[cnt].counter,
ppcnt_cnt);
}
return ppcnt_cnt;
}
static int fill_monitor_counter_q_counter_set1(int cnt, int q_counter, u32 *in)
{
MLX5_SET(set_monitor_counter_in, in,
monitor_counter[cnt].type,
MLX5_QUERY_MONITOR_CNT_TYPE_Q_COUNTER);
MLX5_SET(set_monitor_counter_in, in,
monitor_counter[cnt].counter,
MLX5_QUERY_MONITOR_Q_COUNTER_RX_OUT_OF_BUFFER);
MLX5_SET(set_monitor_counter_in, in,
monitor_counter[cnt].counter_group_id,
q_counter);
return 1;
}
/* callers must check mlx5e_monitor_counter_supported() before calling this function */
static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
int max_num_of_counters = MLX5_CAP_GEN(mdev, max_num_of_monitor_counters);
int num_q_counters = MLX5_CAP_GEN(mdev, num_q_monitor_counters);
int num_ppcnt_counters = !MLX5_CAP_PCAM_REG(mdev, ppcnt) ? 0 :
MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters);
u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
u32 out[MLX5_ST_SZ_DW(set_monitor_counter_out)] = {};
int q_counter = priv->q_counter;
int cnt = 0;
if (num_ppcnt_counters >= NUM_REQ_PPCNT_COUNTER_S1 &&
max_num_of_counters >= (NUM_REQ_PPCNT_COUNTER_S1 + cnt))
cnt += fill_monitor_counter_ppcnt_set1(cnt, in);
if (num_q_counters >= NUM_REQ_Q_COUNTERS_S1 &&
max_num_of_counters >= (NUM_REQ_Q_COUNTERS_S1 + cnt) &&
q_counter)
cnt += fill_monitor_counter_q_counter_set1(cnt, q_counter, in);
MLX5_SET(set_monitor_counter_in, in, num_of_counters, cnt);
MLX5_SET(set_monitor_counter_in, in, opcode,
MLX5_CMD_OP_SET_MONITOR_COUNTER);
mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
/* callers must check mlx5e_monitor_counter_supported() before calling this function */
void mlx5e_monitor_counter_init(struct mlx5e_priv *priv)
{
INIT_WORK(&priv->monitor_counters_work, mlx5e_monitor_counters_work);
mlx5e_monitor_counter_start(priv);
mlx5e_set_monitor_counter(priv);
mlx5e_monitor_counter_arm(priv);
queue_work(priv->wq, &priv->update_stats_work);
}
static void mlx5e_monitor_counter_disable(struct mlx5e_priv *priv)
{
u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
u32 out[MLX5_ST_SZ_DW(set_monitor_counter_out)] = {};
MLX5_SET(set_monitor_counter_in, in, num_of_counters, 0);
MLX5_SET(set_monitor_counter_in, in, opcode,
MLX5_CMD_OP_SET_MONITOR_COUNTER);
mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
}
/* callers must check mlx5e_monitor_counter_supported() before calling this function */
void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv)
{
mlx5e_monitor_counter_disable(priv);
mlx5e_monitor_counter_stop(priv);
}
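A note on ordering, inferred from the code above: mlx5e_monitor_counter_init()
registers the event handler (mlx5e_monitor_counter_start) before issuing the
SET and ARM commands, so an event raised immediately after arming cannot be
lost; mlx5e_monitor_counter_cleanup() tears down in reverse, disabling the FW
monitor before unregistering the handler and flushing the work.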
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */
#ifndef __MLX5_MONITOR_H__
#define __MLX5_MONITOR_H__
int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv);
void mlx5e_monitor_counter_init(struct mlx5e_priv *priv);
void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv);
void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv);
#endif /* __MLX5_MONITOR_H__ */
@@ -50,6 +50,7 @@
#include "en/port.h"
#include "en/xdp.h"
#include "lib/eq.h"
#include "en/monitor_stats.h"
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
@@ -263,7 +264,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
mlx5e_stats_grps[i].update_stats(priv);
}
- static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
+ void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
int i;
@@ -2224,6 +2225,8 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
void *cqc = param->cqc;
MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
+ if (MLX5_CAP_GEN(priv->mdev, cqe_128_always) && cache_line_size() >= 128)
+	MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}
static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
@@ -3457,8 +3460,10 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct mlx5e_vport_stats *vstats = &priv->stats.vport;
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
- /* update HW stats in background for next time */
- mlx5e_queue_update_stats(priv);
+ if (!mlx5e_monitor_counter_supported(priv)) {
+	/* update HW stats in background for next time */
+	mlx5e_queue_update_stats(priv);
+ }
if (mlx5e_is_uplink_rep(priv)) {
stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
@@ -4899,6 +4904,8 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5_lag_add(mdev, netdev);
mlx5e_enable_async_events(priv);
+ if (mlx5e_monitor_counter_supported(priv))
+	mlx5e_monitor_counter_init(priv);
if (MLX5_ESWITCH_MANAGER(priv->mdev))
mlx5e_register_vport_reps(priv);
@@ -4938,6 +4945,9 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
if (MLX5_ESWITCH_MANAGER(priv->mdev))
mlx5e_unregister_vport_reps(priv);
+ if (mlx5e_monitor_counter_supported(priv))
+	mlx5e_monitor_counter_cleanup(priv);
mlx5e_disable_async_events(priv);
mlx5_lag_remove(mdev);
}
......
@@ -483,6 +483,9 @@ static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
+ #define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
+	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)
static void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
@@ -491,6 +494,9 @@ static void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
void *out;
+ if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
+	return;
MLX5_SET(ppcnt_reg, in, local_port, 1);
out = pstats->IEEE_802_3_counters;
MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
@@ -603,6 +609,9 @@ static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
void *out;
+ if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
+	return;
MLX5_SET(ppcnt_reg, in, local_port, 1);
out = pstats->RFC_2819_counters;
MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
@@ -1078,6 +1087,9 @@ static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
int prio;
void *out;
+ if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
+	return;
MLX5_SET(ppcnt_reg, in, local_port, 1);
MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
......
@@ -281,13 +281,16 @@ enum mlx5_flow_match_level {
/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 2
+ enum {
+	MLX5_ESW_DEST_ENCAP = BIT(0),
+	MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
+ };
struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
- struct mlx5_eswitch_rep *out_rep[MLX5_MAX_FLOW_FWD_VPORTS];
- struct mlx5_core_dev *out_mdev[MLX5_MAX_FLOW_FWD_VPORTS];
struct mlx5_core_dev *in_mdev;
- int mirror_count;
+ int split_count;
int out_count;
int action;
@@ -296,7 +299,12 @@ struct mlx5_esw_flow_attr {
u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
u8 total_vlan;
bool vlan_handled;
- u32 encap_id;
+ struct {
+	u32 flags;
+	struct mlx5_eswitch_rep *rep;
+	struct mlx5_core_dev *mdev;
+	u32 encap_id;
+ } dests[MLX5_MAX_FLOW_FWD_VPORTS];
u32 mod_hdr_id;
u8 match_level;
struct mlx5_fc *counter;
......
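The struct change above is what enables remote mirroring to multiple tunnels:
encap state moves from one flow-wide encap_id into the per-destination dests[]
array, so each forwarding destination can carry its own reformat id. A toy
illustration of the layout, with simplified stand-in types rather than the
kernel structs (the flag names echo the new enum; the id values 5 and 9 are
made up):

#include <stdio.h>

#define ESW_DEST_ENCAP        (1u << 0)   /* mirrors MLX5_ESW_DEST_ENCAP */
#define ESW_DEST_ENCAP_VALID  (1u << 1)

struct dest {
	unsigned int flags;
	unsigned int encap_id;
};

int main(void)
{
	/* one flow mirrored to two tunnel endpoints */
	struct dest dests[2] = {
		{ ESW_DEST_ENCAP | ESW_DEST_ENCAP_VALID, 5 },
		{ ESW_DEST_ENCAP | ESW_DEST_ENCAP_VALID, 9 },
	};
	for (int i = 0; i < 2; i++)
		if (dests[i].flags & ESW_DEST_ENCAP)
			printf("dest %d -> reformat_id %u\n",
			       i, dests[i].encap_id);
	return 0;
}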
@@ -81,7 +81,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
{
struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
- bool mirror = !!(attr->mirror_count);
+ bool split = !!(attr->split_count);
struct mlx5_flow_handle *rule;
struct mlx5_flow_table *fdb;
int j, i = 0;
@@ -120,14 +120,21 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
dest[i].ft = ft;
i++;
} else {
- for (j = attr->mirror_count; j < attr->out_count; j++) {
+ for (j = attr->split_count; j < attr->out_count; j++) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport.num = attr->out_rep[j]->vport;
+ dest[i].vport.num = attr->dests[j].rep->vport;
dest[i].vport.vhca_id =
-	MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
+	MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
dest[i].vport.flags |=
MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
+	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+	flow_act.reformat_id = attr->dests[j].encap_id;
+	dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
+	dest[i].vport.reformat_id =
+		attr->dests[j].encap_id;
+ }
i++;
}
}
@@ -164,10 +171,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_id = attr->mod_hdr_id;
- if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
-	flow_act.reformat_id = attr->encap_id;
- fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!mirror);
+ fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
if (IS_ERR(fdb)) {
rule = ERR_CAST(fdb);
goto err_esw_get;
@@ -182,7 +186,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
return rule;
err_add_rule:
- esw_put_prio_table(esw, attr->chain, attr->prio, !!mirror);
+ esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
if (attr->dest_chain)
esw_put_prio_table(esw, attr->dest_chain, 1, 0);
@@ -216,13 +220,17 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- for (i = 0; i < attr->mirror_count; i++) {
+ for (i = 0; i < attr->split_count; i++) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport.num = attr->out_rep[i]->vport;
+ dest[i].vport.num = attr->dests[i].rep->vport;
dest[i].vport.vhca_id =
-	MLX5_CAP_GEN(attr->out_mdev[i], vhca_id);
+	MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
+	dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
+	dest[i].vport.reformat_id = attr->dests[i].encap_id;
+ }
}
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = fwd_fdb,
@@ -270,7 +278,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *attr,
bool fwd_rule)
{
- bool mirror = (attr->mirror_count > 0);
+ bool split = (attr->split_count > 0);
mlx5_del_flow_rules(rule);
esw->offloads.num_flows--;
@@ -279,7 +287,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
esw_put_prio_table(esw, attr->chain, attr->prio, 1);
esw_put_prio_table(esw, attr->chain, attr->prio, 0);
} else {
- esw_put_prio_table(esw, attr->chain, attr->prio, !!mirror);
+ esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
if (attr->dest_chain)
esw_put_prio_table(esw, attr->dest_chain, 1, 0);
}
@@ -327,7 +335,7 @@ esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
in_rep = attr->in_rep;
- out_rep = attr->out_rep[0];
+ out_rep = attr->dests[0].rep;
if (push)
vport = in_rep;
@@ -348,7 +356,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
goto out_notsupp;
in_rep = attr->in_rep;
- out_rep = attr->out_rep[0];
+ out_rep = attr->dests[0].rep;
if (push && in_rep->vport == FDB_UPLINK_VPORT)
goto out_notsupp;
@@ -400,7 +408,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
- if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) {
+ if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT) {
vport->vlan_refcount++;
attr->vlan_handled = true;
}
@@ -460,7 +468,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
- if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT)
+ if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT)
vport->vlan_refcount--;
return 0;
......
@@ -1373,7 +1373,10 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
{
if (d1->type == d2->type) {
if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
- d1->vport.num == d2->vport.num) ||
+ d1->vport.num == d2->vport.num &&
+ d1->vport.flags == d2->vport.flags &&
+ ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
+  (d1->vport.reformat_id == d2->vport.reformat_id) : true)) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
d1->ft == d2->ft) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
......
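With per-destination reformat ids, two rules forwarding to the same vport are
no longer interchangeable unless their encap matches too, which is why the
comparison above gains the flags and reformat_id checks. A standalone sketch
of the extended predicate, with hypothetical simplified types (the real code
also compares destination types, flow tables, and TIRs):

#include <stdbool.h>
#include <stdio.h>

#define DEST_VPORT_REFORMAT_ID (1u << 1)  /* stand-in flag bit */

struct vport_dest {
	int num;
	unsigned int flags;
	unsigned int reformat_id;
};

static bool vport_dests_cmp(const struct vport_dest *d1,
			    const struct vport_dest *d2)
{
	return d1->num == d2->num &&
	       d1->flags == d2->flags &&
	       (!(d1->flags & DEST_VPORT_REFORMAT_ID) ||
		d1->reformat_id == d2->reformat_id);
}

int main(void)
{
	struct vport_dest a = { 1, DEST_VPORT_REFORMAT_ID, 5 };
	struct vport_dest b = { 1, DEST_VPORT_REFORMAT_ID, 9 };

	/* same vport, different encap: no longer considered equal */
	printf("%d\n", vport_dests_cmp(&a, &b));   /* prints 0 */
	return 0;
}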
@@ -155,7 +155,8 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *cqc, struct mlx5_cqwq *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
- u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) + 6;
+ /* CQE_STRIDE_128 and CQE_STRIDE_128_PAD both mean 128B stride */
+ u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) == CQE_STRIDE_64 ? 6 : 7;
u8 log_wq_sz = MLX5_GET(cqc, cqc, log_cq_size);
int err;
......
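The old formula, cqe_sz + 6, happened to work because CQE_STRIDE_64 is 0 and
CQE_STRIDE_128 is 1; with CQE_STRIDE_128_PAD = 2 it would compute a bogus
2^8 = 256B stride, hence the explicit ternary. A self-contained check, with
the enum values copied from the mlx5 header change later in this diff:

#include <assert.h>

enum {
	CQE_STRIDE_64      = 0,
	CQE_STRIDE_128     = 1,
	CQE_STRIDE_128_PAD = 2,
};

static unsigned char log_wq_stride(int cqe_sz)
{
	/* both 128B formats, padded or not, use a 2^7 = 128B stride */
	return cqe_sz == CQE_STRIDE_64 ? 6 : 7;
}

int main(void)
{
	assert(log_wq_stride(CQE_STRIDE_64) == 6);        /* 2^6 = 64B  */
	assert(log_wq_stride(CQE_STRIDE_128) == 7);       /* 2^7 = 128B */
	assert(log_wq_stride(CQE_STRIDE_128_PAD) == 7);   /* not 2^8   */
	/* the old "cqe_sz + 6" would have returned 8 for the padded format */
	assert(CQE_STRIDE_128_PAD + 6 == 8);
	return 0;
}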
@@ -179,7 +179,12 @@ static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
static inline struct mlx5_cqe64 *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
{
- return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
+ struct mlx5_cqe64 *cqe = mlx5_frag_buf_get_wqe(&wq->fbc, ix);
+ /* For 128B CQEs the data is in the last 64B */
+ cqe += wq->fbc.log_stride == 7;
+ return cqe;
}
static inline u32 mlx5_cqwq_get_ctr_wrap_cnt(struct mlx5_cqwq *wq, u32 ctr)
......
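The one-line bump above relies on pointer arithmetic: mlx5_cqe64 is 64 bytes,
so when the stride is 128B (log_stride == 7) adding 1 advances exactly half a
stride, landing on the 64B that actually hold the CQE data. A quick standalone
demonstration, with a stand-in struct under the same size assumption:

#include <stdio.h>

struct cqe64 {
	unsigned char data[64];        /* stand-in for struct mlx5_cqe64 */
};

int main(void)
{
	struct cqe64 stride[2];        /* one 128B-padded CQE slot */
	struct cqe64 *cqe = &stride[0];
	int log_stride = 7;            /* 128B CQEs */

	cqe += (log_stride == 7);      /* advance 64B into the stride */
	printf("offset = %ld\n",
	       (long)((char *)cqe - (char *)stride));   /* prints 64 */
	return 0;
}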
@@ -125,9 +125,9 @@ struct mlx5_cq_modify_params {
};
enum {
- CQE_SIZE_64 = 0,
- CQE_SIZE_128 = 1,
- CQE_SIZE_128_PAD = 2,
+ CQE_STRIDE_64 = 0,
+ CQE_STRIDE_128 = 1,
+ CQE_STRIDE_128_PAD = 2,
};
#define MLX5_MAX_CQ_PERIOD (BIT(__mlx5_bit_sz(cqc, cq_period)) - 1)
@@ -135,8 +135,8 @@ enum {
static inline int cqe_sz_to_mlx_sz(u8 size, int padding_128_en)
{
- return padding_128_en ? CQE_SIZE_128_PAD :
-        size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
+ return padding_128_en ? CQE_STRIDE_128_PAD :
+        size == 64 ? CQE_STRIDE_64 : CQE_STRIDE_128;
}
static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
......
@@ -8283,7 +8283,9 @@ struct mlx5_ifc_pcam_regs_5000_to_507f_bits {
u8 port_access_reg_cap_mask_31_to_13[0x13];
u8 pbmc[0x1];
u8 pptb[0x1];
- u8 port_access_reg_cap_mask_10_to_0[0xb];
+ u8 port_access_reg_cap_mask_10_to_09[0x2];
+ u8 ppcnt[0x1];
+ u8 port_access_reg_cap_mask_07_to_00[0x8];
};
struct mlx5_ifc_pcam_reg_bits {
......
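As a width check on the new pcam_regs layout: the replaced 0xb-bit reserved
run splits into 0x2 + 0x1 (ppcnt) + 0x8, so the capability mask still totals
0x20 bits either way: 0x13 + 0x1 + 0x1 + 0xb = 0x13 + 0x1 + 0x1 + 0x2 + 0x1 + 0x8 = 32.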