Commit 874fcf1d authored by David S. Miller

Merge tag 'mlx5e-updates-2018-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5e-updates-2018-05-25

This series includes updates for mlx5e netdev driver.

1) Allow flow-based VF vport mirroring under the SR-IOV switchdev
scheme; add support for offloading the TC mirred mirror sub-action.
From Chris Mi.

=================
From: Or Gerlitz <ogerlitz@mellanox.com>

The user will typically set the action order such that the mirror
port (the mirror VF) sees packets as the original port (the VF under
mirroring) sent them, or as it will receive them. In the general case,
this means packets are potentially sent to the mirror port before or
after some actions have been applied to them.

To do that properly, we follow the exact action order as set for the
flow and make sure the same order holds when we program the HW
offload.

If all the actions apply before forwarding to the mirror and
destination ports, mirroring is just multicasting to the two vports.
Otherwise, we split the TC flow into two HW rules: the first applies
only the actions needed up to the mirror (if there are any), and the
second applies the rest of the actions plus the forwarding to the
destination vport.
=================
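
Schematically, in terms of the driver functions this series adds (see
the diff below; error handling omitted), the offload path becomes:

	/* attr->out_count counts the mirred destinations parsed so far;
	 * attr->mirror_count snapshots out_count whenever a
	 * packet-modifying action (vlan, encap, header rewrite) is
	 * parsed after them, since those earlier destinations must see
	 * the packet unmodified.
	 */
	flow->rule[0] = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	if (attr->mirror_count)	/* actions separate mirror from dest */
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);

When mirror_count is zero, rule[0] simply multicasts to both vports.
Otherwise rule[1] sits in the fast FDB table, forwards the pristine
packet to the mirror vport(s) and chains to the new fwd_fdb table,
where rule[0] applies the remaining actions and forwards to the
destination vport.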

2) Move to order-0-only allocations (using fragmented work queues) for
all work queues used by the driver: the RX and TX descriptor rings
(RQs, SQs and Completion Queues (CQs)). From Tariq Toukan.
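
For background, a fragmented work queue replaces one big physically
contiguous ring (a high-order allocation that can fail under memory
pressure) with an array of order-0 pages; an entry index then selects
a page and an offset within it. A minimal sketch of the indexing, with
simplified, made-up field names (the driver's real helpers are
mlx5_wq_cyc_ctr2ix() and mlx5_wq_cyc_get_wqe(), visible in the diff
below):

	struct frag_wq {
		void **frags;		/* order-0 pages backing the ring */
		u16  sz_m1;		/* ring size - 1 (power of two) */
		u16  frag_sz_m1;	/* entries per page - 1 */
		u8   log_frag_strides;	/* log2(entries per page) */
		u8   log_stride;	/* log2(entry size in bytes) */
	};

	/* wrap a free-running counter into a ring index */
	static inline u16 wq_ctr2ix(struct frag_wq *wq, u16 ctr)
	{
		return ctr & wq->sz_m1;
	}

	/* index -> backing page, then offset within that page */
	static inline void *wq_get_wqe(struct frag_wq *wq, u16 ix)
	{
		void *frag = wq->frags[ix >> wq->log_frag_strides];

		return frag + ((ix & wq->frag_sz_m1) << wq->log_stride);
	}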

3) Avoid resetting netdevice statistics on netdevice
state changes, from Eran Ben Elisha.
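
The mechanism, condensed from mlx5e_grp_sw_update_stats() in the diff
below: counters now live in a per-channel array in priv, which
outlives channel open/close, and readers aggregate under a lock
instead of dereferencing per-channel objects that are freed on
reconfiguration. Roughly:

	/* priv->channel_stats[] is sized for the max number of channels
	 * and is never freed on ifdown/reconfigure, so counters
	 * accumulate across netdev state changes. */
	read_lock(&priv->stats_lock);
	if (!priv->channels_active)
		goto out;		/* keep the last good snapshot */
	for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) {
		s->rx_packets += priv->channel_stats[i].rq.packets;
		for (j = 0; j < priv->max_opened_tc; j++)
			s->tx_packets += priv->channel_stats[i].sq[j].packets;
	}
	memcpy(&priv->stats.sw, s, sizeof(*s));
out:
	read_unlock(&priv->stats_lock);
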
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d71dbdaa 05909bab
@@ -183,6 +183,7 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
 struct mlx5e_tx_wqe {
 	struct mlx5_wqe_ctrl_seg ctrl;
 	struct mlx5_wqe_eth_seg eth;
+	struct mlx5_wqe_data_seg data[0];
 };
 
 struct mlx5e_rx_wqe {
@@ -313,7 +314,7 @@ struct mlx5e_cq {
 	/* control */
 	struct mlx5_core_dev *mdev;
-	struct mlx5_frag_wq_ctrl wq_ctrl;
+	struct mlx5_wq_ctrl wq_ctrl;
 } ____cacheline_aligned_in_smp;
 
 struct mlx5e_tx_wqe_info {
@@ -357,7 +358,6 @@ struct mlx5e_txqsq {
 	/* dirtied @xmit */
 	u16 pc ____cacheline_aligned_in_smp;
 	u32 dma_fifo_pc;
-	struct mlx5e_sq_stats stats;
 
 	struct mlx5e_cq cq;
@@ -370,11 +370,11 @@ struct mlx5e_txqsq {
 	/* read only */
 	struct mlx5_wq_cyc wq;
 	u32 dma_fifo_mask;
+	struct mlx5e_sq_stats *stats;
 	void __iomem *uar_map;
 	struct netdev_queue *txq;
 	u32 sqn;
 	u8 min_inline_mode;
-	u16 edge;
 	struct device *pdev;
 	__be32 mkey_be;
 	unsigned long state;
@@ -439,7 +439,6 @@ struct mlx5e_icosq {
 	struct mlx5_wq_cyc wq;
 	void __iomem *uar_map;
 	u32 sqn;
-	u16 edge;
 	unsigned long state;
 
 	/* control path */
@@ -450,7 +449,7 @@ struct mlx5e_icosq {
 static inline bool
 mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
 {
-	return (((wq->sz_m1 & (cc - pc)) >= n) || (cc == pc));
+	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
 }
 
 struct mlx5e_dma_info {
@@ -527,7 +526,7 @@ struct mlx5e_rq {
 	struct mlx5e_channel *channel;
 	struct device *pdev;
 	struct net_device *netdev;
-	struct mlx5e_rq_stats stats;
+	struct mlx5e_rq_stats *stats;
 	struct mlx5e_cq cq;
 	struct mlx5e_page_cache page_cache;
 	struct hwtstamp_config *tstamp;
@@ -575,7 +574,7 @@ struct mlx5e_channel {
 	/* data path - accessed per napi poll */
 	struct irq_desc *irq_desc;
-	struct mlx5e_ch_stats stats;
+	struct mlx5e_ch_stats *stats;
 
 	/* control */
 	struct mlx5e_priv *priv;
@@ -591,6 +590,12 @@ struct mlx5e_channels {
 	struct mlx5e_params params;
 };
 
+struct mlx5e_channel_stats {
+	struct mlx5e_ch_stats ch;
+	struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
+	struct mlx5e_rq_stats rq;
+} ____cacheline_aligned_in_smp;
+
 enum mlx5e_traffic_types {
 	MLX5E_TT_IPV4_TCP,
 	MLX5E_TT_IPV6_TCP,
@@ -772,6 +777,8 @@ struct mlx5e_priv {
 	struct mutex state_lock; /* Protects Interface state */
 	struct mlx5e_rq drop_rq;
+	rwlock_t stats_lock; /* Protects channels SW stats updates */
+	bool channels_active;
 
 	struct mlx5e_channels channels;
 	u32 tisn[MLX5E_MAX_NUM_TC];
 	struct mlx5e_rqt indir_rqt;
@@ -792,6 +799,8 @@ struct mlx5e_priv {
 	struct mlx5_core_dev *mdev;
 	struct net_device *netdev;
 	struct mlx5e_stats stats;
+	struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
+	u8 max_opened_tc;
 	struct hwtstamp_config tstamp;
 	u16 q_counter;
 	u16 drop_rq_q_counter;
@@ -956,10 +965,9 @@ static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
 				      struct mlx5e_tx_wqe **wqe,
 				      u16 *pi)
 {
-	struct mlx5_wq_cyc *wq;
+	struct mlx5_wq_cyc *wq = &sq->wq;
 
-	wq = &sq->wq;
-	*pi = sq->pc & wq->sz_m1;
+	*pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
 	*wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
 	memset(*wqe, 0, sizeof(**wqe));
 }
@@ -967,7 +975,7 @@ static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
 static inline
 struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
 {
-	u16 pi = *pc & wq->sz_m1;
+	u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
 	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
 	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
...
@@ -174,7 +174,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context,
 	int headln;
 	int i;
 
-	sq->stats.tls_ooo++;
+	sq->stats->tls_ooo++;
 
 	if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) {
 		/* We might get here if a retransmission reaches the driver
@@ -220,7 +220,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context,
 	skb_shinfo(nskb)->nr_frags = info.nr_frags;
 	nskb->data_len = info.sync_len;
 	nskb->len += info.sync_len;
-	sq->stats.tls_resync_bytes += nskb->len;
+	sq->stats->tls_resync_bytes += nskb->len;
 	mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
 				    cpu_to_be64(info.rcd_sn));
 	mlx5e_sq_xmit(sq, nskb, *wqe, *pi);
...
@@ -130,28 +130,28 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
 	struct mlx5e_sq_stats *sq_stats;
 	int i, j;
 
+	read_lock(&priv->stats_lock);
+	if (!priv->channels_active)
+		goto out;
 	memset(s, 0, sizeof(*s));
 	for (i = 0; i < priv->channels.num; i++) {
 		struct mlx5e_channel *c = priv->channels.c[i];
 
-		rq_stats = &c->rq.stats;
+		rq_stats = c->rq.stats;
 
 		s->rx_packets	+= rq_stats->packets;
 		s->rx_bytes	+= rq_stats->bytes;
 
 		for (j = 0; j < priv->channels.params.num_tc; j++) {
-			sq_stats = &c->sq[j].stats;
+			sq_stats = c->sq[j].stats;
 
 			s->tx_packets	+= sq_stats->packets;
 			s->tx_bytes	+= sq_stats->bytes;
 		}
 	}
-}
-
-static void mlx5e_rep_update_stats(struct mlx5e_priv *priv)
-{
-	mlx5e_rep_update_sw_counters(priv);
-	mlx5e_rep_update_hw_counters(priv);
+out:
+	read_unlock(&priv->stats_lock);
 }
 
 static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
@@ -871,6 +871,8 @@ mlx5e_get_sw_stats64(const struct net_device *dev,
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
 
+	mlx5e_rep_update_sw_counters(priv);
+
 	stats->rx_packets = sstats->rx_packets;
 	stats->rx_bytes   = sstats->rx_bytes;
 	stats->tx_packets = sstats->tx_packets;
@@ -1046,7 +1048,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
 	.cleanup_rx		= mlx5e_cleanup_rep_rx,
 	.init_tx		= mlx5e_init_rep_tx,
 	.cleanup_tx		= mlx5e_cleanup_nic_tx,
-	.update_stats		= mlx5e_rep_update_stats,
+	.update_stats		= mlx5e_rep_update_hw_counters,
 	.max_nch		= mlx5e_get_rep_max_num_channels,
 	.update_carrier		= NULL,
 	.rx_handlers.handle_rx_cqe	= mlx5e_handle_rx_cqe_rep,
...
@@ -81,7 +81,6 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
 };
 
 #define NUM_SW_COUNTERS		ARRAY_SIZE(sw_stats_desc)
@@ -109,20 +108,22 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
 	return idx;
 }
 
-static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
+void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 {
 	struct mlx5e_sw_stats temp, *s = &temp;
-	struct mlx5e_rq_stats *rq_stats;
-	struct mlx5e_sq_stats *sq_stats;
-	struct mlx5e_ch_stats *ch_stats;
-	int i, j;
+	int i;
 
 	memset(s, 0, sizeof(*s));
-	for (i = 0; i < priv->channels.num; i++) {
-		struct mlx5e_channel *c = priv->channels.c[i];
-
-		rq_stats = &c->rq.stats;
-		ch_stats = &c->stats;
+	read_lock(&priv->stats_lock);
+	if (!priv->channels_active)
+		goto out;
+
+	for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) {
+		struct mlx5e_channel_stats *channel_stats =
+			&priv->channel_stats[i];
+		struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
+		struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
+		int j;
 
 		s->rx_packets	+= rq_stats->packets;
 		s->rx_bytes	+= rq_stats->bytes;
@@ -149,8 +150,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 		s->rx_cache_waive += rq_stats->cache_waive;
 		s->ch_eq_rearm += ch_stats->eq_rearm;
 
-		for (j = 0; j < priv->channels.params.num_tc; j++) {
-			sq_stats = &c->sq[j].stats;
+		for (j = 0; j < priv->max_opened_tc; j++) {
+			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
 
 			s->tx_packets	+= sq_stats->packets;
 			s->tx_bytes	+= sq_stats->bytes;
@@ -175,10 +176,9 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 		}
 	}
 
-	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
-				priv->stats.pport.phy_counters,
-				counter_set.phys_layer_cntrs.link_down_events);
-
 	memcpy(&priv->stats.sw, s, sizeof(*s));
+out:
+	read_unlock(&priv->stats_lock);
 }
 
 static const struct counter_desc q_stats_desc[] = {
@@ -580,12 +580,13 @@ static const struct counter_desc pport_phy_statistical_stats_desc[] = {
 	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
 };
 
-#define NUM_PPORT_PHY_COUNTERS	ARRAY_SIZE(pport_phy_statistical_stats_desc)
+#define NUM_PPORT_PHY_STATISTICAL_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc)
 
 static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
 {
+	/* "1" for link_down_events special counter */
 	return MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group) ?
-		NUM_PPORT_PHY_COUNTERS : 0;
+		NUM_PPORT_PHY_STATISTICAL_COUNTERS + 1 : 1;
 }
 
 static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
@@ -593,10 +594,14 @@ static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
 {
 	int i;
 
-	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
-		for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++)
-			strcpy(data + (idx++) * ETH_GSTRING_LEN,
-			       pport_phy_statistical_stats_desc[i].format);
+	strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");
+
+	if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
+		return idx;
+
+	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
+		strcpy(data + (idx++) * ETH_GSTRING_LEN,
+		       pport_phy_statistical_stats_desc[i].format);
+
 	return idx;
 }
 
@@ -604,11 +609,17 @@ static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
 {
 	int i;
 
-	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
-		for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++)
-			data[idx++] =
-				MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
-						    pport_phy_statistical_stats_desc, i);
+	/* link_down_events_phy has special handling since it is not stored in __be64 format */
+	data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
+			       counter_set.phys_layer_cntrs.link_down_events);
+
+	if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
+		return idx;
+
+	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
+		data[idx++] =
+			MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
+					    pport_phy_statistical_stats_desc, i);
+
 	return idx;
 }
 
@@ -1148,30 +1159,37 @@ static const struct counter_desc ch_stats_desc[] = {
 static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
 {
-	return (NUM_RQ_STATS * priv->channels.num) +
-		(NUM_CH_STATS * priv->channels.num) +
-		(NUM_SQ_STATS * priv->channels.num * priv->channels.params.num_tc);
+	int max_nch = priv->profile->max_nch(priv->mdev);
+
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+		return 0;
+
+	return (NUM_RQ_STATS * max_nch) +
+	       (NUM_CH_STATS * max_nch) +
+	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc);
 }
 
 static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
 					   int idx)
 {
+	int max_nch = priv->profile->max_nch(priv->mdev);
 	int i, j, tc;
 
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
 		return idx;
 
-	for (i = 0; i < priv->channels.num; i++)
+	for (i = 0; i < max_nch; i++)
 		for (j = 0; j < NUM_CH_STATS; j++)
 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
 				ch_stats_desc[j].format, i);
 
-	for (i = 0; i < priv->channels.num; i++)
+	for (i = 0; i < max_nch; i++)
 		for (j = 0; j < NUM_RQ_STATS; j++)
 			sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i);
 
-	for (tc = 0; tc < priv->channels.params.num_tc; tc++)
-		for (i = 0; i < priv->channels.num; i++)
+	/* priv->channel_tc2txq[i][tc] is valid only when device is open */
+	for (tc = 0; tc < priv->max_opened_tc; tc++)
+		for (i = 0; i < max_nch; i++)
 			for (j = 0; j < NUM_SQ_STATS; j++)
 				sprintf(data + (idx++) * ETH_GSTRING_LEN,
 					sq_stats_desc[j].format,
@@ -1183,29 +1201,29 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
 static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
 					 int idx)
 {
-	struct mlx5e_channels *channels = &priv->channels;
+	int max_nch = priv->profile->max_nch(priv->mdev);
 	int i, j, tc;
 
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
 		return idx;
 
-	for (i = 0; i < channels->num; i++)
+	for (i = 0; i < max_nch; i++)
 		for (j = 0; j < NUM_CH_STATS; j++)
 			data[idx++] =
-				MLX5E_READ_CTR64_CPU(&channels->c[i]->stats,
+				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
						     ch_stats_desc, j);
 
-	for (i = 0; i < channels->num; i++)
+	for (i = 0; i < max_nch; i++)
 		for (j = 0; j < NUM_RQ_STATS; j++)
 			data[idx++] =
-				MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
+				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
						     rq_stats_desc, j);
 
-	for (tc = 0; tc < priv->channels.params.num_tc; tc++)
-		for (i = 0; i < channels->num; i++)
+	for (tc = 0; tc < priv->max_opened_tc; tc++)
+		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				data[idx++] =
-					MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats,
+					MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
							     sq_stats_desc, j);
 
 	return idx;
@@ -1217,7 +1235,6 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
 		.get_num_stats = mlx5e_grp_sw_get_num_stats,
 		.fill_strings = mlx5e_grp_sw_fill_strings,
 		.fill_stats = mlx5e_grp_sw_fill_stats,
-		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
 		.update_stats = mlx5e_grp_sw_update_stats,
 	},
 	{
...
@@ -97,9 +97,6 @@ struct mlx5e_sw_stats {
 	u64 tx_tls_ooo;
 	u64 tx_tls_resync_bytes;
 #endif
-
-	/* Special handling counters */
-	u64 link_down_events_phy;
 };
 
 struct mlx5e_qcounter_stats {
@@ -242,4 +239,6 @@ struct mlx5e_stats_grp {
 extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
 extern const int mlx5e_num_stats_grps;
 
+void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv);
+
 #endif /* __MLX5_EN_STATS_H__ */
...
@@ -75,12 +75,14 @@ enum {
 	MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4),
 };
 
+#define MLX5E_TC_MAX_SPLITS 1
+
 struct mlx5e_tc_flow {
 	struct rhash_head	node;
 	struct mlx5e_priv	*priv;
 	u64			cookie;
 	u8			flags;
-	struct mlx5_flow_handle *rule;
+	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
 	struct list_head	encap;   /* flows sharing the same encap ID */
 	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
 	struct list_head	hairpin; /* flows sharing the same hairpin */
@@ -794,8 +796,8 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
 	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
 	struct mlx5_fc *counter = NULL;
 
-	counter = mlx5_flow_rule_counter(flow->rule);
-	mlx5_del_flow_rules(flow->rule);
+	counter = mlx5_flow_rule_counter(flow->rule[0]);
+	mlx5_del_flow_rules(flow->rule[0]);
 	mlx5_fc_destroy(priv->mdev, counter);
 
 	if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) {
@@ -844,8 +846,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 		}
 		out_priv = netdev_priv(encap_dev);
 		rpriv = out_priv->ppriv;
-		attr->out_rep = rpriv->rep;
-		attr->out_mdev = out_priv->mdev;
+		attr->out_rep[attr->out_count] = rpriv->rep;
+		attr->out_mdev[attr->out_count++] = out_priv->mdev;
 	}
 
 	err = mlx5_eswitch_add_vlan_action(esw, attr);
@@ -870,9 +872,18 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 		rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
 		if (IS_ERR(rule))
 			goto err_add_rule;
+
+		if (attr->mirror_count) {
+			flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &parse_attr->spec, attr);
+			if (IS_ERR(flow->rule[1]))
+				goto err_fwd_rule;
+		}
 	}
 	return rule;
 
+err_fwd_rule:
+	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+	rule = flow->rule[1];
 err_add_rule:
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 		mlx5e_detach_mod_hdr(priv, flow);
@@ -893,7 +904,9 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
 
 	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
 		flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
-		mlx5_eswitch_del_offloaded_rule(esw, flow->rule, attr);
+		if (attr->mirror_count)
+			mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
+		mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
 	}
 
 	mlx5_eswitch_del_vlan_action(esw, attr);
@@ -929,13 +942,25 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
 	list_for_each_entry(flow, &e->flows, encap) {
 		esw_attr = flow->esw_attr;
 		esw_attr->encap_id = e->encap_id;
-		flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
-		if (IS_ERR(flow->rule)) {
-			err = PTR_ERR(flow->rule);
+		flow->rule[0] = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
+		if (IS_ERR(flow->rule[0])) {
+			err = PTR_ERR(flow->rule[0]);
 			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
 				       err);
 			continue;
 		}
+
+		if (esw_attr->mirror_count) {
+			flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
+			if (IS_ERR(flow->rule[1])) {
+				mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], esw_attr);
+				err = PTR_ERR(flow->rule[1]);
+				mlx5_core_warn(priv->mdev, "Failed to update cached mirror flow, %d\n",
+					       err);
+				continue;
+			}
+		}
+
 		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
 	}
 }
@@ -948,8 +973,12 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
 	list_for_each_entry(flow, &e->flows, encap) {
 		if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+			struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+
 			flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
-			mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
+			if (attr->mirror_count)
+				mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
+			mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
 		}
 	}
 
@@ -984,7 +1013,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
 			continue;
 		list_for_each_entry(flow, &e->flows, encap) {
 			if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
-				counter = mlx5_flow_rule_counter(flow->rule);
+				counter = mlx5_flow_rule_counter(flow->rule[0]);
 				mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
 				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
 					neigh_used = true;
@@ -2537,6 +2566,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				return err;
 
 			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+			attr->mirror_count = attr->out_count;
 			continue;
 		}
 
@@ -2548,12 +2578,18 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 			return -EOPNOTSUPP;
 		}
 
-		if (is_tcf_mirred_egress_redirect(a)) {
-			struct net_device *out_dev;
+		if (is_tcf_mirred_egress_redirect(a) || is_tcf_mirred_egress_mirror(a)) {
 			struct mlx5e_priv *out_priv;
+			struct net_device *out_dev;
 
 			out_dev = tcf_mirred_dev(a);
 
+			if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
+				pr_err("can't support more than %d output ports, can't offload forwarding\n",
+				       attr->out_count);
+				return -EOPNOTSUPP;
+			}
+
 			if (switchdev_port_same_parent_id(priv->netdev,
 							  out_dev) ||
 			    is_merged_eswitch_dev(priv, out_dev)) {
@@ -2561,8 +2597,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 					MLX5_FLOW_CONTEXT_ACTION_COUNT;
 				out_priv = netdev_priv(out_dev);
 				rpriv = out_priv->ppriv;
-				attr->out_rep = rpriv->rep;
-				attr->out_mdev = out_priv->mdev;
+				attr->out_rep[attr->out_count] = rpriv->rep;
+				attr->out_mdev[attr->out_count++] = out_priv->mdev;
 			} else if (encap) {
 				parse_attr->mirred_ifindex = out_dev->ifindex;
 				parse_attr->tun_info = *info;
@@ -2585,6 +2621,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				encap = true;
 			else
 				return -EOPNOTSUPP;
+			attr->mirror_count = attr->out_count;
 			continue;
 		}
 
@@ -2606,6 +2643,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 			} else { /* action is TCA_VLAN_ACT_MODIFY */
 				return -EOPNOTSUPP;
 			}
+			attr->mirror_count = attr->out_count;
 			continue;
 		}
 
@@ -2621,6 +2659,11 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 	if (!actions_match_supported(priv, exts, parse_attr, flow))
 		return -EOPNOTSUPP;
 
+	if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
+		return -EOPNOTSUPP;
+	}
+
 	return 0;
 }
 
@@ -2700,16 +2743,16 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
 		err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
 		if (err < 0)
 			goto err_free;
-		flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
+		flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
 	} else {
 		err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
 		if (err < 0)
 			goto err_free;
-		flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
+		flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
 	}
 
-	if (IS_ERR(flow->rule)) {
-		err = PTR_ERR(flow->rule);
+	if (IS_ERR(flow->rule[0])) {
+		err = PTR_ERR(flow->rule[0]);
 		if (err != -EAGAIN)
 			goto err_free;
 	}
@@ -2782,7 +2825,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
 	if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
 		return 0;
 
-	counter = mlx5_flow_rule_counter(flow->rule);
+	counter = mlx5_flow_rule_counter(flow->rule[0]);
 	if (!counter)
 		return 0;
...
@@ -46,24 +46,26 @@ static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
 
 static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
 {
+	struct mlx5e_sq_stats *stats = sq->stats;
 	struct net_dim_sample dim_sample;
 
 	if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))
 		return;
 
-	net_dim_sample(sq->cq.event_ctr, sq->stats.packets, sq->stats.bytes,
+	net_dim_sample(sq->cq.event_ctr, stats->packets, stats->bytes,
 		       &dim_sample);
 	net_dim(&sq->dim, dim_sample);
 }
 
 static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
 {
+	struct mlx5e_rq_stats *stats = rq->stats;
 	struct net_dim_sample dim_sample;
 
 	if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state)))
 		return;
 
-	net_dim_sample(rq->cq.event_ctr, rq->stats.packets, rq->stats.bytes,
+	net_dim_sample(rq->cq.event_ctr, stats->packets, stats->bytes,
 		       &dim_sample);
 	net_dim(&rq->dim, dim_sample);
 }
...
@@ -200,7 +200,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
 	spec->match_criteria_enable = match_header;
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 	flow_rule =
-		mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
+		mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,
 				    &flow_act, &dest, 1);
 	if (IS_ERR(flow_rule)) {
 		esw_warn(esw->dev,
@@ -282,7 +282,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
 		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
 		goto out;
 	}
-	esw->fdb_table.fdb = fdb;
+	esw->fdb_table.legacy.fdb = fdb;
 
 	/* Addresses group : Full match unicast/multicast addresses */
 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
@@ -343,9 +343,9 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
 		mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
 		esw->fdb_table.legacy.addr_grp = NULL;
 	}
-	if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
-		mlx5_destroy_flow_table(esw->fdb_table.fdb);
-		esw->fdb_table.fdb = NULL;
+	if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.fdb)) {
+		mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
+		esw->fdb_table.legacy.fdb = NULL;
 	}
 }
 
@@ -355,15 +355,15 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
 static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
 {
-	if (!esw->fdb_table.fdb)
+	if (!esw->fdb_table.legacy.fdb)
 		return;
 
 	esw_debug(esw->dev, "Destroy FDB Table\n");
 	mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
 	mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
 	mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
-	mlx5_destroy_flow_table(esw->fdb_table.fdb);
-	esw->fdb_table.fdb = NULL;
+	mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
+	esw->fdb_table.legacy.fdb = NULL;
 	esw->fdb_table.legacy.addr_grp = NULL;
 	esw->fdb_table.legacy.allmulti_grp = NULL;
 	esw->fdb_table.legacy.promisc_grp = NULL;
@@ -396,7 +396,7 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
 
 fdb_add:
 	/* SRIOV is enabled: Forward UC MAC to vport */
-	if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY)
+	if (esw->fdb_table.legacy.fdb && esw->mode == SRIOV_LEGACY)
 		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
 
 	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
@@ -486,7 +486,7 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
 	u8 *mac = vaddr->node.addr;
 	u32 vport = vaddr->vport;
 
-	if (!esw->fdb_table.fdb)
+	if (!esw->fdb_table.legacy.fdb)
 		return 0;
 
 	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
@@ -526,7 +526,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
 	u8 *mac = vaddr->node.addr;
 	u32 vport = vaddr->vport;
 
-	if (!esw->fdb_table.fdb)
+	if (!esw->fdb_table.legacy.fdb)
 		return 0;
 
 	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
...
@@ -55,6 +55,9 @@
 #define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
 	min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit)
 
+#define mlx5_esw_has_fwd_fdb(dev) \
+	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)
+
 struct vport_ingress {
 	struct mlx5_flow_table *acl;
 	struct mlx5_flow_group *allow_untagged_spoofchk_grp;
@@ -117,16 +120,18 @@ struct mlx5_vport {
 };
 
 struct mlx5_eswitch_fdb {
-	void *fdb;
 	union {
 		struct legacy_fdb {
+			struct mlx5_flow_table *fdb;
 			struct mlx5_flow_group *addr_grp;
 			struct mlx5_flow_group *allmulti_grp;
 			struct mlx5_flow_group *promisc_grp;
 		} legacy;
 
 		struct offloads_fdb {
-			struct mlx5_flow_table *fdb;
+			struct mlx5_flow_table *fast_fdb;
+			struct mlx5_flow_table *fwd_fdb;
+			struct mlx5_flow_table *slow_fdb;
 			struct mlx5_flow_group *send_to_vport_grp;
 			struct mlx5_flow_group *miss_grp;
 			struct mlx5_flow_handle *miss_rule_uni;
@@ -214,6 +219,10 @@ struct mlx5_flow_handle *
 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 				struct mlx5_flow_spec *spec,
 				struct mlx5_esw_flow_attr *attr);
+struct mlx5_flow_handle *
+mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
+			  struct mlx5_flow_spec *spec,
+			  struct mlx5_esw_flow_attr *attr);
 void
 mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
 				struct mlx5_flow_handle *rule,
@@ -234,12 +243,18 @@ enum mlx5_flow_match_level {
 	MLX5_MATCH_L4 = MLX5_INLINE_MODE_TCP_UDP,
 };
 
+/* current maximum for flow based vport multicasting */
+#define MLX5_MAX_FLOW_FWD_VPORTS 2
+
 struct mlx5_esw_flow_attr {
 	struct mlx5_eswitch_rep *in_rep;
-	struct mlx5_eswitch_rep *out_rep;
-	struct mlx5_core_dev	*out_mdev;
+	struct mlx5_eswitch_rep *out_rep[MLX5_MAX_FLOW_FWD_VPORTS];
+	struct mlx5_core_dev	*out_mdev[MLX5_MAX_FLOW_FWD_VPORTS];
 	struct mlx5_core_dev	*in_mdev;
+
+	int mirror_count;
+	int out_count;
+
 	int	action;
 	__be16	vlan_proto;
 	u16	vlan_vid;
...
@@ -48,16 +48,22 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 				struct mlx5_flow_spec *spec,
 				struct mlx5_esw_flow_attr *attr)
 {
-	struct mlx5_flow_destination dest[2] = {};
+	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
 	struct mlx5_flow_act flow_act = {0};
+	struct mlx5_flow_table *ft = NULL;
 	struct mlx5_fc *counter = NULL;
 	struct mlx5_flow_handle *rule;
+	int j, i = 0;
 	void *misc;
-	int i = 0;
 
 	if (esw->mode != SRIOV_OFFLOADS)
 		return ERR_PTR(-EOPNOTSUPP);
 
+	if (attr->mirror_count)
+		ft = esw->fdb_table.offloads.fwd_fdb;
+	else
+		ft = esw->fdb_table.offloads.fast_fdb;
+
 	flow_act.action = attr->action;
 	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
 	if (!mlx5_eswitch_vlan_actions_supported(esw->dev))
@@ -70,14 +76,14 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	}
 
 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
-		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
-		dest[i].vport.num = attr->out_rep->vport;
-		if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
+		for (j = attr->mirror_count; j < attr->out_count; j++) {
+			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+			dest[i].vport.num = attr->out_rep[j]->vport;
 			dest[i].vport.vhca_id =
-				MLX5_CAP_GEN(attr->out_mdev, vhca_id);
-			dest[i].vport.vhca_id_valid = 1;
+				MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
+			dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
+			i++;
 		}
-		i++;
 	}
 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
 		counter = mlx5_fc_create(esw->dev, true);
@@ -119,8 +125,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
 		flow_act.encap_id = attr->encap_id;
 
-	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
-				   spec, &flow_act, dest, i);
+	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, i);
 	if (IS_ERR(rule))
 		goto err_add_rule;
 	else
@@ -134,6 +139,57 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	return rule;
 }
 
+struct mlx5_flow_handle *
+mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
+			  struct mlx5_flow_spec *spec,
+			  struct mlx5_esw_flow_attr *attr)
+{
+	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
+	struct mlx5_flow_act flow_act = {0};
+	struct mlx5_flow_handle *rule;
+	void *misc;
+	int i;
+
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+	for (i = 0; i < attr->mirror_count; i++) {
+		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+		dest[i].vport.num = attr->out_rep[i]->vport;
+		dest[i].vport.vhca_id =
+			MLX5_CAP_GEN(attr->out_mdev[i], vhca_id);
+		dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
+	}
+	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest[i].ft = esw->fdb_table.offloads.fwd_fdb,
+	i++;
+
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
+
+	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+		MLX5_SET(fte_match_set_misc, misc,
+			 source_eswitch_owner_vhca_id,
+			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));
+
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
+				 source_eswitch_owner_vhca_id);
+
+	if (attr->match_level == MLX5_MATCH_NONE)
+		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+	else
+		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
+					      MLX5_MATCH_MISC_PARAMETERS;
+
+	rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fast_fdb, spec, &flow_act, dest, i);
+
+	if (!IS_ERR(rule))
+		esw->offloads.num_flows++;
+
+	return rule;
+}
+
 void
 mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
 				struct mlx5_flow_handle *rule,
@@ -173,7 +229,7 @@ esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
 	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
 
 	in_rep  = attr->in_rep;
-	out_rep = attr->out_rep;
+	out_rep = attr->out_rep[0];
 
 	if (push)
 		vport = in_rep;
@@ -194,7 +250,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
 		goto out_notsupp;
 
 	in_rep  = attr->in_rep;
-	out_rep = attr->out_rep;
+	out_rep = attr->out_rep[0];
 
 	if (push && in_rep->vport == FDB_UPLINK_VPORT)
 		goto out_notsupp;
@@ -245,7 +301,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
 
 	if (!push && !pop && fwd) {
 		/* tracks VF --> wire rules without vlan push action */
-		if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
+		if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) {
 			vport->vlan_refcount++;
 			attr->vlan_handled = true;
 		}
@@ -305,7 +361,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
 
 	if (!push && !pop && fwd) {
 		/* tracks VF --> wire rules without vlan push action */
-		if (attr->out_rep->vport == FDB_UPLINK_VPORT)
+		if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT)
 			vport->vlan_refcount--;
 
 		return 0;
@@ -363,7 +419,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
 	dest.vport.num = vport;
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 
-	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
+	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
 					&flow_act, &dest, 1);
 	if (IS_ERR(flow_rule))
 		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
@@ -407,7 +463,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
 	dest.vport.num = 0;
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 
-	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
+	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
 	if (IS_ERR(flow_rule)) {
 		err = PTR_ERR(flow_rule);
@@ -422,7 +478,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
 	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
 			      outer_headers.dmac_47_16);
 	dmac_v[0] = 0x01;
-	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
+	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
 					&flow_act, &dest, 1);
 	if (IS_ERR(flow_rule)) {
 		err = PTR_ERR(flow_rule);
@@ -454,7 +510,7 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get FDB flow namespace\n");
 		err = -EOPNOTSUPP;
-		goto out;
+		goto out_namespace;
 	}
 
 	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
@@ -464,6 +520,9 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
 	esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS,
 			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
 
+	if (mlx5_esw_has_fwd_fdb(dev))
+		esw_size >>= 1;
+
 	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
 		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;
 
@@ -474,17 +533,37 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
 	if (IS_ERR(fdb)) {
 		err = PTR_ERR(fdb);
 		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
-		goto out;
+		goto out_namespace;
 	}
-	esw->fdb_table.fdb = fdb;
+	esw->fdb_table.offloads.fast_fdb = fdb;
 
+	if (!mlx5_esw_has_fwd_fdb(dev))
+		goto out_namespace;
+
+	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
+						  esw_size,
+						  ESW_OFFLOADS_NUM_GROUPS, 1,
+						  flags);
+	if (IS_ERR(fdb)) {
+		err = PTR_ERR(fdb);
+		esw_warn(dev, "Failed to create fwd table err %d\n", err);
+		goto out_ft;
+	}
+	esw->fdb_table.offloads.fwd_fdb = fdb;
+
+	return err;
+
-out:
+out_ft:
+	mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
+out_namespace:
 	return err;
 }
 
 static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
 {
-	mlx5_destroy_flow_table(esw->fdb_table.fdb);
+	if (mlx5_esw_has_fwd_fdb(esw->dev))
+		mlx5_destroy_flow_table(esw->fdb_table.offloads.fwd_fdb);
+	mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
 }
 
 #define MAX_PF_SQ 256
@@ -530,7 +609,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
 		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
 		goto slow_fdb_err;
 	}
-	esw->fdb_table.offloads.fdb = fdb;
+	esw->fdb_table.offloads.slow_fdb = fdb;
 
 	/* create send-to-vport group */
 	memset(flow_group_in, 0, inlen);
@@ -586,9 +665,9 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
 miss_err:
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
 send_vport_err:
-	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
+	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
 slow_fdb_err:
-	mlx5_destroy_flow_table(esw->fdb_table.fdb);
+	esw_destroy_offloads_fast_fdb_table(esw);
 fast_fdb_err:
 ns_err:
 	kvfree(flow_group_in);
@@ -597,7 +676,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
 
 static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
 {
-	if (!esw->fdb_table.fdb)
+	if (!esw->fdb_table.offloads.fast_fdb)
 		return;
 
 	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
@@ -606,7 +685,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
 
-	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
+	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
 	esw_destroy_offloads_fast_fdb_table(esw);
 }
...
...@@ -454,7 +454,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size) ...@@ -454,7 +454,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
} }
inlen = MLX5_ST_SZ_BYTES(create_cq_in) + inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
sizeof(u64) * conn->cq.wq_ctrl.frag_buf.npages; sizeof(u64) * conn->cq.wq_ctrl.buf.npages;
in = kvzalloc(inlen, GFP_KERNEL); in = kvzalloc(inlen, GFP_KERNEL);
if (!in) { if (!in) {
err = -ENOMEM; err = -ENOMEM;
@@ -469,12 +469,12 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
 	MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
 	MLX5_SET(cqc, cqc, c_eqn, eqn);
 	MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index);
-	MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.frag_buf.page_shift -
+	MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.buf.page_shift -
 			   MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma);

 	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
-	mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.frag_buf, pas);
+	mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.buf, pas);

 	err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen);
 	kvfree(in);
@@ -500,7 +500,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
 	goto out;

 err_cqwq:
-	mlx5_cqwq_destroy(&conn->cq.wq_ctrl);
+	mlx5_wq_destroy(&conn->cq.wq_ctrl);
 out:
 	return err;
 }
@@ -510,7 +510,7 @@ static void mlx5_fpga_conn_destroy_cq(struct mlx5_fpga_conn *conn)
 	tasklet_disable(&conn->cq.tasklet);
 	tasklet_kill(&conn->cq.tasklet);
 	mlx5_core_destroy_cq(conn->fdev->mdev, &conn->cq.mcq);
-	mlx5_cqwq_destroy(&conn->cq.wq_ctrl);
+	mlx5_wq_destroy(&conn->cq.wq_ctrl);
 }

 static int mlx5_fpga_conn_create_wq(struct mlx5_fpga_conn *conn, void *qpc)
@@ -591,8 +591,8 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
 	if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
 		MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);

-	mlx5_fill_page_array(&conn->qp.wq_ctrl.buf,
-			     (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas));
+	mlx5_fill_page_frag_array(&conn->qp.wq_ctrl.buf,
+				  (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas));

 	err = mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen);
 	if (err)
...
@@ -54,7 +54,7 @@ struct mlx5_fpga_conn {
 	/* CQ */
 	struct {
 		struct mlx5_cqwq wq;
-		struct mlx5_frag_wq_ctrl wq_ctrl;
+		struct mlx5_wq_ctrl wq_ctrl;
 		struct mlx5_core_cq mcq;
 		struct tasklet_struct tasklet;
 	} cq;
...
@@ -2495,7 +2495,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
 	if (!steering->fdb_root_ns)
 		return -ENOMEM;

-	prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 1);
+	prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 2);
 	if (IS_ERR(prio))
 		goto out_err;
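The third fs_create_prio() argument is the number of flow-table levels
reserved under the priority. Bumping it from 1 to 2 leaves room for the
forward table to chain one level below the fast FDB under the same
priority:

	/* level 0: offloads.fast_fdb, level 1: offloads.fwd_fdb (sketch) */
	prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 2);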
...
@@ -33,6 +33,8 @@
 #ifndef __MLX5E_IPOB_H__
 #define __MLX5E_IPOB_H__

+#ifdef CONFIG_MLX5_CORE_IPOIB
+
 #include <linux/mlx5/fs.h>
 #include "en.h"
@@ -93,8 +95,32 @@ const struct mlx5e_profile *mlx5i_pkey_get_profile(void);
 /* Extract mlx5e_priv from IPoIB netdev */
 #define mlx5i_epriv(netdev) ((void *)(((struct mlx5i_priv *)netdev_priv(netdev))->mlx5e_priv))

+struct mlx5_wqe_eth_pad {
+	u8 rsvd0[16];
+};
+
+struct mlx5i_tx_wqe {
+	struct mlx5_wqe_ctrl_seg     ctrl;
+	struct mlx5_wqe_datagram_seg datagram;
+	struct mlx5_wqe_eth_pad      pad;
+	struct mlx5_wqe_eth_seg      eth;
+	struct mlx5_wqe_data_seg     data[0];
+};
+
+static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
+				      struct mlx5i_tx_wqe **wqe,
+				      u16 *pi)
+{
+	struct mlx5_wq_cyc *wq = &sq->wq;
+
+	*pi  = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+	*wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
+	memset(*wqe, 0, sizeof(**wqe));
+}
+
 netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 			  struct mlx5_av *av, u32 dqpn, u32 dqkey);
 void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);

+#endif /* CONFIG_MLX5_CORE_IPOIB */
 #endif /* __MLX5E_IPOB_H__ */
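A hedged usage sketch of the new inline (caller shape assumed from the
mlx5e TX path; ring-space checks and doorbell elided): it hands back a
zeroed IPoIB WQE at the current producer counter together with its ring
index:

	struct mlx5i_tx_wqe *wqe;
	u16 pi;

	mlx5i_sq_fetch_wqe(sq, &wqe, &pi);
	/* fill wqe->ctrl, wqe->datagram (av/dqpn/dqkey), wqe->eth and
	 * wqe->data[], then post as in the regular mlx5e xmit path */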
@@ -36,7 +36,12 @@
 u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
 {
-	return (u32)wq->sz_m1 + 1;
+	return (u32)wq->fbc.sz_m1 + 1;
+}
+
+u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
+{
+	return (u32)wq->fbc.frag_sz_m1 + 1;
 }

 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
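mlx5_wq_cyc_get_frag_size() reports how many strides fit in one order-0
fragment, the quantity callers need once a ring is no longer physically
contiguous. Worked example, assuming 4K pages and 64B WQE basic blocks
(log_stride = 6): log_frag_strides = PAGE_SHIFT - log_stride = 6, so
frag_sz_m1 = 63 and the function returns 64 strides per page.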
@@ -46,12 +51,12 @@ u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
 u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
 {
-	return (u32)wq->sz_m1 + 1;
+	return (u32)wq->fbc.sz_m1 + 1;
 }

 static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
 {
-	return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
+	return mlx5_wq_cyc_get_size(wq) << wq->fbc.log_stride;
 }

 static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq)
@@ -67,17 +72,19 @@ static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
 static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
 {
-	return mlx5_wq_ll_get_size(wq) << wq->log_stride;
+	return mlx5_wq_ll_get_size(wq) << wq->fbc.log_stride;
 }

 int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		       void *wqc, struct mlx5_wq_cyc *wq,
 		       struct mlx5_wq_ctrl *wq_ctrl)
 {
+	struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
 	int err;

-	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
-	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+	mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
+		      MLX5_GET(wq, wqc, log_wq_sz),
+		      fbc);

 	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
@@ -85,14 +92,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		return err;
 	}

-	err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
-				  &wq_ctrl->buf, param->buf_numa_node);
+	err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
+				       &wq_ctrl->buf, param->buf_numa_node);
 	if (err) {
-		mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
+		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
 		goto err_db_free;
 	}

-	wq->buf = wq_ctrl->buf.frags->buf;
+	fbc->frag_buf = wq_ctrl->buf;
 	wq->db  = wq_ctrl->db.db;

 	wq_ctrl->mdev = mdev;
@@ -105,17 +112,35 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 	return err;
 }

+static void mlx5e_qp_set_frag_buf(struct mlx5_frag_buf *buf,
+				  struct mlx5_wq_qp *qp)
+{
+	struct mlx5_frag_buf *rqb, *sqb;
+
+	rqb = &qp->rq.fbc.frag_buf;
+	*rqb = *buf;
+	rqb->size   = mlx5_wq_cyc_get_byte_size(&qp->rq);
+	rqb->npages = 1 << get_order(rqb->size);
+
+	sqb = &qp->sq.fbc.frag_buf;
+	*sqb = *buf;
+	sqb->size   = mlx5_wq_cyc_get_byte_size(&qp->sq);
+	sqb->npages = 1 << get_order(sqb->size);
+	sqb->frags += rqb->npages; /* first part is for the rq */
+}
+
 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		      void *qpc, struct mlx5_wq_qp *wq,
 		      struct mlx5_wq_ctrl *wq_ctrl)
 {
 	int err;

-	wq->rq.log_stride = MLX5_GET(qpc, qpc, log_rq_stride) + 4;
-	wq->rq.sz_m1 = (1 << MLX5_GET(qpc, qpc, log_rq_size)) - 1;
+	mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
+		      MLX5_GET(qpc, qpc, log_rq_size),
+		      &wq->rq.fbc);

-	wq->sq.log_stride = ilog2(MLX5_SEND_WQE_BB);
-	wq->sq.sz_m1 = (1 << MLX5_GET(qpc, qpc, log_sq_size)) - 1;
+	mlx5_fill_fbc(ilog2(MLX5_SEND_WQE_BB),
+		      MLX5_GET(qpc, qpc, log_sq_size),
+		      &wq->sq.fbc);

 	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
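Both rings of the QP share the single fragmented buffer: each view copies
*buf, sizes itself from its own ring (the SQ's byte size must come from
&qp->sq, not the RQ), and the SQ's frags pointer starts right after the
RQ's pages. Worked example with hypothetical ring sizes, assuming 4K pages:

	/* 1024 RQ entries x 64B strides = 64KB -> 1 << get_order(64K) = 16
	 * order-0 pages, so sqb->frags = buf->frags + 16 and SQ entry 0
	 * begins in fragment 16 */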
@@ -123,15 +148,15 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		return err;
 	}

-	err = mlx5_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq),
-				  &wq_ctrl->buf, param->buf_numa_node);
+	err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq),
+				       &wq_ctrl->buf, param->buf_numa_node);
 	if (err) {
-		mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
+		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
 		goto err_db_free;
 	}

-	wq->rq.buf = wq_ctrl->buf.frags->buf;
-	wq->sq.buf = wq->rq.buf + mlx5_wq_cyc_get_byte_size(&wq->rq);
+	mlx5e_qp_set_frag_buf(&wq_ctrl->buf, wq);
+
 	wq->rq.db  = &wq_ctrl->db.db[MLX5_RCV_DBR];
 	wq->sq.db  = &wq_ctrl->db.db[MLX5_SND_DBR];
@@ -147,7 +172,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,

 int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		     void *cqc, struct mlx5_cqwq *wq,
-		     struct mlx5_frag_wq_ctrl *wq_ctrl)
+		     struct mlx5_wq_ctrl *wq_ctrl)
 {
 	int err;
@@ -160,7 +185,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 	}

 	err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
-				       &wq_ctrl->frag_buf,
+				       &wq_ctrl->buf,
 				       param->buf_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n",
@@ -168,7 +193,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		goto err_db_free;
 	}

-	wq->fbc.frag_buf = wq_ctrl->frag_buf;
+	wq->fbc.frag_buf = wq_ctrl->buf;
 	wq->db  = wq_ctrl->db.db;

 	wq_ctrl->mdev = mdev;
@@ -185,12 +210,14 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		       void *wqc, struct mlx5_wq_ll *wq,
 		       struct mlx5_wq_ctrl *wq_ctrl)
 {
+	struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
 	struct mlx5_wqe_srq_next_seg *next_seg;
 	int err;
 	int i;

-	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
-	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+	mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
+		      MLX5_GET(wq, wqc, log_wq_sz),
+		      fbc);

 	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
@@ -198,17 +225,17 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		return err;
 	}

-	err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
-				  &wq_ctrl->buf, param->buf_numa_node);
+	err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
+				       &wq_ctrl->buf, param->buf_numa_node);
 	if (err) {
-		mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
+		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
 		goto err_db_free;
 	}

-	wq->buf = wq_ctrl->buf.frags->buf;
+	wq->fbc.frag_buf = wq_ctrl->buf;
 	wq->db  = wq_ctrl->db.db;

-	for (i = 0; i < wq->sz_m1; i++) {
+	for (i = 0; i < fbc->sz_m1; i++) {
 		next_seg = mlx5_wq_ll_get_wqe(wq, i);
 		next_seg->next_wqe_index = cpu_to_be16(i + 1);
 	}
@@ -227,12 +254,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,

 void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
 {
-	mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
+	mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
 	mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
 }
-
-void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl)
-{
-	mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->frag_buf);
-	mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
-}
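With mlx5_cqwq_destroy() gone, all WQ types now tear down through the one
helper. The calling pattern (as in the FPGA conn changes above) retires the
HW object first and only then releases the fragmented buffer and doorbell:

	mlx5_core_destroy_cq(mdev, &cq->mcq);	/* HW object first... */
	mlx5_wq_destroy(&cq->wq_ctrl);		/* ...then frags + doorbell */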
@@ -48,17 +48,9 @@ struct mlx5_wq_ctrl {
 	struct mlx5_db db;
 };

-struct mlx5_frag_wq_ctrl {
-	struct mlx5_core_dev *mdev;
-	struct mlx5_frag_buf frag_buf;
-	struct mlx5_db db;
-};
-
 struct mlx5_wq_cyc {
-	void *buf;
+	struct mlx5_frag_buf_ctrl fbc;
 	__be32 *db;
-	u16 sz_m1;
-	u8 log_stride;
 };

 struct mlx5_wq_qp {
@@ -73,20 +65,19 @@ struct mlx5_cqwq {
 };

 struct mlx5_wq_ll {
-	void *buf;
+	struct mlx5_frag_buf_ctrl fbc;
 	__be32 *db;
 	__be16 *tail_next;
-	u16 sz_m1;
 	u16 head;
 	u16 wqe_ctr;
 	u16 cur_sz;
-	u8 log_stride;
 };

 int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		       void *wqc, struct mlx5_wq_cyc *wq,
 		       struct mlx5_wq_ctrl *wq_ctrl);
 u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
+u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);

 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		      void *qpc, struct mlx5_wq_qp *wq,
@@ -94,7 +85,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		     void *cqc, struct mlx5_cqwq *wq,
-		     struct mlx5_frag_wq_ctrl *wq_ctrl);
+		     struct mlx5_wq_ctrl *wq_ctrl);
 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);

 int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
@@ -103,16 +94,20 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
 void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
-void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl);

 static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
 {
-	return ctr & wq->sz_m1;
+	return ctr & wq->fbc.sz_m1;
+}
+
+static inline u16 mlx5_wq_cyc_ctr2fragix(struct mlx5_wq_cyc *wq, u16 ctr)
+{
+	return ctr & wq->fbc.frag_sz_m1;
 }

 static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
 {
-	return wq->buf + (ix << wq->log_stride);
+	return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
 }

 static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
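Worked example of the two cyclic-WQ index helpers, assuming 64B strides on
4K pages (frag_sz_m1 = 63) and a 256-entry ring (sz_m1 = 255): for a
producer counter of 130, mlx5_wq_cyc_ctr2ix() yields 130 & 255 = 130 (the
ring slot) while mlx5_wq_cyc_ctr2fragix() yields 130 & 63 = 2 (the slot
within its page fragment), which lets callers detect WQEs that would
straddle a fragment boundary.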
@@ -123,9 +118,14 @@ static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
 	return !equal && !smaller;
 }

+static inline u32 mlx5_cqwq_ctr2ix(struct mlx5_cqwq *wq, u32 ctr)
+{
+	return ctr & wq->fbc.sz_m1;
+}
+
 static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
 {
-	return wq->cc & wq->fbc.sz_m1;
+	return mlx5_cqwq_ctr2ix(wq, wq->cc);
 }

 static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
@@ -133,9 +133,14 @@ static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
 	return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
 }

+static inline u32 mlx5_cqwq_get_ctr_wrap_cnt(struct mlx5_cqwq *wq, u32 ctr)
+{
+	return ctr >> wq->fbc.log_sz;
+}
+
 static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
 {
-	return wq->cc >> wq->fbc.log_sz;
+	return mlx5_cqwq_get_ctr_wrap_cnt(wq, wq->cc);
 }

 static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
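Factoring mlx5_cqwq_ctr2ix() and mlx5_cqwq_get_ctr_wrap_cnt() out of the
cc-based getters lets callers evaluate arbitrary counters. A hedged sketch
of what the wrap count is for, mirroring the ownership test in the existing
mlx5_cqwq_get_cqe():

	static inline bool cqe_is_sw_owned(struct mlx5_cqwq *wq,
					   struct mlx5_cqe64 *cqe)
	{
		u8 sw_own = mlx5_cqwq_get_wrap_cnt(wq) & 1;

		/* HW flips the owner bit on each pass over the ring; SW owns
		 * the CQE when the bit matches the wrap-count parity */
		return (cqe->op_own & MLX5_CQE_OWNER_MASK) == sw_own;
	}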
@@ -166,7 +171,7 @@ static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq)
 static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
 {
-	return wq->cur_sz == wq->sz_m1;
+	return wq->cur_sz == wq->fbc.sz_m1;
 }

 static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
@@ -174,9 +179,14 @@ static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
 	return !wq->cur_sz;
 }

+static inline u16 mlx5_wq_ll_ctr2ix(struct mlx5_wq_ll *wq, u16 ctr)
+{
+	return ctr & wq->fbc.sz_m1;
+}
+
 static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
 {
-	return wq->buf + (ix << wq->log_stride);
+	return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
 }

 static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
...
@@ -983,16 +983,24 @@ static inline u32 mlx5_base_mkey(const u32 key)
 	return key & 0xffffff00u;
 }

-static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
-					      void *cqc)
+static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
+				 struct mlx5_frag_buf_ctrl *fbc)
 {
-	fbc->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
-	fbc->log_sz     = MLX5_GET(cqc, cqc, log_cq_size);
+	fbc->log_stride = log_stride;
+	fbc->log_sz     = log_sz;
 	fbc->sz_m1	= (1 << fbc->log_sz) - 1;
 	fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
 	fbc->frag_sz_m1	= (1 << fbc->log_frag_strides) - 1;
 }

+static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
+					      void *cqc)
+{
+	mlx5_fill_fbc(6 + MLX5_GET(cqc, cqc, cqe_sz),
+		      MLX5_GET(cqc, cqc, log_cq_size),
+		      fbc);
+}
+
 static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
 					  u32 ix)
 {
...
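Worked example of the generalized mlx5_fill_fbc(), assuming 4K pages: a CQ
of 1024 64-byte CQEs now initializes via mlx5_fill_fbc(6, 10, fbc), giving
sz_m1 = 1023, log_frag_strides = 12 - 6 = 6 (64 CQEs per order-0 fragment)
and frag_sz_m1 = 63; mlx5_frag_buf_get_wqe() then resolves entry ix as
frags[ix >> 6].buf + ((ix & 63) << 6).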
@@ -524,7 +524,9 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
 };

 struct mlx5_ifc_flow_table_eswitch_cap_bits {
-	u8         reserved_at_0[0x200];
+	u8         reserved_at_0[0x1c];
+	u8         fdb_multi_path_to_table[0x1];
+	u8         reserved_at_1d[0x1e3];

 	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
...
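Sanity check on the reworked capability layout: 0x1c + 0x1 + 0x1e3 = 28 + 1
+ 483 = 512 = 0x200 bits, so flow_table_properties_nic_esw_fdb keeps its
original offset while the new fdb_multi_path_to_table bit sits at offset 28
of the eswitch flow-table caps.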