Commit 73248801 authored by David S. Miller

Merge tag 'mlx5-fixes-2019-04-09' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2019-04-09

This series provides some fixes to the mlx5 driver.

I've CC'ed Eric Dumazet on some of the checksum fixes and would like to get
his feedback before you pull.

For -stable v4.19
('net/mlx5: FPGA, tls, idr remove on flow delete')
('net/mlx5: FPGA, tls, hold rcu read lock a bit longer')

For -stable v4.20
('net/mlx5e: Rx, Check ip headers sanity')
('Revert "net/mlx5e: Enable reporting checksum unnecessary also for L3 packets"')
('net/mlx5e: Rx, Fixup skb checksum for packets with tail padding')

For -stable v5.0
('net/mlx5e: Switch to Toeplitz RSS hash by default')
('net/mlx5e: Protect against non-uplink representor for encap')
('net/mlx5e: XDP, Avoid checksum complete when XDP prog is loaded')
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 69f23a09 7ee2ace9
@@ -858,6 +858,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs);
  * switching channels
  */
 typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
+int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
 int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
                                struct mlx5e_channels *new_chs,
                                mlx5e_fp_hw_modify hw_modify);
...
@@ -186,12 +186,17 @@ static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx)
 static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv)
 {
-	int err;
+	int err = 0;
 
 	rtnl_lock();
 	mutex_lock(&priv->state_lock);
-	mlx5e_close_locked(priv->netdev);
-	err = mlx5e_open_locked(priv->netdev);
+
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+		goto out;
+
+	err = mlx5e_safe_reopen_channels(priv);
+
+out:
 	mutex_unlock(&priv->state_lock);
 	rtnl_unlock();
...
@@ -39,6 +39,10 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
 		return -EOPNOTSUPP;
 	}
 
+	if (!(mlx5e_eswitch_rep(*out_dev) &&
+	      mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
+		return -EOPNOTSUPP;
+
 	return 0;
 }
...
@@ -1768,7 +1768,8 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
 	struct mlx5e_channel *c;
 	int i;
 
-	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state) ||
+	    priv->channels.params.xdp_prog)
 		return 0;
 
 	for (i = 0; i < channels->num; i++) {
...
@@ -951,7 +951,11 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
 	if (params->rx_dim_enabled)
 		__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
-	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE))
+	/* We disable csum_complete when XDP is enabled since
+	 * XDP programs might manipulate packets which will render
+	 * skb->checksum incorrect.
+	 */
+	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
 		__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
 
 	return 0;
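[Editorial aside, a hedged illustration rather than part of this series: CHECKSUM_COMPLETE is a full checksum the NIC computed over the packet bytes as they arrived, so any XDP program that rewrites even one byte before XDP_PASS leaves skb->csum describing bytes that no longer exist. A minimal example of such a program, with hypothetical names and standard libbpf headers assumed:

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_mangle(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;
	unsigned char *p;

	if ((void *)(eth + 1) > data_end)
		return XDP_PASS;

	p = (unsigned char *)(eth + 1);
	if ((void *)(p + 1) > data_end)
		return XDP_PASS;

	*p ^= 0xff;		/* mutate a byte the HW checksum already covered */
	return XDP_PASS;	/* a full HW csum for this skb would now be stale */
}

char _license[] SEC("license") = "GPL";

Hence the hunk above forces the NO_CSUM_COMPLETE RQ state whenever an XDP program is attached, independent of the privileged flag.]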
@@ -2937,6 +2941,14 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
 	return 0;
 }
 
+int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
+{
+	struct mlx5e_channels new_channels = {};
+
+	new_channels.params = priv->channels.params;
+	return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+}
+
 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 {
 	priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
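[A brief note on the new helper: it clones the current channel parameters and swaps them in via mlx5e_safe_switch_channels(), so recovery paths no longer bounce the whole netdev through close/open. Both converted call sites in this series follow the same shape; a hedged sketch of the pattern:

	/* sketch only, mirroring the tx_timeout conversion below */
	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		err = mlx5e_safe_reopen_channels(priv);
		if (err)
			netdev_err(priv->netdev,
				   "channel reopen failed, err(%d)\n", err);
	}
	mutex_unlock(&priv->state_lock);
]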
@@ -4161,11 +4173,10 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
 	if (!report_failed)
 		goto unlock;
 
-	mlx5e_close_locked(priv->netdev);
-	err = mlx5e_open_locked(priv->netdev);
+	err = mlx5e_safe_reopen_channels(priv);
 	if (err)
 		netdev_err(priv->netdev,
-			   "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+			   "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
 			   err);
 
 unlock:
@@ -4553,7 +4564,7 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
 {
 	enum mlx5e_traffic_types tt;
 
-	rss_params->hfunc = ETH_RSS_HASH_XOR;
+	rss_params->hfunc = ETH_RSS_HASH_TOP;
 	netdev_rss_key_fill(rss_params->toeplitz_hash_key,
 			    sizeof(rss_params->toeplitz_hash_key));
 	mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
...
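[For readers wondering about the constant: ETH_RSS_HASH_TOP is the kernel's identifier for the Toeplitz hash function, while ETH_RSS_HASH_XOR selects the cheaper XOR folding scheme. This hunk moves the default back to Toeplitz; XOR remains selectable per device at runtime. A hedged usage sketch, assuming a hypothetical interface name and an ethtool recent enough to support hfunc selection:

	# report the RSS hash function, key and indirection table
	ethtool -x eth0
	# opt back in to the XOR hash function if desired
	ethtool -X eth0 hfunc xor
]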
@@ -692,7 +692,14 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
 {
 	*proto = ((struct ethhdr *)skb->data)->h_proto;
 	*proto = __vlan_get_protocol(skb, *proto, network_depth);
-	return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6));
+
+	if (*proto == htons(ETH_P_IP))
+		return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
+
+	if (*proto == htons(ETH_P_IPV6))
+		return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
+
+	return false;
 }
 
 static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
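[A hedged note on why the pskb_may_pull() calls matter: the tail-padding fixup added later in this series (mlx5e_skb_padding_csum()) reads the IP header straight out of skb->data, so the header must already sit in the linear area. pskb_may_pull() guarantees exactly that, pulling bytes in from fragments when needed and failing on truncated frames:

	/* illustrative only; the real check is inside is_last_ethertype_ip() */
	if (!pskb_may_pull(skb, network_depth + sizeof(struct iphdr)))
		return false;	/* frame too short: no sane IPv4 header */
	ip4 = (struct iphdr *)(skb->data + network_depth);	/* now safe to read */
]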
@@ -712,17 +719,6 @@ static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
 	rq->stats->ecn_mark += !!rc;
 }
 
-static u32 mlx5e_get_fcs(const struct sk_buff *skb)
-{
-	const void *fcs_bytes;
-	u32 _fcs_bytes;
-
-	fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
-				       ETH_FCS_LEN, &_fcs_bytes);
-
-	return __get_unaligned_cpu32(fcs_bytes);
-}
-
 static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
 {
 	void *ip_p = skb->data + network_depth;
@@ -733,6 +729,68 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
 
 #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
 
+#define MAX_PADDING 8
+
+static void
+tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
+		       struct mlx5e_rq_stats *stats)
+{
+	stats->csum_complete_tail_slow++;
+	skb->csum = csum_block_add(skb->csum,
+				   skb_checksum(skb, offset, len, 0),
+				   offset);
+}
+
+static void
+tail_padding_csum(struct sk_buff *skb, int offset,
+		  struct mlx5e_rq_stats *stats)
+{
+	u8 tail_padding[MAX_PADDING];
+	int len = skb->len - offset;
+	void *tail;
+
+	if (unlikely(len > MAX_PADDING)) {
+		tail_padding_csum_slow(skb, offset, len, stats);
+		return;
+	}
+
+	tail = skb_header_pointer(skb, offset, len, tail_padding);
+	if (unlikely(!tail)) {
+		tail_padding_csum_slow(skb, offset, len, stats);
+		return;
+	}
+
+	stats->csum_complete_tail++;
+	skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
+}
+
+static void
+mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto,
+		       struct mlx5e_rq_stats *stats)
+{
+	struct ipv6hdr *ip6;
+	struct iphdr   *ip4;
+	int pkt_len;
+
+	switch (proto) {
+	case htons(ETH_P_IP):
+		ip4 = (struct iphdr *)(skb->data + network_depth);
+		pkt_len = network_depth + ntohs(ip4->tot_len);
+		break;
+	case htons(ETH_P_IPV6):
+		ip6 = (struct ipv6hdr *)(skb->data + network_depth);
+		pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
+		break;
+	default:
+		return;
+	}
+
+	if (likely(pkt_len >= skb->len))
+		return;
+
+	tail_padding_csum(skb, pkt_len, stats);
+}
+
 static inline void mlx5e_handle_csum(struct net_device *netdev,
 				     struct mlx5_cqe64 *cqe,
 				     struct mlx5e_rq *rq,
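[To make the arithmetic concrete, a hedged userspace sketch of what tail_padding_csum() adds back: the partial checksum of the padding bytes, folded into the running sum. This is a simplified 16-bit one's-complement fold, not the kernel's csum_block_add(), and it ignores odd-offset byte swapping:

#include <stdint.h>
#include <stdio.h>

static uint32_t csum_add_bytes(uint32_t sum, const uint8_t *p, int len)
{
	/* accumulate 16-bit big-endian words, then fold carries */
	for (int i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)p[i] << 8 | p[i + 1];
	if (len & 1)
		sum += (uint32_t)p[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

int main(void)
{
	uint8_t pad[6] = {0};	/* short-frame tail padding is typically zeros */
	uint32_t csum = 0x1234;	/* pretend HW csum over the IP-length bytes */

	/* zero padding contributes nothing; non-zero padding would shift
	 * the sum, which is exactly the mismatch this fix repairs */
	printf("0x%x\n", (unsigned)csum_add_bytes(csum, pad, sizeof(pad)));
	return 0;
}
]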
@@ -752,7 +810,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 		return;
 	}
 
-	if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
+	/* True when explicitly set via priv flag, or XDP prog is loaded */
+	if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
 		goto csum_unnecessary;
 
 	/* CQE csum doesn't cover padding octets in short ethernet
@@ -780,18 +839,15 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 			skb->csum = csum_partial(skb->data + ETH_HLEN,
 						 network_depth - ETH_HLEN,
 						 skb->csum);
-		if (unlikely(netdev->features & NETIF_F_RXFCS))
-			skb->csum = csum_block_add(skb->csum,
-						   (__force __wsum)mlx5e_get_fcs(skb),
-						   skb->len - ETH_FCS_LEN);
+
+		mlx5e_skb_padding_csum(skb, network_depth, proto, stats);
 		stats->csum_complete++;
 		return;
 	}
 
 csum_unnecessary:
 	if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
-		   ((cqe->hds_ip_ext & CQE_L4_OK) ||
-		    (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) {
+		   (cqe->hds_ip_ext & CQE_L4_OK))) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 		if (cqe_is_tunneled(cqe)) {
 			skb->csum_level = 1;
...
@@ -59,6 +59,8 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
@@ -151,6 +153,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 		s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
 		s->rx_csum_none += rq_stats->csum_none;
 		s->rx_csum_complete += rq_stats->csum_complete;
+		s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
+		s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
 		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
 		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
 		s->rx_xdp_drop += rq_stats->xdp_drop;
@@ -1190,6 +1194,8 @@ static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
...
@@ -71,6 +71,8 @@ struct mlx5e_sw_stats {
 	u64 rx_csum_unnecessary;
 	u64 rx_csum_none;
 	u64 rx_csum_complete;
+	u64 rx_csum_complete_tail;
+	u64 rx_csum_complete_tail_slow;
 	u64 rx_csum_unnecessary_inner;
 	u64 rx_xdp_drop;
 	u64 rx_xdp_redirect;
@@ -181,6 +183,8 @@ struct mlx5e_rq_stats {
 	u64 packets;
 	u64 bytes;
 	u64 csum_complete;
+	u64 csum_complete_tail;
+	u64 csum_complete_tail_slow;
 	u64 csum_unnecessary;
 	u64 csum_unnecessary_inner;
 	u64 csum_none;
...
@@ -148,14 +148,16 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
 	return ret;
 }
 
-static void mlx5_fpga_tls_release_swid(struct idr *idr,
-				       spinlock_t *idr_spinlock, u32 swid)
+static void *mlx5_fpga_tls_release_swid(struct idr *idr,
+					spinlock_t *idr_spinlock, u32 swid)
 {
 	unsigned long flags;
+	void *ptr;
 
 	spin_lock_irqsave(idr_spinlock, flags);
-	idr_remove(idr, swid);
+	ptr = idr_remove(idr, swid);
 	spin_unlock_irqrestore(idr_spinlock, flags);
+	return ptr;
 }
 
 static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
@@ -165,20 +167,12 @@ static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
 	kfree(buf);
 }
 
-struct mlx5_teardown_stream_context {
-	struct mlx5_fpga_tls_command_context cmd;
-	u32 swid;
-};
-
 static void
 mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
 				  struct mlx5_fpga_device *fdev,
 				  struct mlx5_fpga_tls_command_context *cmd,
 				  struct mlx5_fpga_dma_buf *resp)
 {
-	struct mlx5_teardown_stream_context *ctx =
-		container_of(cmd, struct mlx5_teardown_stream_context, cmd);
-
 	if (resp) {
 		u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
 
@@ -186,14 +180,6 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
 			mlx5_fpga_err(fdev,
 				      "Teardown stream failed with syndrome = %d",
 				      syndrome);
-		else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
-			mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
-						   &fdev->tls->tx_idr_spinlock,
-						   ctx->swid);
-		else
-			mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
-						   &fdev->tls->rx_idr_spinlock,
-						   ctx->swid);
 	}
 	mlx5_fpga_tls_put_command_ctx(cmd);
 }
@@ -217,22 +203,22 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
 	void *cmd;
 	int ret;
 
-	rcu_read_lock();
-	flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
-	rcu_read_unlock();
-
-	if (!flow) {
-		WARN_ONCE(1, "Received NULL pointer for handle\n");
-		return -EINVAL;
-	}
-
 	buf = kzalloc(size, GFP_ATOMIC);
 	if (!buf)
 		return -ENOMEM;
 
 	cmd = (buf + 1);
 
+	rcu_read_lock();
+	flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
+	if (unlikely(!flow)) {
+		rcu_read_unlock();
+		WARN_ONCE(1, "Received NULL pointer for handle\n");
+		kfree(buf);
+		return -EINVAL;
+	}
 	mlx5_fpga_tls_flow_to_cmd(flow, cmd);
+	rcu_read_unlock();
 
 	MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
 	MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
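[A hedged aside on the locking rule this hunk, together with the del_flow change below, enforces: the flow pointer is only guaranteed alive while the RCU read-side critical section that found it is still open, because the delete path now unpublishes it from the IDR first and waits in synchronize_rcu() before freeing. The generic shape of the pattern, with hypothetical obj/use() names:

	/* reader */
	rcu_read_lock();
	obj = idr_find(&idr, id);
	if (obj)
		use(obj);	/* must finish before the unlock */
	rcu_read_unlock();

	/* writer/deleter */
	obj = idr_remove(&idr, id);	/* unpublish first */
	synchronize_rcu();		/* wait out all readers */
	kfree(obj);			/* nobody can still see it */
]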
@@ -253,7 +239,7 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
 static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
 					    void *flow, u32 swid, gfp_t flags)
 {
-	struct mlx5_teardown_stream_context *ctx;
+	struct mlx5_fpga_tls_command_context *ctx;
 	struct mlx5_fpga_dma_buf *buf;
 	void *cmd;
 
@@ -261,7 +247,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
 	if (!ctx)
 		return;
 
-	buf = &ctx->cmd.buf;
+	buf = &ctx->buf;
 	cmd = (ctx + 1);
 
 	MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
 	MLX5_SET(tls_cmd, cmd, swid, swid);
@@ -272,8 +258,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
 	buf->sg[0].data = cmd;
 	buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
 
-	ctx->swid = swid;
-	mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
+	mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
 			       mlx5_fpga_tls_teardown_completion);
 }
@@ -283,13 +268,14 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
 	struct mlx5_fpga_tls *tls = mdev->fpga->tls;
 	void *flow;
 
-	rcu_read_lock();
 	if (direction_sx)
-		flow = idr_find(&tls->tx_idr, swid);
+		flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
+						  &tls->tx_idr_spinlock,
+						  swid);
 	else
-		flow = idr_find(&tls->rx_idr, swid);
-
-	rcu_read_unlock();
+		flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
+						  &tls->rx_idr_spinlock,
+						  swid);
 
 	if (!flow) {
 		mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
@@ -297,6 +283,7 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
 		return;
 	}
 
+	synchronize_rcu(); /* before kfree(flow) */
 	mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
 }
...