Commit 4c78782e authored by Maxim Mikityanskiy, committed by Jakub Kicinski

net/mlx5e: kTLS, Check ICOSQ WQE size in advance

Instead of WARNing at runtime when TLS offload WQEs posted to the ICOSQ
exceed the hardware limit, check their size before enabling TLS RX
offload and block the offload if the check fails. This also allows
dropping a u16 field from struct mlx5e_icosq.
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 21a0502d
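
As context for the change described above, the following is a minimal, self-contained sketch of the pattern the commit applies: validate the worst-case control-WQE sizes once, when deciding whether to advertise the offload, rather than WARNing on every post in the datapath. The struct, constants, and function names below are illustrative placeholders with made-up sizes, not the mlx5 API; the real check is the mlx5e_is_ktls_rx() added in the diffs that follow.

/* Illustrative sketch only: placeholder names and example WQEBB sizes,
 * not mlx5 definitions. */
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical worst-case control-WQE sizes, in WQE basic blocks (WQEBBs). */
#define SET_STATIC_PARAMS_WQEBBS   4
#define SET_PROGRESS_PARAMS_WQEBBS 2
#define GET_PROGRESS_PARAMS_WQEBBS 1

struct hw_caps {
        uint8_t max_sq_wqebbs;  /* largest WQE the device accepts */
        bool    tls_rx;         /* device advertises TLS RX support */
};

/* Decide once, before advertising the feature, whether every control WQE
 * the offload may post fits within the device limit. If any does not,
 * keep the feature disabled instead of hitting a WARN in the datapath. */
bool tls_rx_offload_supported(const struct hw_caps *caps)
{
        if (!caps->tls_rx)
                return false;
        if (caps->max_sq_wqebbs < SET_STATIC_PARAMS_WQEBBS ||
            caps->max_sq_wqebbs < SET_PROGRESS_PARAMS_WQEBBS ||
            caps->max_sq_wqebbs < GET_PROGRESS_PARAMS_WQEBBS)
                return false;
        return true;
}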
@@ -609,7 +609,6 @@ struct mlx5e_icosq {
 	/* control path */
 	struct mlx5_wq_ctrl wq_ctrl;
 	struct mlx5e_channel *channel;
-	u16 max_sq_wqebbs;
 
 	struct work_struct recover_work;
 } ____cacheline_aligned_in_smp;
@@ -448,13 +448,7 @@ static inline u16 mlx5e_stop_room_for_mpwqe(struct mlx5_core_dev *mdev)
 
 static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
 {
-	u16 room = sq->reserved_room;
-
-	WARN_ONCE(wqe_size > sq->max_sq_wqebbs,
-		  "wqe_size %u is greater than max SQ WQEBBs %u",
-		  wqe_size, sq->max_sq_wqebbs);
-
-	room += MLX5E_STOP_ROOM(wqe_size);
+	u16 room = sq->reserved_room + MLX5E_STOP_ROOM(wqe_size);
 
 	return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
 }
@@ -92,6 +92,24 @@ static const struct tlsdev_ops mlx5e_ktls_ops = {
 	.tls_dev_resync = mlx5e_ktls_resync,
 };
 
+bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
+{
+	u8 max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
+
+	if (is_kdump_kernel() || !MLX5_CAP_GEN(mdev, tls_rx))
+		return false;
+
+	/* Check the possibility to post the required ICOSQ WQEs. */
+	if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS))
+		return false;
+	if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS))
+		return false;
+	if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_KTLS_GET_PROGRESS_WQEBBS))
+		return false;
+
+	return true;
+}
+
 void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
 {
 	struct net_device *netdev = priv->netdev;
@@ -61,10 +61,7 @@ static inline bool mlx5e_is_ktls_tx(struct mlx5_core_dev *mdev)
 	return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx);
 }
 
-static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
-{
-	return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_rx);
-}
+bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev);
 
 struct mlx5e_tls_sw_stats {
 	atomic64_t tx_tls_ctx;
@@ -1232,7 +1232,6 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
 	sq->channel = c;
 	sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
 	sq->reserved_room = param->stop_room;
-	sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
 
 	param->wq.db_numa_node = cpu_to_node(c->cpu);
 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);