Commit 09f83569 authored by Maxim Mikityanskiy, committed by Saeed Mahameed

net/mlx5e: Use the new TIR API for kTLS

A previous commit introduced a dedicated object for a TIR. The kTLS
code still creates a TIR per connection using the low-level mlx5_core
API; this commit converts it to the new mlx5e_tir API.
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 65d6b6e5
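For readers unfamiliar with the builder API, the pattern this patch converts to can be condensed as follows. This is a minimal sketch distilled from the hunks below, not a standalone program: it assumes the mlx5 driver's internal headers that declare the builder and TIR helpers shown in the diff, and the function name ktls_tir_example is hypothetical.

/* Sketch of the builder-based TIR creation pattern adopted below.
 * All helpers and arguments mirror the new mlx5e_ktls_create_tir()
 * in this patch; only the wrapper function name is invented.
 */
static int ktls_tir_example(struct mlx5_core_dev *mdev,
			    struct mlx5e_tir *tir, u32 rqtn)
{
	struct mlx5e_tir_builder *builder;
	int err;

	/* false: building a create command, not a modify
	 * (inferred from the WARN_ON(builder->modify) in build_tls). */
	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	/* Attach the TIR to the connection's direct RQT within the
	 * device's transport domain; the trailing false disables the
	 * inner flow-table option. */
	mlx5e_tir_builder_build_rqt(builder, mdev->mlx5e_res.hw_objs.td.tdn,
				    rqtn, false);
	/* Configure it as a direct (non-RSS) TIR. */
	mlx5e_tir_builder_build_direct(builder);
	/* Enable TLS decryption offload and block self-loopback. */
	mlx5e_tir_builder_build_tls(builder);

	/* Create the hardware object; the final false is the "reg"
	 * parameter of mlx5e_tir_init(), matching the kTLS call site. */
	err = mlx5e_tir_init(tir, builder, mdev, false);

	mlx5e_tir_builder_free(builder);
	return err;
}

Callers then fetch the TIR number with mlx5e_tir_get_tirn() wherever the raw tirn was used before, and tear the object down with mlx5e_tir_destroy(), as the remaining hunks show.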
@@ -140,6 +140,18 @@ void mlx5e_tir_builder_build_direct(struct mlx5e_tir_builder *builder)
 	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
 }
 
+void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder)
+{
+	void *tirc = mlx5e_tir_builder_get_tirc(builder);
+
+	WARN_ON(builder->modify);
+
+	MLX5_SET(tirc, tirc, tls_en, 1);
+	MLX5_SET(tirc, tirc, self_lb_block,
+		 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST |
+		 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST);
+}
+
 int mlx5e_tir_init(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder,
 		   struct mlx5_core_dev *mdev, bool reg)
 {
...
@@ -34,6 +34,7 @@ void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder,
 				 const struct mlx5e_rss_params_traffic_type *rss_tt,
 				 bool inner);
 void mlx5e_tir_builder_build_direct(struct mlx5e_tir_builder *builder);
+void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder);
 
 struct mlx5_core_dev;
...
@@ -49,7 +49,7 @@ struct mlx5e_ktls_offload_context_rx {
 	struct mlx5e_rq_stats *rq_stats;
 	struct mlx5e_tls_sw_stats *sw_stats;
 	struct completion add_ctx;
-	u32 tirn;
+	struct mlx5e_tir tir;
 	u32 key_id;
 	u32 rxq;
 	DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS);
@@ -99,31 +99,22 @@ mlx5e_ktls_rx_resync_create_resp_list(void)
 	return resp_list;
 }
 
-static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn)
+static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 rqtn)
 {
-	int err, inlen;
-	void *tirc;
-	u32 *in;
+	struct mlx5e_tir_builder *builder;
+	int err;
 
-	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
-	in = kvzalloc(inlen, GFP_KERNEL);
-	if (!in)
+	builder = mlx5e_tir_builder_alloc(false);
+	if (!builder)
 		return -ENOMEM;
 
-	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
-
-	MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
-	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
-	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
-	MLX5_SET(tirc, tirc, indirect_table, rqtn);
-	MLX5_SET(tirc, tirc, tls_en, 1);
-	MLX5_SET(tirc, tirc, self_lb_block,
-		 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST |
-		 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST);
+	mlx5e_tir_builder_build_rqt(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqtn, false);
+	mlx5e_tir_builder_build_direct(builder);
+	mlx5e_tir_builder_build_tls(builder);
+	err = mlx5e_tir_init(tir, builder, mdev, false);
 
-	err = mlx5_core_create_tir(mdev, in, tirn);
+	mlx5e_tir_builder_free(builder);
 
-	kvfree(in);
 	return err;
 }
@@ -139,7 +130,8 @@ static void accel_rule_handle_work(struct work_struct *work)
 		goto out;
 
 	rule = mlx5e_accel_fs_add_sk(accel_rule->priv, priv_rx->sk,
-				     priv_rx->tirn, MLX5_FS_DEFAULT_FLOW_TAG);
+				     mlx5e_tir_get_tirn(&priv_rx->tir),
+				     MLX5_FS_DEFAULT_FLOW_TAG);
 	if (!IS_ERR_OR_NULL(rule))
 		accel_rule->rule = rule;
 out:
@@ -173,8 +165,8 @@ post_static_params(struct mlx5e_icosq *sq,
 	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
 	wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
 	mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info,
-				       priv_rx->tirn, priv_rx->key_id,
-				       priv_rx->resync.seq, false,
+				       mlx5e_tir_get_tirn(&priv_rx->tir),
+				       priv_rx->key_id, priv_rx->resync.seq, false,
 				       TLS_OFFLOAD_CTX_DIR_RX);
 	wi = (struct mlx5e_icosq_wqe_info) {
 		.wqe_type = MLX5E_ICOSQ_WQE_UMR_TLS,
@@ -202,8 +194,9 @@ post_progress_params(struct mlx5e_icosq *sq,
 	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
 	wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
-	mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_rx->tirn, false,
-					 next_record_tcp_sn,
+	mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn,
+					 mlx5e_tir_get_tirn(&priv_rx->tir),
+					 false, next_record_tcp_sn,
 					 TLS_OFFLOAD_CTX_DIR_RX);
 
 	wi = (struct mlx5e_icosq_wqe_info) {
 		.wqe_type = MLX5E_ICOSQ_WQE_SET_PSV_TLS,
@@ -325,7 +318,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
 	psv = &wqe->psv;
 	psv->num_psv = 1 << 4;
 	psv->l_key = sq->channel->mkey_be;
-	psv->psv_index[0] = cpu_to_be32(priv_rx->tirn);
+	psv->psv_index[0] = cpu_to_be32(mlx5e_tir_get_tirn(&priv_rx->tir));
 	psv->va = cpu_to_be64(buf->dma_addr);
 
 	wi = (struct mlx5e_icosq_wqe_info) {
@@ -637,7 +630,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
 	rqtn = mlx5e_rqt_get_rqtn(&priv->rx_res->channels[rxq].direct_rqt);
 
-	err = mlx5e_ktls_create_tir(mdev, &priv_rx->tirn, rqtn);
+	err = mlx5e_ktls_create_tir(mdev, &priv_rx->tir, rqtn);
 	if (err)
 		goto err_create_tir;
@@ -658,7 +651,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
 	return 0;
 
 err_post_wqes:
-	mlx5_core_destroy_tir(mdev, priv_rx->tirn);
+	mlx5e_tir_destroy(&priv_rx->tir);
 err_create_tir:
 	mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
 err_create_key:
@@ -693,7 +686,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
 	if (priv_rx->rule.rule)
 		mlx5e_accel_fs_del_sk(priv_rx->rule.rule);
-	mlx5_core_destroy_tir(mdev, priv_rx->tirn);
+	mlx5e_tir_destroy(&priv_rx->tir);
 	mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
 
 	/* priv_rx should normally be freed here, but if there is an outstanding
 	 * GET_PSV, deallocation will be delayed until the CQE for GET_PSV is
...