Commit 1be44b42 authored by Lama Kayal, committed by Saeed Mahameed

net/mlx5e: Decouple fs_tcp from en.h

Make flow steering files fs_tcp.c/h independent of en.h
such that they go through the flow steering API only.

Report errors via the mlx5_core API instead of the netdev_err API, to
ensure a safe decoupling from en.h and to avoid redundant argument
passing.
Signed-off-by: Lama Kayal <lkayal@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 4e0ecc17
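
For illustration, a minimal caller-side sketch of the API change (the wrapper name example_ktls_rx_steering_on is hypothetical; the calls themselves appear in the diff below): the fs_tcp entry points now take the flow steering object, priv->fs, instead of the whole mlx5e_priv, which is what lets fs_tcp.c/h drop en.h.

/* Hypothetical wrapper, sketched only to show the new calling convention. */
#include "en.h"			/* the caller still knows struct mlx5e_priv */
#include "en_accel/fs_tcp.h"	/* fs_tcp API now takes mlx5e_flow_steering */

static int example_ktls_rx_steering_on(struct mlx5e_priv *priv)
{
	/* before this patch: return mlx5e_accel_fs_tcp_create(priv); */
	return mlx5e_accel_fs_tcp_create(priv->fs);
}
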
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
 
-#include <linux/netdevice.h>
+#include <mlx5_core.h>
 #include "en_accel/fs_tcp.h"
 #include "fs_core.h"
@@ -71,11 +71,11 @@ void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule)
 	mlx5_del_flow_rules(rule);
 }
 
-struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
+struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
 					       struct sock *sk, u32 tirn,
 					       uint32_t flow_tag)
 {
-	struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(priv->fs);
+	struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
 	struct mlx5_flow_destination dest = {};
 	struct mlx5e_flow_table *ft = NULL;
 	MLX5_DECLARE_FLOW_ACT(flow_act);
@@ -92,11 +92,11 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
 	case AF_INET:
 		accel_fs_tcp_set_ipv4_flow(spec, sk);
 		ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP];
-		mlx5e_dbg(HW, priv, "%s flow is %pI4:%d -> %pI4:%d\n", __func__,
-			  &inet_sk(sk)->inet_rcv_saddr,
-			  inet_sk(sk)->inet_sport,
-			  &inet_sk(sk)->inet_daddr,
-			  inet_sk(sk)->inet_dport);
+		mlx5_core_dbg(mlx5e_fs_get_mdev(fs), "%s flow is %pI4:%d -> %pI4:%d\n", __func__,
+			      &inet_sk(sk)->inet_rcv_saddr,
+			      inet_sk(sk)->inet_sport,
+			      &inet_sk(sk)->inet_daddr,
+			      inet_sk(sk)->inet_dport);
 		break;
 #if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
@@ -138,19 +138,19 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
 	flow = mlx5_add_flow_rules(ft->t, spec, &flow_act, &dest, 1);
 	if (IS_ERR(flow))
-		netdev_err(priv->netdev, "mlx5_add_flow_rules() failed, flow is %ld\n",
-			   PTR_ERR(flow));
+		mlx5_core_err(mlx5e_fs_get_mdev(fs), "mlx5_add_flow_rules() failed, flow is %ld\n",
+			      PTR_ERR(flow));
 out:
 	kvfree(spec);
 	return flow;
 }
 
-static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv,
+static int accel_fs_tcp_add_default_rule(struct mlx5e_flow_steering *fs,
 					 enum accel_fs_tcp_type type)
 {
-	struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(priv->fs);
-	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
+	struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
+	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
 	struct mlx5e_flow_table *accel_fs_t;
 	struct mlx5_flow_destination dest;
 	MLX5_DECLARE_FLOW_ACT(flow_act);
@@ -163,9 +163,9 @@ static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv,
 	rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
-		netdev_err(priv->netdev,
-			   "%s: add default rule failed, accel_fs type=%d, err %d\n",
-			   __func__, type, err);
+		mlx5_core_err(mlx5e_fs_get_mdev(fs),
+			      "%s: add default rule failed, accel_fs type=%d, err %d\n",
+			      __func__, type, err);
 		return err;
 	}
@@ -263,10 +263,10 @@ static int accel_fs_tcp_create_groups(struct mlx5e_flow_table *ft,
 	return err;
 }
 
-static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_type type)
+static int accel_fs_tcp_create_table(struct mlx5e_flow_steering *fs, enum accel_fs_tcp_type type)
 {
-	struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(priv->fs);
-	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(priv->fs, false);
+	struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
+	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
 	struct mlx5e_flow_table *ft = &accel_tcp->tables[type];
 	struct mlx5_flow_table_attr ft_attr = {};
 	int err;
@@ -284,14 +284,14 @@ static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_
 		return err;
 	}
 
-	netdev_dbg(priv->netdev, "Created fs accel table id %u level %u\n",
-		   ft->t->id, ft->t->level);
+	mlx5_core_dbg(mlx5e_fs_get_mdev(fs), "Created fs accel table id %u level %u\n",
+		      ft->t->id, ft->t->level);
 
 	err = accel_fs_tcp_create_groups(ft, type);
 	if (err)
 		goto err;
 
-	err = accel_fs_tcp_add_default_rule(priv, type);
+	err = accel_fs_tcp_add_default_rule(fs, type);
 	if (err)
 		goto err;
@@ -301,18 +301,18 @@ static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_
 	return err;
 }
 
-static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
+static int accel_fs_tcp_disable(struct mlx5e_flow_steering *fs)
 {
-	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
+	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
 	int err, i;
 
 	for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
 		/* Modify ttc rules destination to point back to the indir TIRs */
 		err = mlx5_ttc_fwd_default_dest(ttc, fs_accel2tt(i));
 		if (err) {
-			netdev_err(priv->netdev,
-				   "%s: modify ttc[%d] default destination failed, err(%d)\n",
-				   __func__, fs_accel2tt(i), err);
+			mlx5_core_err(mlx5e_fs_get_mdev(fs),
+				      "%s: modify ttc[%d] default destination failed, err(%d)\n",
+				      __func__, fs_accel2tt(i), err);
 			return err;
 		}
 	}
@@ -320,10 +320,10 @@ static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
 	return 0;
 }
 
-static int accel_fs_tcp_enable(struct mlx5e_priv *priv)
+static int accel_fs_tcp_enable(struct mlx5e_flow_steering *fs)
 {
-	struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(priv->fs);
-	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
+	struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
+	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
 	struct mlx5_flow_destination dest = {};
 	int err, i;
@@ -334,18 +334,18 @@ static int accel_fs_tcp_enable(struct mlx5e_priv *priv)
 		/* Modify ttc rules destination to point on the accel_fs FTs */
 		err = mlx5_ttc_fwd_dest(ttc, fs_accel2tt(i), &dest);
 		if (err) {
-			netdev_err(priv->netdev,
-				   "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
-				   __func__, fs_accel2tt(i), err);
+			mlx5_core_err(mlx5e_fs_get_mdev(fs),
+				      "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
+				      __func__, fs_accel2tt(i), err);
 			return err;
 		}
 	}
 
 	return 0;
 }
 
-static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i)
+static void accel_fs_tcp_destroy_table(struct mlx5e_flow_steering *fs, int i)
 {
-	struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(priv->fs);
+	struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
 
 	if (IS_ERR_OR_NULL(fs_tcp->tables[i].t))
 		return;
@@ -355,43 +355,43 @@ static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i)
 	fs_tcp->tables[i].t = NULL;
 }
 
-void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
+void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs)
 {
-	struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(priv->fs);
+	struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
 	int i;
 
 	if (!accel_tcp)
 		return;
 
-	accel_fs_tcp_disable(priv);
+	accel_fs_tcp_disable(fs);
 
 	for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
-		accel_fs_tcp_destroy_table(priv, i);
+		accel_fs_tcp_destroy_table(fs, i);
 
 	kfree(accel_tcp);
-	mlx5e_fs_set_accel_tcp(priv->fs, NULL);
+	mlx5e_fs_set_accel_tcp(fs, NULL);
 }
 
-int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
+int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs)
 {
 	struct mlx5e_accel_fs_tcp *accel_tcp;
 	int i, err;
 
-	if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version))
+	if (!MLX5_CAP_FLOWTABLE_NIC_RX(mlx5e_fs_get_mdev(fs), ft_field_support.outer_ip_version))
 		return -EOPNOTSUPP;
 
 	accel_tcp = kvzalloc(sizeof(*accel_tcp), GFP_KERNEL);
 	if (!accel_tcp)
 		return -ENOMEM;
-	mlx5e_fs_set_accel_tcp(priv->fs, accel_tcp);
+	mlx5e_fs_set_accel_tcp(fs, accel_tcp);
 
 	for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
-		err = accel_fs_tcp_create_table(priv, i);
+		err = accel_fs_tcp_create_table(fs, i);
 		if (err)
 			goto err_destroy_tables;
 	}
 
-	err = accel_fs_tcp_enable(priv);
+	err = accel_fs_tcp_enable(fs);
 	if (err)
 		goto err_destroy_tables;
@@ -399,8 +399,8 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
 err_destroy_tables:
 	while (--i >= 0)
-		accel_fs_tcp_destroy_table(priv, i);
+		accel_fs_tcp_destroy_table(fs, i);
 
 	kfree(accel_tcp);
-	mlx5e_fs_set_accel_tcp(priv->fs, NULL);
+	mlx5e_fs_set_accel_tcp(fs, NULL);
 	return err;
 }
@@ -4,19 +4,19 @@
 #ifndef __MLX5E_ACCEL_FS_TCP_H__
 #define __MLX5E_ACCEL_FS_TCP_H__
 
-#include "en.h"
+#include "en/fs.h"
 
 #ifdef CONFIG_MLX5_EN_TLS
-int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv);
-void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv);
-struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
+int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs);
+void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs);
+struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
 					       struct sock *sk, u32 tirn,
 					       uint32_t flow_tag);
 void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule);
 #else
-static inline int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv) { return 0; }
-static inline void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv) {}
-static inline struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
+static inline int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs) { return 0; }
+static inline void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs) {}
+static inline struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
 							     struct sock *sk, u32 tirn,
 							     uint32_t flow_tag)
 { return ERR_PTR(-EOPNOTSUPP); }
@@ -118,9 +118,9 @@ int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable)
 	mutex_lock(&priv->state_lock);
 	if (enable)
-		err = mlx5e_accel_fs_tcp_create(priv);
+		err = mlx5e_accel_fs_tcp_create(priv->fs);
 	else
-		mlx5e_accel_fs_tcp_destroy(priv);
+		mlx5e_accel_fs_tcp_destroy(priv->fs);
 	mutex_unlock(&priv->state_lock);
 
 	return err;
@@ -138,7 +138,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
 		return -ENOMEM;
 
 	if (priv->netdev->features & NETIF_F_HW_TLS_RX) {
-		err = mlx5e_accel_fs_tcp_create(priv);
+		err = mlx5e_accel_fs_tcp_create(priv->fs);
 		if (err) {
 			destroy_workqueue(priv->tls->rx_wq);
 			return err;
@@ -154,7 +154,7 @@ void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
 		return;
 
 	if (priv->netdev->features & NETIF_F_HW_TLS_RX)
-		mlx5e_accel_fs_tcp_destroy(priv);
+		mlx5e_accel_fs_tcp_destroy(priv->fs);
 
 	destroy_workqueue(priv->tls->rx_wq);
 }
@@ -111,7 +111,7 @@ static void accel_rule_handle_work(struct work_struct *work)
 	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
 		goto out;
 
-	rule = mlx5e_accel_fs_add_sk(accel_rule->priv, priv_rx->sk,
+	rule = mlx5e_accel_fs_add_sk(accel_rule->priv->fs, priv_rx->sk,
 				     mlx5e_tir_get_tirn(&priv_rx->tir),
 				     MLX5_FS_DEFAULT_FLOW_TAG);
 	if (!IS_ERR_OR_NULL(rule))
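
Similarly, a minimal sketch of the reporting change inside the flow steering code (the helper name example_report_add_sk_failure is hypothetical; mlx5_core_err() and mlx5e_fs_get_mdev() are used exactly as in the hunks above): messages are emitted against the mlx5_core device looked up from the flow steering object, so no netdev and no struct mlx5e_priv are needed.

/* Hypothetical helper, for illustration only. */
static void example_report_add_sk_failure(struct mlx5e_flow_steering *fs,
					  struct mlx5_flow_handle *flow)
{
	/* before this patch: netdev_err(priv->netdev, ...) */
	mlx5_core_err(mlx5e_fs_get_mdev(fs),
		      "mlx5_add_flow_rules() failed, flow is %ld\n",
		      PTR_ERR(flow));
}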