Commit b2f7b01d authored by Leon Romanovsky, committed by Leon Romanovsky

net/mlx5e: Simulate missing IPsec TX limits hardware functionality

ConnectX-7 devices don't have the ability to send TX hard/soft limit
events. As a possible workaround, let's rely on the existing infrastructure
and use a periodic check of the cached flow counter. In these periodic checks,
we call xfrm_state_check_expire() to check and mark the state accordingly.

Once the state is marked as XFRM_STATE_EXPIRED, the SA flow rule is
changed to drop all the traffic.
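
As a rough sketch of the pattern (the actual handler added below is
mlx5e_ipsec_handle_tx_limit; the name tx_limit_poll and the inline comments
here are illustrative only), the periodic check boils down to:

    static void tx_limit_poll(struct work_struct *_work)
    {
            struct mlx5e_ipsec_dwork *dwork =
                    container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
            struct xfrm_state *x = dwork->sa_entry->x;

            /* Let the xfrm core compare the cached counters against the
             * configured soft/hard limits and raise the expire events the
             * hardware cannot generate on its own.
             */
            spin_lock(&x->lock);
            xfrm_state_check_expire(x);
            if (x->km.state == XFRM_STATE_EXPIRED) {
                    /* Hard limit reached: reinstall the SA rule as DROP. */
                    dwork->sa_entry->attrs.drop = true;
                    mlx5e_accel_ipsec_fs_modify(dwork->sa_entry);
            }
            spin_unlock(&x->lock);

            /* Keep polling until the SA has been switched to drop. */
            if (!dwork->sa_entry->attrs.drop)
                    queue_delayed_work(dwork->sa_entry->ipsec->wq, &dwork->dwork,
                                       MLX5_IPSEC_RESCHED);
    }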

Link: https://lore.kernel.org/r/94a5d82c0c399747117d8a558f9beebfbcf26154.1680162300.git.leonro@nvidia.com
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
parent 4562116f
@@ -40,6 +40,8 @@
 #include "ipsec.h"
 #include "ipsec_rxtx.h"
 
+#define MLX5_IPSEC_RESCHED msecs_to_jiffies(1000)
+
 static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
 {
         return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
@@ -50,6 +52,28 @@ static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
         return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
 }
 
+static void mlx5e_ipsec_handle_tx_limit(struct work_struct *_work)
+{
+        struct mlx5e_ipsec_dwork *dwork =
+                container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
+        struct mlx5e_ipsec_sa_entry *sa_entry = dwork->sa_entry;
+        struct xfrm_state *x = sa_entry->x;
+
+        spin_lock(&x->lock);
+        xfrm_state_check_expire(x);
+        if (x->km.state == XFRM_STATE_EXPIRED) {
+                sa_entry->attrs.drop = true;
+                mlx5e_accel_ipsec_fs_modify(sa_entry);
+        }
+        spin_unlock(&x->lock);
+
+        if (sa_entry->attrs.drop)
+                return;
+
+        queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork,
+                           MLX5_IPSEC_RESCHED);
+}
+
 static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
         struct xfrm_state *x = sa_entry->x;
@@ -464,6 +488,31 @@ static int mlx5_ipsec_create_work(struct mlx5e_ipsec_sa_entry *sa_entry)
         return 0;
 }
 
+static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+        struct xfrm_state *x = sa_entry->x;
+        struct mlx5e_ipsec_dwork *dwork;
+
+        if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+                return 0;
+
+        if (x->xso.dir != XFRM_DEV_OFFLOAD_OUT)
+                return 0;
+
+        if (x->lft.soft_packet_limit == XFRM_INF &&
+            x->lft.hard_packet_limit == XFRM_INF)
+                return 0;
+
+        dwork = kzalloc(sizeof(*dwork), GFP_KERNEL);
+        if (!dwork)
+                return -ENOMEM;
+
+        dwork->sa_entry = sa_entry;
+        INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_tx_limit);
+        sa_entry->dwork = dwork;
+        return 0;
+}
+
 static int mlx5e_xfrm_add_state(struct xfrm_state *x,
                                 struct netlink_ext_ack *extack)
 {
@@ -504,10 +553,14 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
         if (err)
                 goto err_xfrm;
 
+        err = mlx5e_ipsec_create_dwork(sa_entry);
+        if (err)
+                goto release_work;
+
         /* create hw context */
         err = mlx5_ipsec_create_sa_ctx(sa_entry);
         if (err)
-                goto release_work;
+                goto release_dwork;
 
         err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
         if (err)
@@ -523,6 +576,10 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
                 goto err_add_rule;
 
         mlx5e_ipsec_set_esn_ops(sa_entry);
+
+        if (sa_entry->dwork)
+                queue_delayed_work(ipsec->wq, &sa_entry->dwork->dwork,
+                                   MLX5_IPSEC_RESCHED);
 out:
         x->xso.offload_handle = (unsigned long)sa_entry;
         return 0;
@@ -531,6 +588,8 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
         mlx5e_accel_ipsec_fs_del_rule(sa_entry);
 err_hw_ctx:
         mlx5_ipsec_free_sa_ctx(sa_entry);
+release_dwork:
+        kfree(sa_entry->dwork);
 release_work:
         kfree(sa_entry->work->data);
         kfree(sa_entry->work);
@@ -563,8 +622,12 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
         if (sa_entry->work)
                 cancel_work_sync(&sa_entry->work->work);
 
+        if (sa_entry->dwork)
+                cancel_delayed_work_sync(&sa_entry->dwork->dwork);
+
         mlx5e_accel_ipsec_fs_del_rule(sa_entry);
         mlx5_ipsec_free_sa_ctx(sa_entry);
+        kfree(sa_entry->dwork);
         kfree(sa_entry->work->data);
         kfree(sa_entry->work);
 sa_entry_free:
...
@@ -93,6 +93,7 @@ struct mlx5_accel_esp_xfrm_attrs {
         struct upspec upspec;
         u8 dir : 2;
         u8 type : 2;
+        u8 drop : 1;
         u8 family;
         struct mlx5_replay_esn replay_esn;
         u32 authsize;
@@ -140,6 +141,11 @@ struct mlx5e_ipsec_work {
         void *data;
 };
 
+struct mlx5e_ipsec_dwork {
+        struct delayed_work dwork;
+        struct mlx5e_ipsec_sa_entry *sa_entry;
+};
+
 struct mlx5e_ipsec_aso {
         u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
         dma_addr_t dma_addr;
@@ -193,6 +199,7 @@ struct mlx5e_ipsec_sa_entry {
         u32 enc_key_id;
         struct mlx5e_ipsec_rule ipsec_rule;
         struct mlx5e_ipsec_work *work;
+        struct mlx5e_ipsec_dwork *dwork;
         struct mlx5e_ipsec_limits limits;
 };
@@ -235,6 +242,7 @@ int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
 void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
 int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
 void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
+void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry);
 int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
 void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
...
@@ -926,9 +926,12 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
         flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
         flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
         flow_act.flags |= FLOW_ACT_NO_APPEND;
-        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
-                           MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
+        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
                            MLX5_FLOW_CONTEXT_ACTION_COUNT;
+        if (attrs->drop)
+                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+        else
+                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
         dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
         dest[0].ft = rx->ft.status;
         dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
@@ -1018,9 +1021,13 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
         flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
         flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
         flow_act.flags |= FLOW_ACT_NO_APPEND;
-        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
-                           MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
+        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
                            MLX5_FLOW_CONTEXT_ACTION_COUNT;
+        if (attrs->drop)
+                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+        else
+                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
         dest[0].ft = tx->ft.status;
         dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
         dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
@@ -1430,3 +1437,19 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
         kfree(ipsec->tx);
         return err;
 }
+
+void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+        struct mlx5e_ipsec_sa_entry sa_entry_shadow = {};
+        int err;
+
+        memcpy(&sa_entry_shadow, sa_entry, sizeof(*sa_entry));
+        memset(&sa_entry_shadow.ipsec_rule, 0x00, sizeof(sa_entry->ipsec_rule));
+
+        err = mlx5e_accel_ipsec_fs_add_rule(&sa_entry_shadow);
+        if (err)
+                return;
+
+        mlx5e_accel_ipsec_fs_del_rule(sa_entry);
+        memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
+}
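
Note on the rule replacement above: mlx5e_accel_ipsec_fs_modify() works
make-before-break. It builds a shadow copy of the SA entry with the updated
attributes (including the new drop flag), installs the new rule from that
shadow first, and only on success deletes the old rule and copies the shadow
back, so the SA is never left without a steering rule while its action is
switched from forward to drop.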