Commit 97cfede0 authored by Jakub Kicinski

Merge branch 'mlx5-macsec-extended-packet-number-and-replay-window-offload'

Saeed Mahameed says:

====================
mlx5 MACSec Extended packet number and replay window offload

This is a follow-up to the mlx5 MACsec offload series [1] submitted earlier
this release cycle.

In this series we add support for MACsec extended packet number (EPN) and
replay window offloads.

The first patch is a simple modification (code movement) in the core MACsec
code that exposes the EPN-related user properties to the offloading
device driver.

The rest of the patches are mlx5 specific. We start by fixing some
trivial issues in the mlx5 MACsec code, followed by simple refactoring
that allows the additional functionality needed in mlx5 MACsec for EPN
and replay window offloads:
 A) Expose the mkey creation functionality to MACsec.
 B) Expose the ASO object to MACsec to allow advanced steering operations;
    ASO objects are used to modify MACsec steering objects in the fast path.

1) Support MACsec offload extended packet number (EPN)

    MACsec EPN splits the packet number (PN) into two 32-bit fields:
    epn_lsb (the 32 least significant bits (LSBs) of the PN) and epn_msb
    (the 32 most significant bits (MSBs) of the PN).
    The epn_msb bits are managed by SW; for that, HW is required to send
    an object change event of type EPN, notifying SW that epn_msb needs
    to be updated. Once epn_msb is updated, SW writes the new epn_msb
    value back to HW so that HW can keep performing replay protection.
    To prevent HW from stopping while the event is being handled, SW
    manages another bit for HW called epn_overlap; HW uses it as an
    indication of how to read the epn_msb value correctly while packets
    are still being received.
    Add EPN event handling that updates epn_overlap and epn_msb every
    2^31 packets, according to the following logic (a minimal sketch of
    this update logic follows this item):
    when epn_lsb crosses 2^31 (half sequence number wraparound), upon the
    relevant HW event, SW sets epn_overlap to OLD (value = 1);
    when epn_lsb crosses 2^32 (full sequence number wraparound), upon the
    relevant HW event, SW sets epn_overlap to NEW (value = 0) and
    increments epn_msb.
    When using MACsec EPN, a salt and a short secure channel identifier
    (SSCI) must be provided by the user; when offloading EPN, this salt
    and SSCI are passed to HW to be used in the initialization vector
    (IV) calculation.
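
    A minimal sketch of that update logic, assuming an illustrative
    helper (the function name and arguments below are not the actual
    mlx5 implementation):

        /* Illustrative sketch: called when HW raises the EPN object
         * change event, i.e. once every 2^31 packets.
         */
        static void sw_update_epn_state(u32 epn_lsb, u32 *epn_msb,
                                        u8 *epn_overlap)
        {
                if (epn_lsb & (1U << 31)) {
                        /* epn_lsb crossed 2^31 (half wraparound):
                         * mark the overlap bit as OLD.
                         */
                        *epn_overlap = 1;
                } else {
                        /* epn_lsb crossed 2^32 (full wraparound):
                         * mark the overlap bit as NEW and advance the
                         * 32 MSBs of the packet number.
                         */
                        *epn_overlap = 0;
                        (*epn_msb)++;
                }
                /* SW then writes the new epn_msb/epn_overlap values
                 * back to HW so replay protection keeps working.
                 */
        }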

2) Support MACsec offload replay window

    Support setting the replay window size for MACsec offload.
    The currently supported window sizes are 32, 64, 128 and 256 bits;
    other values are rejected as an invalid parameter (a small mapping
    sketch follows below).
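
    A hedged sketch of how the configured window size could be mapped to
    the MLX5_MACSEC_ASO_REPLAY_WIN_* values introduced in mlx5_ifc.h
    (the helper name is an assumption for illustration, not the driver's
    actual function):

        /* Illustrative helper: translate a replay window size into the
         * ASO replay-window encoding; unsupported sizes are rejected.
         */
        static int macsec_replay_win_to_aso(u32 window, u8 *win_sz)
        {
                switch (window) {
                case 32:
                        *win_sz = MLX5_MACSEC_ASO_REPLAY_WIN_32BIT;
                        break;
                case 64:
                        *win_sz = MLX5_MACSEC_ASO_REPLAY_WIN_64BIT;
                        break;
                case 128:
                        *win_sz = MLX5_MACSEC_ASO_REPLAY_WIN_128BIT;
                        break;
                case 256:
                        *win_sz = MLX5_MACSEC_ASO_REPLAY_WIN_256BIT;
                        break;
                default:
                        return -EINVAL;
                }
                return 0;
        }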

[1] https://lore.kernel.org/netdev/20220906052129.104507-1-saeed@kernel.org/
====================

Link: https://lore.kernel.org/r/20220921181054.40249-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents bcff1a37 eb43846b
......@@ -1134,6 +1134,7 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
extern const struct ethtool_ops mlx5e_ethtool_ops;
int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
......
......@@ -66,7 +66,6 @@ static inline void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
struct mlx5_cqe64 *cqe)
{}
static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev) { return false; }
#endif /* CONFIG_MLX5_EN_MACSEC */
#endif /* __MLX5_ACCEL_EN_MACSEC_H__ */
......@@ -46,8 +46,7 @@ void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write);
}
static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
u32 *mkey)
int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey)
{
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
void *mkc;
......
......@@ -5055,10 +5055,6 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
}
priv->fs = fs;
err = mlx5e_macsec_init(priv);
if (err)
mlx5_core_err(mdev, "MACsec initialization failed, %d\n", err);
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
......@@ -5076,7 +5072,6 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_health_destroy_reporters(priv);
mlx5e_ktls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
mlx5e_macsec_cleanup(priv);
mlx5e_fs_cleanup(priv->fs);
}
......@@ -5202,9 +5197,14 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
int err;
mlx5e_fs_init_l2_addr(priv->fs, netdev);
err = mlx5e_macsec_init(priv);
if (err)
mlx5_core_err(mdev, "MACsec initialization failed, %d\n", err);
/* Marking the link as currently not needed by the Driver */
if (!netif_running(netdev))
mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);
......@@ -5262,6 +5262,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_disable_async_events(priv);
mlx5_lag_remove_netdev(mdev, priv->netdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
mlx5e_macsec_cleanup(priv);
}
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
......
......@@ -575,6 +575,9 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
if (MLX5_CAP_GEN_MAX(dev, vhca_state))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_VHCA_STATE_CHANGE);
if (MLX5_CAP_MACSEC(dev, log_max_macsec_offload))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
mask[0] = async_event_mask;
if (MLX5_CAP_GEN(dev, event_cap))
......
......@@ -36,6 +36,7 @@ static struct mlx5_nb events_nbs_ref[] = {
/* Events to be forwarded (as is) to mlx5 core interfaces (mlx5e/mlx5_ib) */
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PORT_CHANGE },
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_GENERAL_EVENT },
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_OBJECT_CHANGE },
/* QP/WQ resource events to forward */
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_DCT_DRAINED },
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PATH_MIG },
......@@ -132,6 +133,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_MONITOR_COUNTER";
case MLX5_EVENT_TYPE_DEVICE_TRACER:
return "MLX5_EVENT_TYPE_DEVICE_TRACER";
case MLX5_EVENT_TYPE_OBJECT_CHANGE:
return "MLX5_EVENT_TYPE_OBJECT_CHANGE";
default:
return "Unrecognized event";
}
......
......@@ -11,7 +11,9 @@
(DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_BB))
#define MLX5_ASO_WQEBBS_DATA \
(DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe_data), MLX5_SEND_WQE_BB))
#define ASO_CTRL_READ_EN BIT(0)
#define MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT 24
#define MLX5_MACSEC_ASO_DS_CNT (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_DS))
struct mlx5_wqe_aso_ctrl_seg {
__be32 va_h;
......@@ -70,6 +72,7 @@ enum {
enum {
MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER = 0x2,
MLX5_ACCESS_ASO_OPC_MOD_MACSEC = 0x5,
};
struct mlx5_aso;
......
......@@ -1828,6 +1828,12 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
rx_sa->sc = rx_sc;
if (secy->xpn) {
rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
MACSEC_SALT_LEN);
}
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(netdev_priv(dev))) {
const struct macsec_ops *ops;
......@@ -1850,12 +1856,6 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
goto cleanup;
}
if (secy->xpn) {
rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
MACSEC_SALT_LEN);
}
nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
......@@ -2070,6 +2070,12 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
secy->operational = true;
if (secy->xpn) {
tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
MACSEC_SALT_LEN);
}
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(netdev_priv(dev))) {
const struct macsec_ops *ops;
......@@ -2092,12 +2098,6 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
goto cleanup;
}
if (secy->xpn) {
tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
MACSEC_SALT_LEN);
}
nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
......
......@@ -325,6 +325,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
MLX5_EVENT_TYPE_OBJECT_CHANGE = 0x27,
MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08,
MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
......@@ -699,6 +700,12 @@ struct mlx5_eqe_temp_warning {
__be64 sensor_warning_lsb;
} __packed;
struct mlx5_eqe_obj_change {
u8 rsvd0[2];
__be16 obj_type;
__be32 obj_id;
} __packed;
#define SYNC_RST_STATE_MASK 0xf
enum sync_rst_state_type {
......@@ -737,6 +744,7 @@ union ev_data {
struct mlx5_eqe_xrq_err xrq_err;
struct mlx5_eqe_sync_fw_update sync_fw_update;
struct mlx5_eqe_vhca_state vhca_state;
struct mlx5_eqe_obj_change obj_change;
} __packed;
struct mlx5_eqe {
......
......@@ -11558,6 +11558,20 @@ struct mlx5_ifc_modify_ipsec_obj_in_bits {
struct mlx5_ifc_ipsec_obj_bits ipsec_object;
};
enum {
MLX5_MACSEC_ASO_REPLAY_PROTECTION = 0x1,
};
enum {
MLX5_MACSEC_ASO_REPLAY_WIN_32BIT = 0x0,
MLX5_MACSEC_ASO_REPLAY_WIN_64BIT = 0x1,
MLX5_MACSEC_ASO_REPLAY_WIN_128BIT = 0x2,
MLX5_MACSEC_ASO_REPLAY_WIN_256BIT = 0x3,
};
#define MLX5_MACSEC_ASO_INC_SN 0x2
#define MLX5_MACSEC_ASO_REG_C_4_5 0x2
struct mlx5_ifc_macsec_aso_bits {
u8 valid[0x1];
u8 reserved_at_1[0x1];
......@@ -11585,15 +11599,15 @@ struct mlx5_ifc_macsec_offload_obj_bits {
u8 confidentiality_en[0x1];
u8 reserved_at_41[0x1];
u8 esn_en[0x1];
u8 esn_overlap[0x1];
u8 epn_en[0x1];
u8 epn_overlap[0x1];
u8 reserved_at_44[0x2];
u8 confidentiality_offset[0x2];
u8 reserved_at_48[0x4];
u8 aso_return_reg[0x4];
u8 reserved_at_50[0x10];
u8 esn_msb[0x20];
u8 epn_msb[0x20];
u8 reserved_at_80[0x8];
u8 dekn[0x18];
......@@ -11619,6 +11633,21 @@ struct mlx5_ifc_create_macsec_obj_in_bits {
struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
};
struct mlx5_ifc_modify_macsec_obj_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
};
enum {
MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP = BIT(0),
MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB = BIT(1),
};
struct mlx5_ifc_query_macsec_obj_out_bits {
struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
};
struct mlx5_ifc_encryption_key_obj_bits {
u8 modify_field_select[0x40];