Commit a21ecf0e authored by Era Mayflower, committed by David S. Miller

macsec: Support XPN frame handling - IEEE 802.1AEbw

Support frame handling for the extended packet number (XPN) cipher suites (802.1AEbw).
This does not include the needed netlink patches.

    * Added xpn boolean field to `struct macsec_secy`.
    * Added ssci field to `struct macsec_tx_sa` (802.1AE figure 10-5).
    * Added ssci field to `struct macsec_rx_sa` (802.1AE figure 10-5).
    * Added salt field to `struct macsec_key` (802.1AE 10.7 NOTE 1).
    * Created pn_t type for easy access to the lower and upper halves.
    * Created salt_t type for easy access to the "ssci" and "pn" parts.
    * Created `macsec_fill_iv_xpn` function to create the IV in XPN mode
      (see the IV sketch after this list).
    * Added support for PN recovery and a preliminary replay check in XPN
      mode (a recovery sketch follows the next paragraph).
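
The following is a standalone, hedged sketch (not the kernel code itself) of
the XPN IV construction that `macsec_fill_iv_xpn` implements in the diff
below: the 96-bit GCM IV is the 32-bit SSCI followed by the 64-bit PN, both
in network byte order, XORed with the key's 12-byte salt (IEEE 802.1AEbw
10.7). The function and parameter names here are illustrative only.

    #include <stdint.h>

    /* Build a 12-byte AES-GCM IV for XPN: bytes 0..3 carry the SSCI and
     * bytes 4..11 the 64-bit PN, both big endian, each XORed with the
     * corresponding bytes of the per-key salt.
     */
    static void xpn_iv_sketch(uint8_t iv[12], uint32_t ssci, uint64_t pn,
                              const uint8_t salt[12])
    {
        int i;

        for (i = 0; i < 4; i++)
            iv[i] = (uint8_t)(ssci >> (8 * (3 - i)));
        for (i = 0; i < 8; i++)
            iv[4 + i] = (uint8_t)(pn >> (8 * (7 - i)));

        for (i = 0; i < 12; i++)
            iv[i] ^= salt[i];
    }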

In addition, according to IEEE 802.1AEbw figure 10-5, the PN of an incoming
frame can be 0 when an XPN cipher suite is used, so the function
`macsec_validate_skb` was fixed to fail on PN=0 only when XPN is off.
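
The PN recovery mentioned above works from the 32-bit PN carried in the
SecTAG. Below is a standalone sketch (not the kernel code itself) of the
logic this patch adds to `macsec_decrypt`, with illustrative names, assuming
the receiver tracks a 64-bit next expected PN as in the diff:

    #include <stdbool.h>
    #include <stdint.h>

    /* True when both 32-bit PNs fall in the same half of the 2^32 space. */
    static bool pn_same_half(uint32_t pn1, uint32_t pn2)
    {
        return !((pn1 >> 31) ^ (pn2 >> 31));
    }

    /* Recover the full 64-bit XPN from the on-wire lower 32 bits: reuse the
     * upper half of the next expected PN, and bump it if the lower half has
     * wrapped (header PN smaller and in the other half of the space).
     */
    static uint64_t recover_xpn_sketch(uint64_t next_pn, uint32_t hdr_pn)
    {
        uint32_t next_lower = (uint32_t)next_pn;
        uint64_t upper = next_pn >> 32;

        if (hdr_pn < next_lower && !pn_same_half(hdr_pn, next_lower))
            upper++;

        return (upper << 32) | hdr_pn;
    }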
Signed-off-by: Era Mayflower <mayflowerera@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 65b7a2c8
drivers/net/macsec.c
@@ -19,6 +19,7 @@
 #include <net/gro_cells.h>
 #include <net/macsec.h>
 #include <linux/phy.h>
+#include <linux/byteorder/generic.h>
 #include <uapi/linux/if_macsec.h>
@@ -68,6 +69,16 @@ struct macsec_eth_header {
              sc; \
              sc = rtnl_dereference(sc->next))

+#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))
+
+struct gcm_iv_xpn {
+        union {
+                u8 short_secure_channel_id[4];
+                ssci_t ssci;
+        };
+        __be64 pn;
+} __packed;
+
 struct gcm_iv {
         union {
                 u8 secure_channel_id[8];
@@ -372,8 +383,8 @@ static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
         return __macsec_get_ops(macsec->offload, macsec, ctx);
 }

-/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
-static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
+/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
+static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
 {
         struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
         int len = skb->len - 2 * ETH_ALEN;
@@ -398,8 +409,8 @@ static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
         if (h->unused)
                 return false;

-        /* rx.pn != 0 (figure 10-5) */
-        if (!h->packet_number)
+        /* rx.pn != 0 if not XPN (figure 10-5 with 802.11AEbw-2013 amendment) */
+        if (!h->packet_number && !xpn)
                 return false;

         /* length check, f) g) h) i) */
@@ -411,6 +422,15 @@ static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
 #define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
 #define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

+static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
+                               salt_t salt)
+{
+        struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;
+
+        gcm_iv->ssci = ssci ^ salt.ssci;
+        gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
+}
+
 static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
 {
         struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;
@@ -446,14 +466,19 @@ void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
 }
 EXPORT_SYMBOL_GPL(macsec_pn_wrapped);

-static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
+static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
+                            struct macsec_secy *secy)
 {
-        u32 pn;
+        pn_t pn;

         spin_lock_bh(&tx_sa->lock);
-        pn = tx_sa->next_pn;

-        tx_sa->next_pn++;
+        pn = tx_sa->next_pn_halves;
+        if (secy->xpn)
+                tx_sa->next_pn++;
+        else
+                tx_sa->next_pn_halves.lower++;
+
         if (tx_sa->next_pn == 0)
                 __macsec_pn_wrapped(secy, tx_sa);
         spin_unlock_bh(&tx_sa->lock);
@@ -568,7 +593,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
         struct macsec_tx_sa *tx_sa;
         struct macsec_dev *macsec = macsec_priv(dev);
         bool sci_present;
-        u32 pn;
+        pn_t pn;

         secy = &macsec->secy;
         tx_sc = &secy->tx_sc;
@@ -610,12 +635,12 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
         memmove(hh, eth, 2 * ETH_ALEN);

         pn = tx_sa_update_pn(tx_sa, secy);
-        if (pn == 0) {
+        if (pn.full64 == 0) {
                 macsec_txsa_put(tx_sa);
                 kfree_skb(skb);
                 return ERR_PTR(-ENOLINK);
         }
-        macsec_fill_sectag(hh, secy, pn, sci_present);
+        macsec_fill_sectag(hh, secy, pn.lower, sci_present);
         macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

         skb_put(skb, secy->icv_len);
@@ -646,7 +671,10 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
                 return ERR_PTR(-ENOMEM);
         }

-        macsec_fill_iv(iv, secy->sci, pn);
+        if (secy->xpn)
+                macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
+        else
+                macsec_fill_iv(iv, secy->sci, pn.lower);

         sg_init_table(sg, ret);
         ret = skb_to_sgvec(skb, sg, 0, skb->len);
@@ -698,13 +726,14 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
         u32 lowest_pn = 0;

         spin_lock(&rx_sa->lock);
-        if (rx_sa->next_pn >= secy->replay_window)
-                lowest_pn = rx_sa->next_pn - secy->replay_window;
+        if (rx_sa->next_pn_halves.lower >= secy->replay_window)
+                lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;

         /* Now perform replay protection check again
          * (see IEEE 802.1AE-2006 figure 10-5)
          */
-        if (secy->replay_protect && pn < lowest_pn) {
+        if (secy->replay_protect && pn < lowest_pn &&
+            (!secy->xpn || pn_same_half(pn, lowest_pn))) {
                 spin_unlock(&rx_sa->lock);
                 u64_stats_update_begin(&rxsc_stats->syncp);
                 rxsc_stats->stats.InPktsLate++;
@@ -753,8 +782,15 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
                 }
                 u64_stats_update_end(&rxsc_stats->syncp);

-                if (pn >= rx_sa->next_pn)
-                        rx_sa->next_pn = pn + 1;
+                // Instead of "pn >=" - to support pn overflow in xpn
+                if (pn + 1 > rx_sa->next_pn_halves.lower) {
+                        rx_sa->next_pn_halves.lower = pn + 1;
+                } else if (secy->xpn &&
+                           !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
+                        rx_sa->next_pn_halves.upper++;
+                        rx_sa->next_pn_halves.lower = pn + 1;
+                }
+
                 spin_unlock(&rx_sa->lock);
         }
@@ -841,6 +877,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
         unsigned char *iv;
         struct aead_request *req;
         struct macsec_eth_header *hdr;
+        u32 hdr_pn;
         u16 icv_len = secy->icv_len;

         macsec_skb_cb(skb)->valid = false;
@@ -860,7 +897,21 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
         }

         hdr = (struct macsec_eth_header *)skb->data;
-        macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
+        hdr_pn = ntohl(hdr->packet_number);
+
+        if (secy->xpn) {
+                pn_t recovered_pn = rx_sa->next_pn_halves;
+
+                recovered_pn.lower = hdr_pn;
+                if (hdr_pn < rx_sa->next_pn_halves.lower &&
+                    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
+                        recovered_pn.upper++;
+
+                macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
+                                   rx_sa->key.salt);
+        } else {
+                macsec_fill_iv(iv, sci, hdr_pn);
+        }

         sg_init_table(sg, ret);
         ret = skb_to_sgvec(skb, sg, 0, skb->len);
@@ -1001,7 +1052,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
         struct macsec_rxh_data *rxd;
         struct macsec_dev *macsec;
         sci_t sci;
-        u32 pn;
+        u32 hdr_pn;
         bool cbit;
         struct pcpu_rx_sc_stats *rxsc_stats;
         struct pcpu_secy_stats *secy_stats;
@@ -1072,7 +1123,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
         secy_stats = this_cpu_ptr(macsec->stats);
         rxsc_stats = this_cpu_ptr(rx_sc->stats);

-        if (!macsec_validate_skb(skb, secy->icv_len)) {
+        if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
                 u64_stats_update_begin(&secy_stats->syncp);
                 secy_stats->stats.InPktsBadTag++;
                 u64_stats_update_end(&secy_stats->syncp);
@@ -1104,13 +1155,16 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
         }

         /* First, PN check to avoid decrypting obviously wrong packets */
-        pn = ntohl(hdr->packet_number);
+        hdr_pn = ntohl(hdr->packet_number);
         if (secy->replay_protect) {
                 bool late;

                 spin_lock(&rx_sa->lock);
-                late = rx_sa->next_pn >= secy->replay_window &&
-                       pn < (rx_sa->next_pn - secy->replay_window);
+                late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
+                       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);
+
+                if (secy->xpn)
+                        late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
                 spin_unlock(&rx_sa->lock);

                 if (late) {
@@ -1139,7 +1193,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
                 return RX_HANDLER_CONSUMED;
         }

-        if (!macsec_post_decrypt(skb, secy, pn))
+        if (!macsec_post_decrypt(skb, secy, hdr_pn))
                 goto drop;

 deliver:
@@ -1666,7 +1720,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)

         if (tb_sa[MACSEC_SA_ATTR_PN]) {
                 spin_lock_bh(&rx_sa->lock);
-                rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+                rx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
                 spin_unlock_bh(&rx_sa->lock);
         }
@@ -1873,7 +1927,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
         }

         spin_lock_bh(&tx_sa->lock);
-        tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+        tx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
         spin_unlock_bh(&tx_sa->lock);

         if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
@@ -2137,9 +2191,11 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
         u8 assoc_num;
         struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
         bool was_operational, was_active;
-        u32 prev_pn = 0;
+        pn_t prev_pn;
         int ret = 0;

+        prev_pn.full64 = 0;
+
         if (!attrs[MACSEC_ATTR_IFINDEX])
                 return -EINVAL;
@@ -2159,8 +2215,8 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)

         if (tb_sa[MACSEC_SA_ATTR_PN]) {
                 spin_lock_bh(&tx_sa->lock);
-                prev_pn = tx_sa->next_pn;
-                tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+                prev_pn = tx_sa->next_pn_halves;
+                tx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
                 spin_unlock_bh(&tx_sa->lock);
         }
@@ -2198,7 +2254,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
 cleanup:
         if (tb_sa[MACSEC_SA_ATTR_PN]) {
                 spin_lock_bh(&tx_sa->lock);
-                tx_sa->next_pn = prev_pn;
+                tx_sa->next_pn_halves = prev_pn;
                 spin_unlock_bh(&tx_sa->lock);
         }
         tx_sa->active = was_active;
@@ -2218,9 +2274,11 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
         struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
         struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
         bool was_active;
-        u32 prev_pn = 0;
+        pn_t prev_pn;
         int ret = 0;

+        prev_pn.full64 = 0;
+
         if (!attrs[MACSEC_ATTR_IFINDEX])
                 return -EINVAL;
@@ -2243,8 +2301,8 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)

         if (tb_sa[MACSEC_SA_ATTR_PN]) {
                 spin_lock_bh(&rx_sa->lock);
-                prev_pn = rx_sa->next_pn;
-                rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+                prev_pn = rx_sa->next_pn_halves;
+                rx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
                 spin_unlock_bh(&rx_sa->lock);
         }
@@ -2277,7 +2335,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
 cleanup:
         if (tb_sa[MACSEC_SA_ATTR_PN]) {
                 spin_lock_bh(&rx_sa->lock);
-                rx_sa->next_pn = prev_pn;
+                rx_sa->next_pn_halves = prev_pn;
                 spin_unlock_bh(&rx_sa->lock);
         }
         rx_sa->active = was_active;
@@ -2796,7 +2854,7 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
         }

         if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
-            nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
+            nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn_halves.lower) ||
             nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
             nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
                 nla_nest_cancel(skb, txsa_nest);
@@ -2900,7 +2958,7 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
                 nla_nest_end(skb, attr);

         if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
-            nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
+            nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn_halves.lower) ||
             nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
             nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
                 nla_nest_cancel(skb, rxsa_nest);
include/net/macsec.h
@@ -11,18 +11,45 @@
 #include <uapi/linux/if_link.h>
 #include <uapi/linux/if_macsec.h>

+#define MACSEC_SALT_LEN 12
+#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
+
 typedef u64 __bitwise sci_t;
+typedef u32 __bitwise ssci_t;

-#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
+typedef union salt {
+        struct {
+                u32 ssci;
+                u64 pn;
+        } __packed;
+        u8 bytes[MACSEC_SALT_LEN];
+} __packed salt_t;
+
+typedef union pn {
+        struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+                u32 lower;
+                u32 upper;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+                u32 upper;
+                u32 lower;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+        };
+        u64 full64;
+} pn_t;

 /**
  * struct macsec_key - SA key
  * @id: user-provided key identifier
  * @tfm: crypto struct, key storage
+ * @salt: salt used to generate IV in XPN cipher suites
  */
 struct macsec_key {
         u8 id[MACSEC_KEYID_LEN];
         struct crypto_aead *tfm;
+        salt_t salt;
 };

 struct macsec_rx_sc_stats {
@@ -64,12 +91,17 @@ struct macsec_tx_sc_stats {
  * @next_pn: packet number expected for the next packet
  * @lock: protects next_pn manipulations
  * @key: key structure
+ * @ssci: short secure channel identifier
  * @stats: per-SA stats
  */
 struct macsec_rx_sa {
         struct macsec_key key;
+        ssci_t ssci;
         spinlock_t lock;
-        u32 next_pn;
+        union {
+                pn_t next_pn_halves;
+                u64 next_pn;
+        };
         refcount_t refcnt;
         bool active;
         struct macsec_rx_sa_stats __percpu *stats;
@@ -110,12 +142,17 @@ struct macsec_rx_sc {
  * @next_pn: packet number to use for the next packet
  * @lock: protects next_pn manipulations
  * @key: key structure
+ * @ssci: short secure channel identifier
  * @stats: per-SA stats
  */
 struct macsec_tx_sa {
         struct macsec_key key;
+        ssci_t ssci;
         spinlock_t lock;
-        u32 next_pn;
+        union {
+                pn_t next_pn_halves;
+                u64 next_pn;
+        };
         refcount_t refcnt;
         bool active;
         struct macsec_tx_sa_stats __percpu *stats;
@@ -152,6 +189,7 @@ struct macsec_tx_sc {
  * @key_len: length of keys used by the cipher suite
  * @icv_len: length of ICV used by the cipher suite
  * @validate_frames: validation mode
+ * @xpn: enable XPN for this SecY
  * @operational: MAC_Operational flag
  * @protect_frames: enable protection for this SecY
  * @replay_protect: enable packet number checks on receive
@@ -166,6 +204,7 @@ struct macsec_secy {
         u16 key_len;
         u16 icv_len;
         enum macsec_validation_type validate_frames;
+        bool xpn;
         bool operational;
         bool protect_frames;
         bool replay_protect;