Commit 6b633e82 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next

Steffen Klassert says:

====================
pull request (net-next): ipsec-next 2017-04-20

This adds the basic infrastructure for IPsec hardware
offloading: it creates a configuration API and adjusts
the packet path.

1) Add the needed netdev features to configure IPsec offloads.

2) Add the IPsec hardware offloading API (a driver-side
   sketch follows after the commit metadata below).

3) Prepare the ESP packet path for hardware offloading.

4) Add GSO handlers for esp4 and esp6; this implements
   the software fallback for GSO packets.

5) Add xfrm replay handler functions for offloading.

6) Change ESP to use a synchronous crypto algorithm when
   offloading; we don't have the option of asynchronous
   returns when we handle IPsec at layer 2.

7) Add an xfrm validate function to validate_xmit_skb. This
   implements the software fallback for non-GSO packets.

8) Set the inner_network and inner_transport members of
   the SKB, as well as encapsulation, to reflect the actual
   positions of these headers, and remove them only once
   encryption is done on the payload.
   From Ilan Tayari.

9) Prepare the ESP GRO codepath for hardware offloading.

10) Fix incorrect null pointer check in esp6.
    From Colin Ian King.

11) Fix the GSO software fallback path so that the
    fallback is detected correctly.
    From Ilan Tayari.

Please pull or let me know if there are problems.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 77999328 8f92e03e
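To illustrate item 2 above before the diff itself: a minimal, hypothetical sketch of how a NIC driver would wire up the new xfrmdev_ops callbacks and advertise NETIF_F_HW_ESP. Nothing below is part of this series; every mydrv_* name is made up for illustration, and the helper prototypes exist only to keep the sketch self-contained.

#include <linux/ip.h>
#include <linux/netdevice.h>
#include <net/xfrm.h>

/* Hypothetical hardware helpers; a real driver would program and
 * tear down entries in its on-chip SA table here. */
static unsigned long mydrv_install_sa(struct xfrm_state *x);
static void mydrv_remove_sa(unsigned long handle);
static void mydrv_free_sa(unsigned long handle);

static int mydrv_xdo_dev_state_add(struct xfrm_state *x)
{
	/* Reject anything the hardware cannot handle, e.g. ESN. */
	if (x->props.flags & XFRM_STATE_ESN)
		return -EINVAL;

	/* Program the SA into hardware and remember it via the handle;
	 * the stack later checks x->xso.offload_handle on xmit. */
	x->xso.offload_handle = mydrv_install_sa(x);
	return x->xso.offload_handle ? 0 : -ENOSPC;
}

static void mydrv_xdo_dev_state_delete(struct xfrm_state *x)
{
	mydrv_remove_sa(x->xso.offload_handle);
}

static void mydrv_xdo_dev_state_free(struct xfrm_state *x)
{
	mydrv_free_sa(x->xso.offload_handle);
}

static bool mydrv_xdo_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	/* Returning false makes the stack use the software fallback,
	 * e.g. for packets with IP options the hardware cannot parse. */
	return ip_hdr(skb)->ihl == 5;
}

static const struct xfrmdev_ops mydrv_xfrmdev_ops = {
	.xdo_dev_state_add	= mydrv_xdo_dev_state_add,
	.xdo_dev_state_delete	= mydrv_xdo_dev_state_delete,
	.xdo_dev_state_free	= mydrv_xdo_dev_state_free,
	.xdo_dev_offload_ok	= mydrv_xdo_dev_offload_ok,
};

/* At probe time the driver would then do something like:
 *	netdev->xfrmdev_ops  = &mydrv_xfrmdev_ops;
 *	netdev->features    |= NETIF_F_HW_ESP;
 *	netdev->hw_features |= NETIF_F_HW_ESP;
 */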
...
@@ -54,8 +54,9 @@ enum {
 	 */
 	NETIF_F_GSO_TUNNEL_REMCSUM_BIT,	/* ... TUNNEL with TSO & REMCSUM */
 	NETIF_F_GSO_SCTP_BIT,		/* ... SCTP fragmentation */
+	NETIF_F_GSO_ESP_BIT,		/* ... ESP with TSO */
 	/**/NETIF_F_GSO_LAST =		/* last bit, see GSO_MASK */
-		NETIF_F_GSO_SCTP_BIT,
+		NETIF_F_GSO_ESP_BIT,

 	NETIF_F_FCOE_CRC_BIT,		/* FCoE CRC32 */
 	NETIF_F_SCTP_CRC_BIT,		/* SCTP checksum offload */
@@ -73,6 +74,8 @@ enum {
 	NETIF_F_HW_L2FW_DOFFLOAD_BIT,	/* Allow L2 Forwarding in Hardware */
 	NETIF_F_HW_TC_BIT,		/* Offload TC infrastructure */
+	NETIF_F_HW_ESP_BIT,		/* Hardware ESP transformation offload */
+	NETIF_F_HW_ESP_TX_CSUM_BIT,	/* ESP with TX checksum offload */

 	/*
 	 * Add your fresh new feature above and remember to update
@@ -129,11 +132,14 @@ enum {
 #define NETIF_F_GSO_PARTIAL	 __NETIF_F(GSO_PARTIAL)
 #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
 #define NETIF_F_GSO_SCTP	__NETIF_F(GSO_SCTP)
+#define NETIF_F_GSO_ESP		__NETIF_F(GSO_ESP)
 #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
 #define NETIF_F_HW_VLAN_STAG_RX	__NETIF_F(HW_VLAN_STAG_RX)
 #define NETIF_F_HW_VLAN_STAG_TX	__NETIF_F(HW_VLAN_STAG_TX)
 #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
 #define NETIF_F_HW_TC		__NETIF_F(HW_TC)
+#define NETIF_F_HW_ESP		__NETIF_F(HW_ESP)
+#define NETIF_F_HW_ESP_TX_CSUM	__NETIF_F(HW_ESP_TX_CSUM)

 #define for_each_netdev_feature(mask_addr, bit)	\
 	for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
...
@@ -823,6 +823,16 @@ struct netdev_xdp {
 	};
 };

+#ifdef CONFIG_XFRM_OFFLOAD
+struct xfrmdev_ops {
+	int	(*xdo_dev_state_add) (struct xfrm_state *x);
+	void	(*xdo_dev_state_delete) (struct xfrm_state *x);
+	void	(*xdo_dev_state_free) (struct xfrm_state *x);
+	bool	(*xdo_dev_offload_ok) (struct sk_buff *skb,
+				       struct xfrm_state *x);
+};
+#endif
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
@@ -1696,6 +1706,10 @@ struct net_device {
 	const struct ndisc_ops *ndisc_ops;
 #endif

+#ifdef CONFIG_XFRM
+	const struct xfrmdev_ops *xfrmdev_ops;
+#endif
+
 	const struct header_ops *header_ops;

 	unsigned int		flags;
@@ -4070,6 +4084,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 	BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));

 	return (features & feature) == feature;
 }
...
@@ -492,6 +492,8 @@ enum {
 	SKB_GSO_TUNNEL_REMCSUM = 1 << 14,

 	SKB_GSO_SCTP = 1 << 15,
+
+	SKB_GSO_ESP = 1 << 16,
 };

 #if BITS_PER_LONG > 32
...
@@ -10,4 +10,23 @@ static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
 	return (struct ip_esp_hdr *)skb_transport_header(skb);
 }

+struct esp_info {
+	struct	ip_esp_hdr *esph;
+	__be64	seqno;
+	int	tfclen;
+	int	tailen;
+	int	plen;
+	int	clen;
+	int	len;
+	int	nfrags;
+	__u8	proto;
+	bool	inplace;
+};
+
+int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
+int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
+int esp_input_done2(struct sk_buff *skb, int err);
+int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
+int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
+int esp6_input_done2(struct sk_buff *skb, int err);
 #endif
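The intended use of the new head/tail split is easiest to see in condensed form. The following is not new API, just a restatement of the reworked esp_output() from the esp4.c hunks further down, with TFC padding elided: the caller fills in struct esp_info, esp_output_head() builds the ESP trailer and returns the fragment count, and esp_output_tail() performs the actual crypto.

static int esp_output_condensed(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	int blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	struct esp_info esp;

	esp.inplace = true;
	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esp.tfclen = 0;				/* TFC padding elided here */
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + crypto_aead_authsize(aead);
	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp_output_head(x, skb, &esp);	/* trailer + nfrags */
	if (esp.nfrags < 0)
		return esp.nfrags;

	esp.esph->spi = x->id.spi;
	esp.esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));
	return esp_output_tail(x, skb, &esp);		/* encrypt */
}

The point of the split is that the GSO/offload path (esp_xmit() below) can run esp_output_head() per segment but skip esp_output_tail() entirely when the hardware does the crypto.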
...
@@ -120,6 +120,13 @@ struct xfrm_state_walk {
 	struct xfrm_address_filter *filter;
 };

+struct xfrm_state_offload {
+	struct net_device	*dev;
+	unsigned long		offload_handle;
+	unsigned int		num_exthdrs;
+	u8			flags;
+};
+
 /* Full description of state of transformer. */
 struct xfrm_state {
 	possible_net_t		xs_net;
@@ -207,6 +214,8 @@ struct xfrm_state {
 	struct xfrm_lifetime_cur curlft;
 	struct tasklet_hrtimer	mtimer;

+	struct xfrm_state_offload xso;
+
 	/* used to fix curlft->add_time when changing date */
 	long		saved_tmo;
@@ -222,6 +231,8 @@ struct xfrm_state {
 	struct xfrm_mode	*inner_mode_iaf;
 	struct xfrm_mode	*outer_mode;

+	const struct xfrm_type_offload	*type_offload;
+
 	/* Security context */
 	struct xfrm_sec_ctx	*security;
@@ -314,12 +325,14 @@ void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
 int __xfrm_state_delete(struct xfrm_state *x);

 struct xfrm_state_afinfo {
 	unsigned int		family;
 	unsigned int		proto;
 	__be16			eth_proto;
 	struct module		*owner;
 	const struct xfrm_type	*type_map[IPPROTO_MAX];
+	const struct xfrm_type_offload	*type_offload_map[IPPROTO_MAX];
 	struct xfrm_mode	*mode_map[XFRM_MODE_MAX];
 	int			(*init_flags)(struct xfrm_state *x);
 	void			(*init_tempsel)(struct xfrm_selector *sel,
 						const struct flowi *fl);
@@ -380,6 +393,18 @@ struct xfrm_type {
 int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);

+struct xfrm_type_offload {
+	char		*description;
+	struct module	*owner;
+	u8		proto;
+	void		(*encap)(struct xfrm_state *, struct sk_buff *pskb);
+	int		(*input_tail)(struct xfrm_state *x, struct sk_buff *skb);
+	int		(*xmit)(struct xfrm_state *, struct sk_buff *pskb, netdev_features_t features);
+};
+
+int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
+int xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);
+
 struct xfrm_mode {
 	/*
 	 * Remove encapsulation header.
@@ -428,6 +453,16 @@ struct xfrm_mode {
 	 */
 	int (*output)(struct xfrm_state *x, struct sk_buff *skb);

+	/*
+	 * Adjust pointers into the packet and do GSO segmentation.
+	 */
+	struct sk_buff *(*gso_segment)(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features);
+
+	/*
+	 * Adjust pointers into the packet when IPsec is done at layer2.
+	 */
+	void (*xmit)(struct xfrm_state *x, struct sk_buff *skb);
+
 	struct xfrm_state_afinfo *afinfo;
 	struct module *owner;
 	unsigned int encap;
@@ -1532,6 +1567,7 @@ struct xfrmk_spdinfo {
 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
 int xfrm_state_delete(struct xfrm_state *x);
 int xfrm_state_flush(struct net *net, u8 proto, bool task_valid);
+int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
 u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
@@ -1614,6 +1650,11 @@ static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
 }
 #endif

+struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
+				    const xfrm_address_t *saddr,
+				    const xfrm_address_t *daddr,
+				    int family);
+
 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);

 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
@@ -1819,6 +1860,61 @@ static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
 }
 #endif

+#ifdef CONFIG_XFRM_OFFLOAD
+void __net_init xfrm_dev_init(void);
+int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
+int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
+		       struct xfrm_user_offload *xuo);
+bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+
+static inline void xfrm_dev_state_delete(struct xfrm_state *x)
+{
+	struct xfrm_state_offload *xso = &x->xso;
+
+	if (xso->dev)
+		xso->dev->xfrmdev_ops->xdo_dev_state_delete(x);
+}
+
+static inline void xfrm_dev_state_free(struct xfrm_state *x)
+{
+	struct xfrm_state_offload *xso = &x->xso;
+	struct net_device *dev = xso->dev;
+
+	if (dev && dev->xfrmdev_ops) {
+		dev->xfrmdev_ops->xdo_dev_state_free(x);
+		xso->dev = NULL;
+		dev_put(dev);
+	}
+}
+#else
+static inline void __net_init xfrm_dev_init(void)
+{
+}
+
+static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+{
+	return 0;
+}
+
+static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
+{
+	return 0;
+}
+
+static inline void xfrm_dev_state_delete(struct xfrm_state *x)
+{
+}
+
+static inline void xfrm_dev_state_free(struct xfrm_state *x)
+{
+}
+
+static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+	return false;
+}
+#endif
+
 static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
 {
 	if (attrs[XFRMA_MARK])
...
@@ -303,6 +303,7 @@ enum xfrm_attr_type_t {
 	XFRMA_PROTO,		/* __u8 */
 	XFRMA_ADDRESS_FILTER,	/* struct xfrm_address_filter */
 	XFRMA_PAD,
+	XFRMA_OFFLOAD_DEV,	/* struct xfrm_state_offload */
 	__XFRMA_MAX

 #define XFRMA_MAX (__XFRMA_MAX - 1)
@@ -494,6 +495,13 @@ struct xfrm_address_filter {
 	__u8				dplen;
 };

+struct xfrm_user_offload {
+	int				ifindex;
+	__u8				flags;
+};
+#define XFRM_OFFLOAD_IPV6	1
+#define XFRM_OFFLOAD_INBOUND	2
+
 #ifndef __KERNEL__
 /* backwards compatibility for userspace */
 #define XFRMGRP_ACQUIRE		1
...
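For the userspace side of the new uapi: installing an SA with hardware offload means attaching an XFRMA_OFFLOAD_DEV attribute to the XFRM_MSG_NEWSA netlink request. A minimal sketch follows; put_attr() is only a placeholder for whatever rtnetlink-style attribute helper the caller already has, not a real API, and later iproute2 releases expose the same thing as "ip xfrm state add ... offload dev <dev> dir in|out".

#include <stdbool.h>
#include <linux/netlink.h>
#include <linux/xfrm.h>
#include <net/if.h>

/* placeholder: append a netlink attribute to the request buffer */
int put_attr(struct nlmsghdr *nlh, int type, const void *data, int len);

static int request_esp_offload(struct nlmsghdr *nlh, const char *ifname,
			       bool inbound)
{
	struct xfrm_user_offload xuo = {
		.ifindex = (int)if_nametoindex(ifname),
		.flags	 = inbound ? XFRM_OFFLOAD_INBOUND : 0,
	};

	if (!xuo.ifindex)
		return -1;

	/* for an IPv6 SA, userspace would also set XFRM_OFFLOAD_IPV6 */
	return put_attr(nlh, XFRMA_OFFLOAD_DEV, &xuo, sizeof(xuo));
}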
@@ -2972,6 +2972,9 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 		    __skb_linearize(skb))
 			goto out_kfree_skb;

+		if (validate_xmit_xfrm(skb, features))
+			goto out_kfree_skb;
+
 		/* If packet is not checksummed and device does not
 		 * support checksumming for this protocol, complete
 		 * checksumming here.
...
@@ -90,6 +90,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
 	[NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation",
 	[NETIF_F_GSO_PARTIAL_BIT] =	 "tx-gso-partial",
 	[NETIF_F_GSO_SCTP_BIT] =	 "tx-sctp-segmentation",
+	[NETIF_F_GSO_ESP_BIT] =		 "tx-esp-segmentation",

 	[NETIF_F_FCOE_CRC_BIT] =	 "tx-checksum-fcoe-crc",
 	[NETIF_F_SCTP_CRC_BIT] =	 "tx-checksum-sctp",
@@ -103,6 +104,8 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
 	[NETIF_F_RXALL_BIT] =		 "rx-all",
 	[NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload",
 	[NETIF_F_HW_TC_BIT] =		 "hw-tc-offload",
+	[NETIF_F_HW_ESP_BIT] =		 "esp-hw-offload",
+	[NETIF_F_HW_ESP_TX_CSUM_BIT] =	 "esp-tx-csum-hw-offload",
 };

 static const char
...
@@ -152,21 +152,28 @@ static void esp_output_restore_header(struct sk_buff *skb)
 }

 static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
+					       struct xfrm_state *x,
 					       struct ip_esp_hdr *esph,
 					       struct esp_output_extra *extra)
 {
-	struct xfrm_state *x = skb_dst(skb)->xfrm;
-
 	/* For ESN we move the header forward by 4 bytes to
 	 * accomodate the high bits.  We will move it back after
 	 * encryption.
 	 */
 	if ((x->props.flags & XFRM_STATE_ESN)) {
+		__u32 seqhi;
+		struct xfrm_offload *xo = xfrm_offload(skb);
+
+		if (xo)
+			seqhi = xo->seq.hi;
+		else
+			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;
+
 		extra->esphoff = (unsigned char *)esph -
 				 skb_transport_header(skb);
 		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
 		extra->seqhi = esph->spi;
-		esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
+		esph->seq_no = htonl(seqhi);
 	}

 	esph->spi = x->id.spi;
@@ -198,98 +205,56 @@ static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
 	tail[plen - 1] = proto;
 }

-static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
+static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 {
-	struct esp_output_extra *extra;
-	int err = -ENOMEM;
-	struct ip_esp_hdr *esph;
-	struct crypto_aead *aead;
-	struct aead_request *req;
-	struct scatterlist *sg, *dsg;
-	struct sk_buff *trailer;
-	struct page *page;
-	void *tmp;
-	u8 *iv;
-	u8 *tail;
-	u8 *vaddr;
-	int blksize;
-	int clen;
-	int alen;
-	int plen;
-	int ivlen;
-	int tfclen;
-	int nfrags;
-	int assoclen;
-	int extralen;
-	int tailen;
-	__be64 seqno;
-	__u8 proto = *skb_mac_header(skb);
-
-	/* skb is pure payload to encrypt */
-
-	aead = x->data;
-	alen = crypto_aead_authsize(aead);
-	ivlen = crypto_aead_ivsize(aead);
-
-	tfclen = 0;
-	if (x->tfcpad) {
-		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
-		u32 padto;
-
-		padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
-		if (skb->len < padto)
-			tfclen = padto - skb->len;
-	}
-	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
-	clen = ALIGN(skb->len + 2 + tfclen, blksize);
-	plen = clen - skb->len - tfclen;
-	tailen = tfclen + plen + alen;
-	assoclen = sizeof(*esph);
-	extralen = 0;
-
-	if (x->props.flags & XFRM_STATE_ESN) {
-		extralen += sizeof(*extra);
-		assoclen += sizeof(__be32);
-	}
-
-	*skb_mac_header(skb) = IPPROTO_ESP;
-	esph = ip_esp_hdr(skb);
-
-	/* this is non-NULL only with UDP Encapsulation */
-	if (x->encap) {
-		struct xfrm_encap_tmpl *encap = x->encap;
-		struct udphdr *uh;
-		__be32 *udpdata32;
-		__be16 sport, dport;
-		int encap_type;
-
-		spin_lock_bh(&x->lock);
-		sport = encap->encap_sport;
-		dport = encap->encap_dport;
-		encap_type = encap->encap_type;
-		spin_unlock_bh(&x->lock);
-
-		uh = (struct udphdr *)esph;
-		uh->source = sport;
-		uh->dest = dport;
-		uh->len = htons(skb->len + tailen
-				- skb_transport_offset(skb));
-		uh->check = 0;
-
-		switch (encap_type) {
-		default:
-		case UDP_ENCAP_ESPINUDP:
-			esph = (struct ip_esp_hdr *)(uh + 1);
-			break;
-		case UDP_ENCAP_ESPINUDP_NON_IKE:
-			udpdata32 = (__be32 *)(uh + 1);
-			udpdata32[0] = udpdata32[1] = 0;
-			esph = (struct ip_esp_hdr *)(udpdata32 + 2);
-			break;
-		}
-
-		*skb_mac_header(skb) = IPPROTO_UDP;
-	}
+	int encap_type;
+	struct udphdr *uh;
+	__be32 *udpdata32;
+	__be16 sport, dport;
+	struct xfrm_encap_tmpl *encap = x->encap;
+	struct ip_esp_hdr *esph = esp->esph;
+
+	spin_lock_bh(&x->lock);
+	sport = encap->encap_sport;
+	dport = encap->encap_dport;
+	encap_type = encap->encap_type;
+	spin_unlock_bh(&x->lock);
+
+	uh = (struct udphdr *)esph;
+	uh->source = sport;
+	uh->dest = dport;
+	uh->len = htons(skb->len + esp->tailen
+			- skb_transport_offset(skb));
+	uh->check = 0;
+
+	switch (encap_type) {
+	default:
+	case UDP_ENCAP_ESPINUDP:
+		esph = (struct ip_esp_hdr *)(uh + 1);
+		break;
+	case UDP_ENCAP_ESPINUDP_NON_IKE:
+		udpdata32 = (__be32 *)(uh + 1);
+		udpdata32[0] = udpdata32[1] = 0;
+		esph = (struct ip_esp_hdr *)(udpdata32 + 2);
+		break;
+	}

+	*skb_mac_header(skb) = IPPROTO_UDP;
+	esp->esph = esph;
+}
+
+int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
+{
+	u8 *tail;
+	u8 *vaddr;
+	int nfrags;
+	struct page *page;
+	struct sk_buff *trailer;
+	int tailen = esp->tailen;
+
+	/* this is non-NULL only with UDP Encapsulation */
+	if (x->encap)
+		esp_output_udp_encap(x, skb, esp);

 	if (!skb_cloned(skb)) {
 		if (tailen <= skb_availroom(skb)) {
@@ -304,6 +269,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 			struct sock *sk = skb->sk;
 			struct page_frag *pfrag = &x->xfrag;

+			esp->inplace = false;
+
 			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

 			spin_lock_bh(&x->lock);
@@ -320,10 +287,12 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 			tail = vaddr + pfrag->offset;

-			esp_output_fill_trailer(tail, tfclen, plen, proto);
+			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

 			kunmap_atomic(vaddr);

+			spin_unlock_bh(&x->lock);
+
 			nfrags = skb_shinfo(skb)->nr_frags;

 			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
@@ -339,76 +308,56 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 			if (sk)
 				atomic_add(tailen, &sk->sk_wmem_alloc);

-			skb_push(skb, -skb_network_offset(skb));
-
-			esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
-			esph->spi = x->id.spi;
-
-			tmp = esp_alloc_tmp(aead, nfrags + 2, extralen);
-			if (!tmp) {
-				spin_unlock_bh(&x->lock);
-				err = -ENOMEM;
-				goto error;
-			}
-
-			extra = esp_tmp_extra(tmp);
-			iv = esp_tmp_iv(aead, tmp, extralen);
-			req = esp_tmp_req(aead, iv);
-			sg = esp_req_sg(aead, req);
-			dsg = &sg[nfrags];
-
-			esph = esp_output_set_extra(skb, esph, extra);
-
-			sg_init_table(sg, nfrags);
-			skb_to_sgvec(skb, sg,
-				     (unsigned char *)esph - skb->data,
-				     assoclen + ivlen + clen + alen);
-
-			allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
-
-			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
-				spin_unlock_bh(&x->lock);
-				err = -ENOMEM;
-				goto error;
-			}
-
-			skb_shinfo(skb)->nr_frags = 1;
-
-			page = pfrag->page;
-			get_page(page);
-			/* replace page frags in skb with new page */
-			__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
-			pfrag->offset = pfrag->offset + allocsize;
-
-			sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
-			skb_to_sgvec(skb, dsg,
-				     (unsigned char *)esph - skb->data,
-				     assoclen + ivlen + clen + alen);
-
-			spin_unlock_bh(&x->lock);
-
-			goto skip_cow2;
+			goto out;
 		}
 	}

 cow:
-	err = skb_cow_data(skb, tailen, &trailer);
-	if (err < 0)
-		goto error;
-	nfrags = err;
-
+	nfrags = skb_cow_data(skb, tailen, &trailer);
+	if (nfrags < 0)
+		goto out;
 	tail = skb_tail_pointer(trailer);
-	esph = ip_esp_hdr(skb);

 skip_cow:
-	esp_output_fill_trailer(tail, tfclen, plen, proto);
+	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
+	pskb_put(skb, trailer, tailen);

-	pskb_put(skb, trailer, clen - skb->len + alen);
-	skb_push(skb, -skb_network_offset(skb));
-	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
-	esph->spi = x->id.spi;
+out:
+	return nfrags;
+}
+EXPORT_SYMBOL_GPL(esp_output_head);

-	tmp = esp_alloc_tmp(aead, nfrags, extralen);
+int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
+{
+	u8 *iv;
+	int alen;
+	void *tmp;
+	int ivlen;
+	int assoclen;
+	int extralen;
+	struct page *page;
+	struct ip_esp_hdr *esph;
+	struct crypto_aead *aead;
+	struct aead_request *req;
+	struct scatterlist *sg, *dsg;
+	struct esp_output_extra *extra;
+	int err = -ENOMEM;
+
+	assoclen = sizeof(struct ip_esp_hdr);
+	extralen = 0;
+
+	if (x->props.flags & XFRM_STATE_ESN) {
+		extralen += sizeof(*extra);
+		assoclen += sizeof(__be32);
+	}
+
+	aead = x->data;
+	alen = crypto_aead_authsize(aead);
+	ivlen = crypto_aead_ivsize(aead);
+
+	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
 	if (!tmp) {
-		spin_unlock_bh(&x->lock);
 		err = -ENOMEM;
 		goto error;
 	}
@@ -417,29 +366,58 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	iv = esp_tmp_iv(aead, tmp, extralen);
 	req = esp_tmp_req(aead, iv);
 	sg = esp_req_sg(aead, req);
-	dsg = sg;

-	esph = esp_output_set_extra(skb, esph, extra);
+	if (esp->inplace)
+		dsg = sg;
+	else
+		dsg = &sg[esp->nfrags];

-	sg_init_table(sg, nfrags);
+	esph = esp_output_set_extra(skb, x, esp->esph, extra);
+	esp->esph = esph;
+
+	sg_init_table(sg, esp->nfrags);
 	skb_to_sgvec(skb, sg,
 		     (unsigned char *)esph - skb->data,
-		     assoclen + ivlen + clen + alen);
+		     assoclen + ivlen + esp->clen + alen);
+
+	if (!esp->inplace) {
+		int allocsize;
+		struct page_frag *pfrag = &x->xfrag;
+
+		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
+
+		spin_lock_bh(&x->lock);
+		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
+			spin_unlock_bh(&x->lock);
+			err = -ENOMEM;
+			goto error;
+		}
+
+		skb_shinfo(skb)->nr_frags = 1;
+
+		page = pfrag->page;
+		get_page(page);
+		/* replace page frags in skb with new page */
+		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
+		pfrag->offset = pfrag->offset + allocsize;
+		spin_unlock_bh(&x->lock);
+
+		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
+		skb_to_sgvec(skb, dsg,
+			     (unsigned char *)esph - skb->data,
+			     assoclen + ivlen + esp->clen + alen);
+	}

-skip_cow2:
 	if ((x->props.flags & XFRM_STATE_ESN))
 		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
 	else
 		aead_request_set_callback(req, 0, esp_output_done, skb);

-	aead_request_set_crypt(req, sg, dsg, ivlen + clen, iv);
+	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
 	aead_request_set_ad(req, assoclen);

-	seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
-			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
-
 	memset(iv, 0, ivlen);
-	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&seqno + 8 - min(ivlen, 8),
+	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
 	       min(ivlen, 8));

 	ESP_SKB_CB(skb)->tmp = tmp;
@@ -465,11 +443,63 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 error:
 	return err;
 }
+EXPORT_SYMBOL_GPL(esp_output_tail);

-static int esp_input_done2(struct sk_buff *skb, int err)
+static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
+{
+	int alen;
+	int blksize;
+	struct ip_esp_hdr *esph;
+	struct crypto_aead *aead;
+	struct esp_info esp;
+
+	esp.inplace = true;
+
+	esp.proto = *skb_mac_header(skb);
+	*skb_mac_header(skb) = IPPROTO_ESP;
+
+	/* skb is pure payload to encrypt */
+
+	aead = x->data;
+	alen = crypto_aead_authsize(aead);
+
+	esp.tfclen = 0;
+	if (x->tfcpad) {
+		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
+		u32 padto;
+
+		padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
+		if (skb->len < padto)
+			esp.tfclen = padto - skb->len;
+	}
+	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
+	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
+	esp.plen = esp.clen - skb->len - esp.tfclen;
+	esp.tailen = esp.tfclen + esp.plen + alen;
+
+	esp.esph = ip_esp_hdr(skb);
+
+	esp.nfrags = esp_output_head(x, skb, &esp);
+	if (esp.nfrags < 0)
+		return esp.nfrags;
+
+	esph = esp.esph;
+	esph->spi = x->id.spi;
+
+	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
+	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
+				 ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
+
+	skb_push(skb, -skb_network_offset(skb));
+
+	return esp_output_tail(x, skb, &esp);
+}
+
+int esp_input_done2(struct sk_buff *skb, int err)
 {
 	const struct iphdr *iph;
 	struct xfrm_state *x = xfrm_input_state(skb);
+	struct xfrm_offload *xo = xfrm_offload(skb);
 	struct crypto_aead *aead = x->data;
 	int alen = crypto_aead_authsize(aead);
 	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
@@ -478,7 +508,8 @@ static int esp_input_done2(struct sk_buff *skb, int err)
 	u8 nexthdr[2];
 	int padlen;

-	kfree(ESP_SKB_CB(skb)->tmp);
+	if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
+		kfree(ESP_SKB_CB(skb)->tmp);

 	if (unlikely(err))
 		goto out;
@@ -549,6 +580,7 @@ static int esp_input_done2(struct sk_buff *skb, int err)
 out:
 	return err;
 }
+EXPORT_SYMBOL_GPL(esp_input_done2);

 static void esp_input_done(struct crypto_async_request *base, int err)
 {
@@ -751,13 +783,17 @@ static int esp_init_aead(struct xfrm_state *x)
 	char aead_name[CRYPTO_MAX_ALG_NAME];
 	struct crypto_aead *aead;
 	int err;
+	u32 mask = 0;

 	err = -ENAMETOOLONG;
 	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
 		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
 		goto error;

-	aead = crypto_alloc_aead(aead_name, 0, 0);
+	if (x->xso.offload_handle)
+		mask |= CRYPTO_ALG_ASYNC;
+
+	aead = crypto_alloc_aead(aead_name, 0, mask);
 	err = PTR_ERR(aead);
 	if (IS_ERR(aead))
 		goto error;
@@ -787,6 +823,7 @@ static int esp_init_authenc(struct xfrm_state *x)
 	char authenc_name[CRYPTO_MAX_ALG_NAME];
 	unsigned int keylen;
 	int err;
+	u32 mask = 0;

 	err = -EINVAL;
 	if (!x->ealg)
@@ -812,7 +849,10 @@ static int esp_init_authenc(struct xfrm_state *x)
 			goto error;
 	}

-	aead = crypto_alloc_aead(authenc_name, 0, 0);
+	if (x->xso.offload_handle)
+		mask |= CRYPTO_ALG_ASYNC;
+
+	aead = crypto_alloc_aead(authenc_name, 0, mask);
 	err = PTR_ERR(aead);
 	if (IS_ERR(aead))
 		goto error;
@@ -931,7 +971,7 @@ static const struct xfrm_type esp_type =
 	.destructor	= esp_destroy,
 	.get_mtu	= esp4_get_mtu,
 	.input		= esp_input,
-	.output		= esp_output
+	.output		= esp_output,
 };

 static struct xfrm4_protocol esp4_protocol = {
...
@@ -43,27 +43,31 @@ static struct sk_buff **esp4_gro_receive(struct sk_buff **head,
 	if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
 		goto out;

-	err = secpath_set(skb);
-	if (err)
-		goto out;
+	xo = xfrm_offload(skb);
+	if (!xo || !(xo->flags & CRYPTO_DONE)) {
+		err = secpath_set(skb);
+		if (err)
+			goto out;

-	if (skb->sp->len == XFRM_MAX_DEPTH)
-		goto out;
+		if (skb->sp->len == XFRM_MAX_DEPTH)
+			goto out;

-	x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
-			      (xfrm_address_t *)&ip_hdr(skb)->daddr,
-			      spi, IPPROTO_ESP, AF_INET);
-	if (!x)
-		goto out;
+		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
+				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
+				      spi, IPPROTO_ESP, AF_INET);
+		if (!x)
+			goto out;

-	skb->sp->xvec[skb->sp->len++] = x;
-	skb->sp->olen++;
+		skb->sp->xvec[skb->sp->len++] = x;
+		skb->sp->olen++;

-	xo = xfrm_offload(skb);
-	if (!xo) {
-		xfrm_state_put(x);
-		goto out;
+		xo = xfrm_offload(skb);
+		if (!xo) {
+			xfrm_state_put(x);
+			goto out;
+		}
 	}

 	xo->flags |= XFRM_GRO;
 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
@@ -84,19 +88,214 @@ static struct sk_buff **esp4_gro_receive(struct sk_buff **head,
 	return NULL;
 }

+static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
+{
+	struct ip_esp_hdr *esph;
+	struct iphdr *iph = ip_hdr(skb);
+	struct xfrm_offload *xo = xfrm_offload(skb);
+	int proto = iph->protocol;
+
+	skb_push(skb, -skb_network_offset(skb));
+	esph = ip_esp_hdr(skb);
+	*skb_mac_header(skb) = IPPROTO_ESP;
+
+	esph->spi = x->id.spi;
+	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
+
+	xo->proto = proto;
+}
+
+static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
+					netdev_features_t features)
+{
+	__u32 seq;
+	int err = 0;
+	struct sk_buff *skb2;
+	struct xfrm_state *x;
+	struct ip_esp_hdr *esph;
+	struct crypto_aead *aead;
+	struct sk_buff *segs = ERR_PTR(-EINVAL);
+	netdev_features_t esp_features = features;
+	struct xfrm_offload *xo = xfrm_offload(skb);
+
+	if (!xo)
+		goto out;
+
+	seq = xo->seq.low;
+
+	x = skb->sp->xvec[skb->sp->len - 1];
+	aead = x->data;
+	esph = ip_esp_hdr(skb);
+
+	if (esph->spi != x->id.spi)
+		goto out;
+
+	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
+		goto out;
+
+	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
+
+	skb->encap_hdr_csum = 1;
+
+	if (!(features & NETIF_F_HW_ESP))
+		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+
+	segs = x->outer_mode->gso_segment(x, skb, esp_features);
+	if (IS_ERR_OR_NULL(segs))
+		goto out;
+
+	__skb_pull(skb, skb->data - skb_mac_header(skb));
+
+	skb2 = segs;
+	do {
+		struct sk_buff *nskb = skb2->next;
+
+		xo = xfrm_offload(skb2);
+		xo->flags |= XFRM_GSO_SEGMENT;
+		xo->seq.low = seq;
+		xo->seq.hi = xfrm_replay_seqhi(x, seq);
+
+		if(!(features & NETIF_F_HW_ESP))
+			xo->flags |= CRYPTO_FALLBACK;
+
+		x->outer_mode->xmit(x, skb2);
+
+		err = x->type_offload->xmit(x, skb2, esp_features);
+		if (err) {
+			kfree_skb_list(segs);
+			return ERR_PTR(err);
+		}
+
+		if (!skb_is_gso(skb2))
+			seq++;
+		else
+			seq += skb_shinfo(skb2)->gso_segs;
+
+		skb_push(skb2, skb2->mac_len);
+		skb2 = nskb;
+	} while (skb2);
+
+out:
+	return segs;
+}
+
+static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
+{
+	struct crypto_aead *aead = x->data;
+
+	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
+		return -EINVAL;
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	return esp_input_done2(skb, 0);
+}
+
+static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
+{
+	int err;
+	int alen;
+	int blksize;
+	struct xfrm_offload *xo;
+	struct ip_esp_hdr *esph;
+	struct crypto_aead *aead;
+	struct esp_info esp;
+	bool hw_offload = true;
+
+	esp.inplace = true;
+
+	xo = xfrm_offload(skb);
+
+	if (!xo)
+		return -EINVAL;
+
+	if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
+	    (x->xso.dev != skb->dev)) {
+		xo->flags |= CRYPTO_FALLBACK;
+		hw_offload = false;
+	}
+
+	esp.proto = xo->proto;
+
+	/* skb is pure payload to encrypt */
+
+	aead = x->data;
+	alen = crypto_aead_authsize(aead);
+
+	esp.tfclen = 0;
+	/* XXX: Add support for tfc padding here. */
+
+	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
+	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
+	esp.plen = esp.clen - skb->len - esp.tfclen;
+	esp.tailen = esp.tfclen + esp.plen + alen;
+
+	esp.esph = ip_esp_hdr(skb);
+
+	if (!hw_offload || (hw_offload && !skb_is_gso(skb))) {
+		esp.nfrags = esp_output_head(x, skb, &esp);
+		if (esp.nfrags < 0)
+			return esp.nfrags;
+	}
+
+	esph = esp.esph;
+	esph->spi = x->id.spi;
+
+	skb_push(skb, -skb_network_offset(skb));
+
+	if (xo->flags & XFRM_GSO_SEGMENT) {
+		esph->seq_no = htonl(xo->seq.low);
+	} else {
+		ip_hdr(skb)->tot_len = htons(skb->len);
+		ip_send_check(ip_hdr(skb));
+	}
+
+	if (hw_offload)
+		return 0;
+
+	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
+
+	err = esp_output_tail(x, skb, &esp);
+	if (err < 0)
+		return err;
+
+	secpath_reset(skb);
+
+	return 0;
+}
+
 static const struct net_offload esp4_offload = {
 	.callbacks = {
 		.gro_receive = esp4_gro_receive,
+		.gso_segment = esp4_gso_segment,
 	},
 };

+static const struct xfrm_type_offload esp_type_offload = {
+	.description	= "ESP4 OFFLOAD",
+	.owner		= THIS_MODULE,
+	.proto		= IPPROTO_ESP,
+	.input_tail	= esp_input_tail,
+	.xmit		= esp_xmit,
+	.encap		= esp4_gso_encap,
+};
+
 static int __init esp4_offload_init(void)
 {
+	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
+		pr_info("%s: can't add xfrm type offload\n", __func__);
+		return -EAGAIN;
+	}
+
 	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
 }

 static void __exit esp4_offload_exit(void)
 {
+	if (xfrm_unregister_type_offload(&esp_type_offload, AF_INET) < 0)
+		pr_info("%s: can't remove xfrm type offload\n", __func__);
+
 	inet_del_offload(&esp4_offload, IPPROTO_ESP);
 }
...
@@ -12,6 +12,7 @@
 #include <net/dst.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
+#include <net/protocol.h>

 /* Add encapsulation header.
  *
@@ -23,6 +24,8 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
 	struct iphdr *iph = ip_hdr(skb);
 	int ihl = iph->ihl * 4;

+	skb_set_inner_transport_header(skb, skb_transport_offset(skb));
+
 	skb_set_network_header(skb, -x->props.header_len);
 	skb->mac_header = skb->network_header +
 			  offsetof(struct iphdr, protocol);
@@ -56,9 +59,40 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
 	return 0;
 }

+static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
+						   struct sk_buff *skb,
+						   netdev_features_t features)
+{
+	const struct net_offload *ops;
+	struct sk_buff *segs = ERR_PTR(-EINVAL);
+	struct xfrm_offload *xo = xfrm_offload(skb);
+
+	skb->transport_header += x->props.header_len;
+	ops = rcu_dereference(inet_offloads[xo->proto]);
+	if (likely(ops && ops->callbacks.gso_segment))
+		segs = ops->callbacks.gso_segment(skb, features);
+
+	return segs;
+}
+
+static void xfrm4_transport_xmit(struct xfrm_state *x, struct sk_buff *skb)
+{
+	struct xfrm_offload *xo = xfrm_offload(skb);
+
+	skb_reset_mac_len(skb);
+	pskb_pull(skb, skb->mac_len + sizeof(struct iphdr) + x->props.header_len);
+
+	if (xo->flags & XFRM_GSO_SEGMENT) {
+		skb_reset_transport_header(skb);
+		skb->transport_header -= x->props.header_len;
+	}
+}
+
 static struct xfrm_mode xfrm4_transport_mode = {
 	.input = xfrm4_transport_input,
 	.output = xfrm4_transport_output,
+	.gso_segment = xfrm4_transport_gso_segment,
+	.xmit = xfrm4_transport_xmit,
 	.owner = THIS_MODULE,
 	.encap = XFRM_MODE_TRANSPORT,
 };
...
@@ -33,6 +33,9 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 	struct iphdr *top_iph;
 	int flags;

+	skb_set_inner_network_header(skb, skb_network_offset(skb));
+	skb_set_inner_transport_header(skb, skb_transport_offset(skb));
+
 	skb_set_network_header(skb, -x->props.header_len);
 	skb->mac_header = skb->network_header +
 			  offsetof(struct iphdr, protocol);
@@ -96,11 +99,36 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 	return err;
 }

+static struct sk_buff *xfrm4_mode_tunnel_gso_segment(struct xfrm_state *x,
+						     struct sk_buff *skb,
+						     netdev_features_t features)
+{
+	__skb_push(skb, skb->mac_len);
+	return skb_mac_gso_segment(skb, features);
+}
+
+static void xfrm4_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
+{
+	struct xfrm_offload *xo = xfrm_offload(skb);
+
+	if (xo->flags & XFRM_GSO_SEGMENT) {
+		skb->network_header = skb->network_header - x->props.header_len;
+		skb->transport_header = skb->network_header +
+					sizeof(struct iphdr);
+	}
+
+	skb_reset_mac_len(skb);
+	pskb_pull(skb, skb->mac_len + x->props.header_len);
+}
+
 static struct xfrm_mode xfrm4_tunnel_mode = {
 	.input2 = xfrm4_mode_tunnel_input,
 	.input = xfrm_prepare_input,
 	.output2 = xfrm4_mode_tunnel_output,
 	.output = xfrm4_prepare_output,
+	.gso_segment = xfrm4_mode_tunnel_gso_segment,
+	.xmit = xfrm4_mode_tunnel_xmit,
 	.owner = THIS_MODULE,
 	.encap = XFRM_MODE_TUNNEL,
 	.flags = XFRM_MODE_FLAG_TUNNEL,
...
@@ -29,7 +29,8 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
 		goto out;

 	mtu = dst_mtu(skb_dst(skb));
-	if (skb->len > mtu) {
+	if ((!skb_is_gso(skb) && skb->len > mtu) ||
+	    (skb_is_gso(skb) && skb_gso_network_seglen(skb) > ip_skb_dst_mtu(skb->sk, skb))) {
 		skb->protocol = htons(ETH_P_IP);

 		if (skb->sk)
...
@@ -170,19 +170,23 @@ static void esp_output_restore_header(struct sk_buff *skb)
 }

 static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
+					     struct xfrm_state *x,
 					     struct ip_esp_hdr *esph,
 					     __be32 *seqhi)
 {
-	struct xfrm_state *x = skb_dst(skb)->xfrm;
-
 	/* For ESN we move the header forward by 4 bytes to
 	 * accomodate the high bits.  We will move it back after
 	 * encryption.
 	 */
 	if ((x->props.flags & XFRM_STATE_ESN)) {
+		struct xfrm_offload *xo = xfrm_offload(skb);
+
 		esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
 		*seqhi = esph->spi;
-		esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
+		if (xo)
+			esph->seq_no = htonl(xo->seq.hi);
+		else
+			esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
 	}

 	esph->spi = x->id.spi;
@@ -214,61 +218,16 @@ static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
 	tail[plen - 1] = proto;
 }

-static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
+int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 {
-	int err;
-	struct ip_esp_hdr *esph;
-	struct crypto_aead *aead;
-	struct aead_request *req;
-	struct scatterlist *sg, *dsg;
-	struct sk_buff *trailer;
-	struct page *page;
-	void *tmp;
-	int blksize;
-	int clen;
-	int alen;
-	int plen;
-	int ivlen;
-	int tfclen;
-	int nfrags;
-	int assoclen;
-	int seqhilen;
-	int tailen;
-	u8 *iv;
 	u8 *tail;
 	u8 *vaddr;
-	__be32 *seqhi;
-	__be64 seqno;
-	__u8 proto = *skb_mac_header(skb);
-
-	/* skb is pure payload to encrypt */
-
-	aead = x->data;
-	alen = crypto_aead_authsize(aead);
-	ivlen = crypto_aead_ivsize(aead);
-
-	tfclen = 0;
-	if (x->tfcpad) {
-		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
-		u32 padto;
-
-		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
-		if (skb->len < padto)
-			tfclen = padto - skb->len;
-	}
-	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
-	clen = ALIGN(skb->len + 2 + tfclen, blksize);
-	plen = clen - skb->len - tfclen;
-	tailen = tfclen + plen + alen;
-	assoclen = sizeof(*esph);
-	seqhilen = 0;
-
-	if (x->props.flags & XFRM_STATE_ESN) {
-		seqhilen += sizeof(__be32);
-		assoclen += seqhilen;
-	}
-
-	*skb_mac_header(skb) = IPPROTO_ESP;
+	int nfrags;
+	struct page *page;
+	struct ip_esp_hdr *esph;
+	struct sk_buff *trailer;
+	int tailen = esp->tailen;
+
 	esph = ip_esp_hdr(skb);

 	if (!skb_cloned(skb)) {
@@ -284,6 +243,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 			struct sock *sk = skb->sk;
 			struct page_frag *pfrag = &x->xfrag;

+			esp->inplace = false;
+
 			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

 			spin_lock_bh(&x->lock);
@@ -300,10 +261,12 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 			tail = vaddr + pfrag->offset;

-			esp_output_fill_trailer(tail, tfclen, plen, proto);
+			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

 			kunmap_atomic(vaddr);

+			spin_unlock_bh(&x->lock);
+
 			nfrags = skb_shinfo(skb)->nr_frags;

 			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
@@ -319,77 +282,56 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 			if (sk)
 				atomic_add(tailen, &sk->sk_wmem_alloc);

-			skb_push(skb, -skb_network_offset(skb));
-
-			esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
-			esph->spi = x->id.spi;
-
-			tmp = esp_alloc_tmp(aead, nfrags + 2, seqhilen);
-			if (!tmp) {
-				spin_unlock_bh(&x->lock);
-				err = -ENOMEM;
-				goto error;
-			}
-
-			seqhi = esp_tmp_seqhi(tmp);
-			iv = esp_tmp_iv(aead, tmp, seqhilen);
-			req = esp_tmp_req(aead, iv);
-			sg = esp_req_sg(aead, req);
-			dsg = &sg[nfrags];
-
-			esph = esp_output_set_esn(skb, esph, seqhi);
-
-			sg_init_table(sg, nfrags);
-			skb_to_sgvec(skb, sg,
-				     (unsigned char *)esph - skb->data,
-				     assoclen + ivlen + clen + alen);
-
-			allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
-
-			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
-				spin_unlock_bh(&x->lock);
-				err = -ENOMEM;
-				goto error;
-			}
-
-			skb_shinfo(skb)->nr_frags = 1;
-
-			page = pfrag->page;
-			get_page(page);
-			/* replace page frags in skb with new page */
-			__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
-			pfrag->offset = pfrag->offset + allocsize;
-
-			sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
-			skb_to_sgvec(skb, dsg,
-				     (unsigned char *)esph - skb->data,
-				     assoclen + ivlen + clen + alen);
-
-			spin_unlock_bh(&x->lock);
-
-			goto skip_cow2;
+			goto out;
 		}
 	}

 cow:
-	err = skb_cow_data(skb, tailen, &trailer);
-	if (err < 0)
-		goto error;
-	nfrags = err;
-
+	nfrags = skb_cow_data(skb, tailen, &trailer);
+	if (nfrags < 0)
+		goto out;
 	tail = skb_tail_pointer(trailer);
-	esph = ip_esp_hdr(skb);

 skip_cow:
-	esp_output_fill_trailer(tail, tfclen, plen, proto);
+	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
+	pskb_put(skb, trailer, tailen);

-	pskb_put(skb, trailer, clen - skb->len + alen);
-	skb_push(skb, -skb_network_offset(skb));
-
-	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
-	esph->spi = x->id.spi;
+out:
+	return nfrags;
+}
+EXPORT_SYMBOL_GPL(esp6_output_head);

-	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
+int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
+{
+	u8 *iv;
+	int alen;
+	void *tmp;
+	int ivlen;
+	int assoclen;
+	int seqhilen;
+	__be32 *seqhi;
+	struct page *page;
+	struct ip_esp_hdr *esph;
+	struct aead_request *req;
+	struct crypto_aead *aead;
+	struct scatterlist *sg, *dsg;
+	int err = -ENOMEM;
+
+	assoclen = sizeof(struct ip_esp_hdr);
+	seqhilen = 0;
+
+	if (x->props.flags & XFRM_STATE_ESN) {
+		seqhilen += sizeof(__be32);
+		assoclen += sizeof(__be32);
+	}
+
+	aead = x->data;
+	alen = crypto_aead_authsize(aead);
+	ivlen = crypto_aead_ivsize(aead);
+
+	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, seqhilen);
 	if (!tmp) {
-		spin_unlock_bh(&x->lock);
 		err = -ENOMEM;
 		goto error;
 	}
@@ -398,29 +340,57 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 	iv = esp_tmp_iv(aead, tmp, seqhilen);
 	req = esp_tmp_req(aead, iv);
 	sg = esp_req_sg(aead, req);
-	dsg = sg;

-	esph = esp_output_set_esn(skb, esph, seqhi);
+	if (esp->inplace)
+		dsg = sg;
+	else
+		dsg = &sg[esp->nfrags];

-	sg_init_table(sg, nfrags);
+	esph = esp_output_set_esn(skb, x, ip_esp_hdr(skb), seqhi);
+
+	sg_init_table(sg, esp->nfrags);
 	skb_to_sgvec(skb, sg,
 		     (unsigned char *)esph - skb->data,
-		     assoclen + ivlen + clen + alen);
+		     assoclen + ivlen + esp->clen + alen);
+
+	if (!esp->inplace) {
+		int allocsize;
+		struct page_frag *pfrag = &x->xfrag;
+
+		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
+
+		spin_lock_bh(&x->lock);
+		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
+			spin_unlock_bh(&x->lock);
+			err = -ENOMEM;
+			goto error;
+		}
+
+		skb_shinfo(skb)->nr_frags = 1;
+
+		page = pfrag->page;
+		get_page(page);
+		/* replace page frags in skb with new page */
+		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
+		pfrag->offset = pfrag->offset + allocsize;
+		spin_unlock_bh(&x->lock);
+
+		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
+		skb_to_sgvec(skb, dsg,
+			     (unsigned char *)esph - skb->data,
+			     assoclen + ivlen + esp->clen + alen);
+	}

-skip_cow2:
 	if ((x->props.flags & XFRM_STATE_ESN))
 		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
 	else
 		aead_request_set_callback(req, 0, esp_output_done, skb);

-	aead_request_set_crypt(req, sg, dsg, ivlen + clen, iv);
+	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
 	aead_request_set_ad(req, assoclen);

-	seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
-			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
-
 	memset(iv, 0, ivlen);
-	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&seqno + 8 - min(ivlen, 8),
+	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
 	       min(ivlen, 8));

 	ESP_SKB_CB(skb)->tmp = tmp;
@@ -446,10 +416,60 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 error:
 	return err;
 }
+EXPORT_SYMBOL_GPL(esp6_output_tail);

-static int esp_input_done2(struct sk_buff *skb, int err)
+static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
+{
+	int alen;
+	int blksize;
+	struct ip_esp_hdr *esph;
+	struct crypto_aead *aead;
+	struct esp_info esp;
+
+	esp.inplace = true;
+
+	esp.proto = *skb_mac_header(skb);
+	*skb_mac_header(skb) = IPPROTO_ESP;
+
+	/* skb is pure payload to encrypt */
+
+	aead = x->data;
+	alen = crypto_aead_authsize(aead);
+
+	esp.tfclen = 0;
+	if (x->tfcpad) {
+		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
+		u32 padto;
+
+		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
+		if (skb->len < padto)
+			esp.tfclen = padto - skb->len;
+	}
+	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
+	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
+	esp.plen = esp.clen - skb->len - esp.tfclen;
+	esp.tailen = esp.tfclen + esp.plen + alen;
+
+	esp.nfrags = esp6_output_head(x, skb, &esp);
+	if (esp.nfrags < 0)
+		return esp.nfrags;
+
+	esph = ip_esp_hdr(skb);
+	esph->spi = x->id.spi;
+
+	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
+	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
+				 ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
+
+	skb_push(skb, -skb_network_offset(skb));
+
+	return esp6_output_tail(x, skb, &esp);
+}
+
+int esp6_input_done2(struct sk_buff *skb, int err)
 {
 	struct xfrm_state *x = xfrm_input_state(skb);
+	struct xfrm_offload *xo = xfrm_offload(skb);
 	struct crypto_aead *aead = x->data;
 	int alen = crypto_aead_authsize(aead);
 	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
@@ -458,7 +478,8 @@ static int esp_input_done2(struct sk_buff *skb, int err)
 	int padlen;
 	u8 nexthdr[2];

-	kfree(ESP_SKB_CB(skb)->tmp);
+	if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
+		kfree(ESP_SKB_CB(skb)->tmp);

 	if (unlikely(err))
 		goto out;
@@ -492,12 +513,13 @@ static int esp_input_done2(struct sk_buff *skb, int err)
 out:
 	return err;
 }
+EXPORT_SYMBOL_GPL(esp6_input_done2);

 static void esp_input_done(struct crypto_async_request *base, int err)
 {
 	struct sk_buff *skb = base->data;

-	xfrm_input_resume(skb, esp_input_done2(skb, err));
+	xfrm_input_resume(skb, esp6_input_done2(skb, err));
 }

 static void esp_input_restore_header(struct sk_buff *skb)
@@ -619,7 +641,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 	if ((x->props.flags & XFRM_STATE_ESN))
 		esp_input_restore_header(skb);

-	ret = esp_input_done2(skb, ret);
+	ret = esp6_input_done2(skb, ret);

 out:
 	return ret;
@@ -682,13 +704,17 @@ static int esp_init_aead(struct xfrm_state *x)
 	char aead_name[CRYPTO_MAX_ALG_NAME];
 	struct crypto_aead *aead;
 	int err;
+	u32 mask = 0;

 	err = -ENAMETOOLONG;
 	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
 		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
 		goto error;

-	aead = crypto_alloc_aead(aead_name, 0, 0);
+	if (x->xso.offload_handle)
+		mask |= CRYPTO_ALG_ASYNC;
+
+	aead = crypto_alloc_aead(aead_name, 0, mask);
 	err = PTR_ERR(aead);
 	if (IS_ERR(aead))
 		goto error;
...@@ -718,6 +744,7 @@ static int esp_init_authenc(struct xfrm_state *x) ...@@ -718,6 +744,7 @@ static int esp_init_authenc(struct xfrm_state *x)
char authenc_name[CRYPTO_MAX_ALG_NAME]; char authenc_name[CRYPTO_MAX_ALG_NAME];
unsigned int keylen; unsigned int keylen;
int err; int err;
u32 mask = 0;
err = -EINVAL; err = -EINVAL;
if (!x->ealg) if (!x->ealg)
...@@ -743,7 +770,10 @@ static int esp_init_authenc(struct xfrm_state *x) ...@@ -743,7 +770,10 @@ static int esp_init_authenc(struct xfrm_state *x)
goto error; goto error;
} }
aead = crypto_alloc_aead(authenc_name, 0, 0); if (x->xso.offload_handle)
mask |= CRYPTO_ALG_ASYNC;
aead = crypto_alloc_aead(authenc_name, 0, mask);
err = PTR_ERR(aead); err = PTR_ERR(aead);
if (IS_ERR(aead)) if (IS_ERR(aead))
goto error; goto error;
......
@@ -45,27 +45,31 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
	if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
		goto out;

-	err = secpath_set(skb);
-	if (err)
-		goto out;
+	xo = xfrm_offload(skb);
+	if (!xo || !(xo->flags & CRYPTO_DONE)) {
+		err = secpath_set(skb);
+		if (err)
+			goto out;

		if (skb->sp->len == XFRM_MAX_DEPTH)
			goto out;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET6);
		if (!x)
			goto out;

		skb->sp->xvec[skb->sp->len++] = x;
		skb->sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo) {
			xfrm_state_put(x);
			goto out;
		}
+	}

	xo->flags |= XFRM_GRO;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
@@ -86,19 +90,216 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
	return NULL;
}
static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
struct ip_esp_hdr *esph;
struct ipv6hdr *iph = ipv6_hdr(skb);
struct xfrm_offload *xo = xfrm_offload(skb);
int proto = iph->nexthdr;
skb_push(skb, -skb_network_offset(skb));
esph = ip_esp_hdr(skb);
*skb_mac_header(skb) = IPPROTO_ESP;
esph->spi = x->id.spi;
esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
xo->proto = proto;
}
static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
__u32 seq;
int err = 0;
struct sk_buff *skb2;
struct xfrm_state *x;
struct ip_esp_hdr *esph;
struct crypto_aead *aead;
struct sk_buff *segs = ERR_PTR(-EINVAL);
netdev_features_t esp_features = features;
struct xfrm_offload *xo = xfrm_offload(skb);
if (!xo)
goto out;
seq = xo->seq.low;
x = skb->sp->xvec[skb->sp->len - 1];
aead = x->data;
esph = ip_esp_hdr(skb);
if (esph->spi != x->id.spi)
goto out;
if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
goto out;
__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
skb->encap_hdr_csum = 1;
if (!(features & NETIF_F_HW_ESP))
esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
segs = x->outer_mode->gso_segment(x, skb, esp_features);
if (IS_ERR_OR_NULL(segs))
goto out;
__skb_pull(skb, skb->data - skb_mac_header(skb));
skb2 = segs;
do {
struct sk_buff *nskb = skb2->next;
xo = xfrm_offload(skb2);
xo->flags |= XFRM_GSO_SEGMENT;
xo->seq.low = seq;
xo->seq.hi = xfrm_replay_seqhi(x, seq);
		if (!(features & NETIF_F_HW_ESP))
xo->flags |= CRYPTO_FALLBACK;
x->outer_mode->xmit(x, skb2);
err = x->type_offload->xmit(x, skb2, esp_features);
if (err) {
kfree_skb_list(segs);
return ERR_PTR(err);
}
if (!skb_is_gso(skb2))
seq++;
else
seq += skb_shinfo(skb2)->gso_segs;
skb_push(skb2, skb2->mac_len);
skb2 = nskb;
} while (skb2);
out:
return segs;
}
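
The segment walk above hands out ESP sequence numbers before the segments reach the driver: a plain segment consumes one, while a segment that is itself still GSO (the NETIF_F_HW_ESP case, where segmentation is deferred) reserves one per inner segment. A small user-space illustration of that accounting, with invented values:

#include <stdio.h>

int main(void)
{
	/* stand-ins for skb_shinfo(skb2)->gso_segs; 0 means !skb_is_gso() */
	unsigned int gso_segs[] = { 0, 0, 3 };
	unsigned int seq = 1000;	/* xo->seq.low of the parent packet */

	for (unsigned int i = 0; i < 3; i++) {
		printf("segment %u: seq.low = %u\n", i, seq);
		seq += gso_segs[i] ? gso_segs[i] : 1;
	}
	/* prints 1000, 1001, 1002; the next packet would start at 1005 */
	return 0;
}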
static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
struct crypto_aead *aead = x->data;
if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
return -EINVAL;
skb->ip_summed = CHECKSUM_NONE;
return esp6_input_done2(skb, 0);
}
static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
int err;
int alen;
int blksize;
struct xfrm_offload *xo;
struct ip_esp_hdr *esph;
struct crypto_aead *aead;
struct esp_info esp;
bool hw_offload = true;
esp.inplace = true;
xo = xfrm_offload(skb);
if (!xo)
return -EINVAL;
if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
(x->xso.dev != skb->dev)) {
xo->flags |= CRYPTO_FALLBACK;
hw_offload = false;
}
esp.proto = xo->proto;
/* skb is pure payload to encrypt */
aead = x->data;
alen = crypto_aead_authsize(aead);
esp.tfclen = 0;
/* XXX: Add support for tfc padding here. */
blksize = ALIGN(crypto_aead_blocksize(aead), 4);
esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
esp.plen = esp.clen - skb->len - esp.tfclen;
esp.tailen = esp.tfclen + esp.plen + alen;
if (!hw_offload || (hw_offload && !skb_is_gso(skb))) {
esp.nfrags = esp6_output_head(x, skb, &esp);
if (esp.nfrags < 0)
return esp.nfrags;
}
esph = ip_esp_hdr(skb);
esph->spi = x->id.spi;
skb_push(skb, -skb_network_offset(skb));
if (xo->flags & XFRM_GSO_SEGMENT) {
esph->seq_no = htonl(xo->seq.low);
} else {
int len;
len = skb->len - sizeof(struct ipv6hdr);
if (len > IPV6_MAXPLEN)
len = 0;
ipv6_hdr(skb)->payload_len = htons(len);
}
if (hw_offload)
return 0;
esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
err = esp6_output_tail(x, skb, &esp);
if (err < 0)
return err;
secpath_reset(skb);
return 0;
}
static const struct net_offload esp6_offload = {
	.callbacks = {
		.gro_receive = esp6_gro_receive,
+		.gso_segment = esp6_gso_segment,
	},
};
static const struct xfrm_type_offload esp6_type_offload = {
.description = "ESP6 OFFLOAD",
.owner = THIS_MODULE,
.proto = IPPROTO_ESP,
.input_tail = esp6_input_tail,
.xmit = esp6_xmit,
.encap = esp6_gso_encap,
};
static int __init esp6_offload_init(void)
{
+	if (xfrm_register_type_offload(&esp6_type_offload, AF_INET6) < 0) {
+		pr_info("%s: can't add xfrm type offload\n", __func__);
+		return -EAGAIN;
+	}
+
	return inet6_add_offload(&esp6_offload, IPPROTO_ESP);
}

static void __exit esp6_offload_exit(void)
{
+	if (xfrm_unregister_type_offload(&esp6_type_offload, AF_INET6) < 0)
+		pr_info("%s: can't remove xfrm type offload\n", __func__);
+
	inet6_del_offload(&esp6_offload, IPPROTO_ESP);
}
......
@@ -13,6 +13,7 @@
 #include <net/dst.h>
 #include <net/ipv6.h>
 #include <net/xfrm.h>
+#include <net/protocol.h>

/* Add encapsulation header.
 *
@@ -26,6 +27,7 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
	int hdr_len;

	iph = ipv6_hdr(skb);
+	skb_set_inner_transport_header(skb, skb_transport_offset(skb));
	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
	skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
@@ -61,9 +63,41 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
	return 0;
}
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
struct sk_buff *skb,
netdev_features_t features)
{
const struct net_offload *ops;
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct xfrm_offload *xo = xfrm_offload(skb);
skb->transport_header += x->props.header_len;
ops = rcu_dereference(inet6_offloads[xo->proto]);
if (likely(ops && ops->callbacks.gso_segment))
segs = ops->callbacks.gso_segment(skb, features);
return segs;
}
static void xfrm6_transport_xmit(struct xfrm_state *x, struct sk_buff *skb)
{
struct xfrm_offload *xo = xfrm_offload(skb);
skb_reset_mac_len(skb);
pskb_pull(skb, skb->mac_len + sizeof(struct ipv6hdr) + x->props.header_len);
if (xo->flags & XFRM_GSO_SEGMENT) {
skb_reset_transport_header(skb);
skb->transport_header -= x->props.header_len;
}
}
static struct xfrm_mode xfrm6_transport_mode = {
	.input = xfrm6_transport_input,
	.output = xfrm6_transport_output,
+	.gso_segment = xfrm4_transport_gso_segment,
+	.xmit = xfrm6_transport_xmit,
	.owner = THIS_MODULE,
	.encap = XFRM_MODE_TRANSPORT,
};
......
@@ -36,6 +36,9 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
	struct ipv6hdr *top_iph;
	int dsfield;

+	skb_set_inner_network_header(skb, skb_network_offset(skb));
+	skb_set_inner_transport_header(skb, skb_transport_offset(skb));
+
	skb_set_network_header(skb, -x->props.header_len);
	skb->mac_header = skb->network_header +
			  offsetof(struct ipv6hdr, nexthdr);
@@ -96,11 +99,35 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
	return err;
}
static struct sk_buff *xfrm6_mode_tunnel_gso_segment(struct xfrm_state *x,
struct sk_buff *skb,
netdev_features_t features)
{
__skb_push(skb, skb->mac_len);
return skb_mac_gso_segment(skb, features);
}
static void xfrm6_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
{
struct xfrm_offload *xo = xfrm_offload(skb);
if (xo->flags & XFRM_GSO_SEGMENT) {
skb->network_header = skb->network_header - x->props.header_len;
skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
}
skb_reset_mac_len(skb);
pskb_pull(skb, skb->mac_len + x->props.header_len);
}
static struct xfrm_mode xfrm6_tunnel_mode = {
	.input2 = xfrm6_mode_tunnel_input,
	.input = xfrm_prepare_input,
	.output2 = xfrm6_mode_tunnel_output,
	.output = xfrm6_prepare_output,
+	.gso_segment = xfrm6_mode_tunnel_gso_segment,
+	.xmit = xfrm6_mode_tunnel_xmit,
	.owner = THIS_MODULE,
	.encap = XFRM_MODE_TUNNEL,
	.flags = XFRM_MODE_FLAG_TUNNEL,
......
@@ -73,11 +73,16 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
	int mtu, ret = 0;
	struct dst_entry *dst = skb_dst(skb);

+	if (skb->ignore_df)
+		goto out;
+
	mtu = dst_mtu(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

-	if (!skb->ignore_df && skb->len > mtu) {
+	if ((!skb_is_gso(skb) && skb->len > mtu) ||
+	    (skb_is_gso(skb) &&
+	     skb_gso_network_seglen(skb) > ip6_skb_dst_mtu(skb))) {
		skb->dev = dst->dev;
		skb->protocol = htons(ETH_P_IPV6);
@@ -89,7 +94,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		ret = -EMSGSIZE;
	}
+out:
	return ret;
}
......
@@ -5,6 +5,7 @@
obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \
		      xfrm_input.o xfrm_output.o \
		      xfrm_sysctl.o xfrm_replay.o
+obj-$(CONFIG_XFRM_OFFLOAD) += xfrm_device.o
obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o
obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o
obj-$(CONFIG_XFRM_USER) += xfrm_user.o
......
/*
* xfrm_device.c - IPsec device offloading code.
*
* Copyright (c) 2015 secunet Security Networks AG
*
* Author:
* Steffen Klassert <steffen.klassert@secunet.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>
int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
{
int err;
struct xfrm_state *x;
struct xfrm_offload *xo = xfrm_offload(skb);
if (skb_is_gso(skb))
return 0;
if (xo) {
x = skb->sp->xvec[skb->sp->len - 1];
if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
return 0;
x->outer_mode->xmit(x, skb);
err = x->type_offload->xmit(x, skb, features);
if (err) {
XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
return err;
}
skb_push(skb, skb->data - skb_mac_header(skb));
}
return 0;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
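
validate_xmit_xfrm() is the software-fallback hook for packets that reach the device layer without having been handled by hardware: GSO skbs pass through untouched (the GSO handlers above cover them), while non-GSO offload skbs get encapsulated and encrypted in place. A hedged sketch of how the core transmit path is expected to call it from validate_xmit_skb() in net/core/dev.c (the exact call site is not part of this hunk):

	/* sketch, inside validate_xmit_skb(); placement is illustrative */
	if (validate_xmit_xfrm(skb, features))
		goto out_kfree_skb;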
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
struct xfrm_user_offload *xuo)
{
int err;
struct dst_entry *dst;
struct net_device *dev;
struct xfrm_state_offload *xso = &x->xso;
xfrm_address_t *saddr;
xfrm_address_t *daddr;
if (!x->type_offload)
return 0;
/* We don't yet support UDP encapsulation, TFC padding and ESN. */
if (x->encap || x->tfcpad || (x->props.flags & XFRM_STATE_ESN))
return 0;
dev = dev_get_by_index(net, xuo->ifindex);
if (!dev) {
if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
saddr = &x->props.saddr;
daddr = &x->id.daddr;
} else {
saddr = &x->id.daddr;
daddr = &x->props.saddr;
}
dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr, x->props.family);
if (IS_ERR(dst))
return 0;
dev = dst->dev;
dev_hold(dev);
dst_release(dst);
}
if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
dev_put(dev);
return 0;
}
xso->dev = dev;
xso->num_exthdrs = 1;
xso->flags = xuo->flags;
err = dev->xfrmdev_ops->xdo_dev_state_add(x);
if (err) {
dev_put(dev);
return err;
}
return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);
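
xfrm_dev_state_add() only hands the SA to the device; the device-specific work happens behind dev->xfrmdev_ops. A minimal, hypothetical driver sketch of that contract (all foo_* names are invented; only the callback shapes come from this series):

/* Hypothetical NIC driver glue; foo_hw_* helpers are assumed. */
static int foo_xdo_dev_state_add(struct xfrm_state *x)
{
	/* Reject what the hardware cannot do, program the SA, and
	 * publish a non-zero handle so the stack takes the offload
	 * path via x->xso.offload_handle. */
	x->xso.offload_handle = foo_hw_install_sa(x);
	return x->xso.offload_handle ? 0 : -ENOSPC;
}

static void foo_xdo_dev_state_delete(struct xfrm_state *x)
{
	foo_hw_remove_sa(x->xso.offload_handle);
}

static bool foo_xdo_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	return true;	/* no per-packet restrictions in this sketch */
}

static const struct xfrmdev_ops foo_xfrmdev_ops = {
	.xdo_dev_state_add	= foo_xdo_dev_state_add,
	.xdo_dev_state_delete	= foo_xdo_dev_state_delete,
	.xdo_dev_offload_ok	= foo_xdo_dev_offload_ok,
};

/* at probe time:
 *	netdev->xfrmdev_ops = &foo_xfrmdev_ops;
 *	netdev->features   |= NETIF_F_HW_ESP;
 */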
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
int mtu;
struct dst_entry *dst = skb_dst(skb);
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
struct net_device *dev = x->xso.dev;
if (!x->type_offload || x->encap)
return false;
if ((x->xso.offload_handle && (dev == dst->path->dev)) &&
!dst->child->xfrm && x->type->get_mtu) {
mtu = x->type->get_mtu(x, xdst->child_mtu_cached);
if (skb->len <= mtu)
goto ok;
if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
goto ok;
}
return false;
ok:
if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);
return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
int xfrm_dev_register(struct net_device *dev)
{
if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
return NOTIFY_BAD;
if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
!(dev->features & NETIF_F_HW_ESP))
return NOTIFY_BAD;
return NOTIFY_DONE;
}
static int xfrm_dev_unregister(struct net_device *dev)
{
return NOTIFY_DONE;
}
static int xfrm_dev_feat_change(struct net_device *dev)
{
if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
return NOTIFY_BAD;
else if (!(dev->features & NETIF_F_HW_ESP))
dev->xfrmdev_ops = NULL;
if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
!(dev->features & NETIF_F_HW_ESP))
return NOTIFY_BAD;
return NOTIFY_DONE;
}
static int xfrm_dev_down(struct net_device *dev)
{
if (dev->hw_features & NETIF_F_HW_ESP)
xfrm_dev_state_flush(dev_net(dev), dev, true);
xfrm_garbage_collect(dev_net(dev));
return NOTIFY_DONE;
}
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
switch (event) {
case NETDEV_REGISTER:
return xfrm_dev_register(dev);
case NETDEV_UNREGISTER:
return xfrm_dev_unregister(dev);
case NETDEV_FEAT_CHANGE:
return xfrm_dev_feat_change(dev);
case NETDEV_DOWN:
return xfrm_dev_down(dev);
}
return NOTIFY_DONE;
}
static struct notifier_block xfrm_dev_notifier = {
.notifier_call = xfrm_dev_event,
};
void __net_init xfrm_dev_init(void)
{
register_netdevice_notifier(&xfrm_dev_notifier);
}
@@ -107,6 +107,8 @@ struct sec_path *secpath_dup(struct sec_path *src)
	sp->len = 0;
	sp->olen = 0;

+	memset(sp->ovec, 0, sizeof(sp->ovec[XFRM_MAX_OFFLOAD_DEPTH]));
+
	if (src) {
		int i;
@@ -207,8 +209,9 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
	unsigned int family;
	int decaps = 0;
	int async = 0;
-	struct xfrm_offload *xo;
	bool xfrm_gro = false;
+	bool crypto_done = false;
+	struct xfrm_offload *xo = xfrm_offload(skb);

	if (encap_type < 0) {
		x = xfrm_input_state(skb);
@@ -220,9 +223,40 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
			seq = XFRM_SKB_CB(skb)->seq.input.low;
			goto resume;
		}

		/* encap_type < -1 indicates a GRO call. */
		encap_type = 0;
		seq = XFRM_SPI_SKB_CB(skb)->seq;
if (xo && (xo->flags & CRYPTO_DONE)) {
crypto_done = true;
x = xfrm_input_state(skb);
family = XFRM_SPI_SKB_CB(skb)->family;
if (!(xo->status & CRYPTO_SUCCESS)) {
if (xo->status &
(CRYPTO_TRANSPORT_AH_AUTH_FAILED |
CRYPTO_TRANSPORT_ESP_AUTH_FAILED |
CRYPTO_TUNNEL_AH_AUTH_FAILED |
CRYPTO_TUNNEL_ESP_AUTH_FAILED)) {
xfrm_audit_state_icvfail(x, skb,
x->type->proto);
x->stats.integrity_failed++;
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
goto drop;
}
XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
goto drop;
}
if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
goto drop;
}
}
		goto lock;
	}
@@ -311,7 +345,10 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
		skb_dst_force(skb);
		dev_hold(skb->dev);

-		nexthdr = x->type->input(x, skb);
+		if (crypto_done)
+			nexthdr = x->type_offload->input_tail(x, skb);
+		else
+			nexthdr = x->type->input(x, skb);

		if (nexthdr == -EINPROGRESS)
			return 0;
......
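
The CRYPTO_DONE branch added to xfrm_input() above is driven by metadata that a capable driver attaches on receive: the hardware decrypts and authenticates, and the driver reports a verdict instead of leaving the work to the crypto layer. A heavily hedged sketch of the reporting side (xo is assumed to have been set up on the skb's sec_path by the driver's receive path; the foo_* name is invented, only the flag and status values come from this series):

/* Hypothetical driver RX helper. */
static void foo_report_rx_verdict(struct xfrm_offload *xo, bool icv_ok)
{
	xo->flags |= CRYPTO_DONE;
	xo->status = icv_ok ? CRYPTO_SUCCESS
			    : CRYPTO_TUNNEL_ESP_AUTH_FAILED;
}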
@@ -99,12 +99,13 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
	skb_dst_force(skb);

-	/* Inner headers are invalid now. */
-	skb->encapsulation = 0;
+	if (xfrm_offload(skb)) {
+		x->type_offload->encap(x, skb);
+	} else {
		err = x->type->output(x, skb);
		if (err == -EINPROGRESS)
			goto out;
+	}

resume:
	if (err) {
@@ -200,8 +201,40 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
+	struct xfrm_state *x = skb_dst(skb)->xfrm;
	int err;
secpath_reset(skb);
skb->encapsulation = 0;
if (xfrm_dev_offload_ok(skb, x)) {
struct sec_path *sp;
sp = secpath_dup(skb->sp);
if (!sp) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
kfree_skb(skb);
return -ENOMEM;
}
if (skb->sp)
secpath_put(skb->sp);
skb->sp = sp;
skb->encapsulation = 1;
sp->olen++;
sp->xvec[skb->sp->len++] = x;
xfrm_state_hold(x);
if (skb_is_gso(skb)) {
skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
return xfrm_output2(net, sk, skb);
}
if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
goto out;
}
	if (skb_is_gso(skb))
		return xfrm_output_gso(net, sk, skb);
@@ -214,6 +247,7 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
		}
	}

+out:
	return xfrm_output2(net, sk, skb);
}
EXPORT_SYMBOL_GPL(xfrm_output);
......
@@ -116,11 +116,10 @@ static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
	return afinfo;
}

-static inline struct dst_entry *__xfrm_dst_lookup(struct net *net,
-						  int tos, int oif,
-						  const xfrm_address_t *saddr,
-						  const xfrm_address_t *daddr,
-						  int family)
+struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
+				    const xfrm_address_t *saddr,
+				    const xfrm_address_t *daddr,
+				    int family)
{
	const struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;
@@ -135,6 +134,7 @@ static inline struct dst_entry *__xfrm_dst_lookup(struct net *net,
	return dst;
}
+EXPORT_SYMBOL(__xfrm_dst_lookup);

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
						int tos, int oif,
@@ -2929,21 +2929,6 @@ void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
-static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
-{
-	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-
-	switch (event) {
-	case NETDEV_DOWN:
-		xfrm_garbage_collect(dev_net(dev));
-	}
-
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block xfrm_dev_notifier = {
-	.notifier_call = xfrm_dev_event,
-};
#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
@@ -3020,7 +3005,7 @@ static int __net_init xfrm_policy_init(struct net *net)
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);

	if (net_eq(net, &init_net))
-		register_netdevice_notifier(&xfrm_dev_notifier);
+		xfrm_dev_init();

	return 0;

out_bydst:
......
@@ -45,7 +45,8 @@ u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq)
	return seq_hi;
}
+EXPORT_SYMBOL(xfrm_replay_seqhi);

static void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
@@ -558,6 +559,158 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
	x->repl->notify(x, XFRM_REPLAY_UPDATE);
}
#ifdef CONFIG_XFRM_OFFLOAD
static int xfrm_replay_overflow_offload(struct xfrm_state *x, struct sk_buff *skb)
{
int err = 0;
struct net *net = xs_net(x);
struct xfrm_offload *xo = xfrm_offload(skb);
__u32 oseq = x->replay.oseq;
if (!xo)
return xfrm_replay_overflow(x, skb);
if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
if (!skb_is_gso(skb)) {
XFRM_SKB_CB(skb)->seq.output.low = ++oseq;
xo->seq.low = oseq;
} else {
XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
xo->seq.low = oseq + 1;
oseq += skb_shinfo(skb)->gso_segs;
}
XFRM_SKB_CB(skb)->seq.output.hi = 0;
xo->seq.hi = 0;
if (unlikely(oseq < x->replay.oseq)) {
xfrm_audit_state_replay_overflow(x, skb);
err = -EOVERFLOW;
return err;
}
x->replay.oseq = oseq;
if (xfrm_aevent_is_on(net))
x->repl->notify(x, XFRM_REPLAY_UPDATE);
}
return err;
}
static int xfrm_replay_overflow_offload_bmp(struct xfrm_state *x, struct sk_buff *skb)
{
int err = 0;
struct xfrm_offload *xo = xfrm_offload(skb);
struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
struct net *net = xs_net(x);
__u32 oseq = replay_esn->oseq;
if (!xo)
return xfrm_replay_overflow_bmp(x, skb);
if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
if (!skb_is_gso(skb)) {
XFRM_SKB_CB(skb)->seq.output.low = ++oseq;
xo->seq.low = oseq;
} else {
XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
xo->seq.low = oseq + 1;
oseq += skb_shinfo(skb)->gso_segs;
}
XFRM_SKB_CB(skb)->seq.output.hi = 0;
xo->seq.hi = 0;
if (unlikely(oseq < replay_esn->oseq)) {
xfrm_audit_state_replay_overflow(x, skb);
err = -EOVERFLOW;
return err;
} else {
replay_esn->oseq = oseq;
}
if (xfrm_aevent_is_on(net))
x->repl->notify(x, XFRM_REPLAY_UPDATE);
}
return err;
}
static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff *skb)
{
int err = 0;
struct xfrm_offload *xo = xfrm_offload(skb);
struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
struct net *net = xs_net(x);
__u32 oseq = replay_esn->oseq;
__u32 oseq_hi = replay_esn->oseq_hi;
if (!xo)
return xfrm_replay_overflow_esn(x, skb);
if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
if (!skb_is_gso(skb)) {
XFRM_SKB_CB(skb)->seq.output.low = ++oseq;
XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi;
xo->seq.low = oseq;
xo->seq.hi = oseq_hi;
} else {
XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi;
xo->seq.low = oseq = oseq + 1;
xo->seq.hi = oseq_hi;
oseq += skb_shinfo(skb)->gso_segs;
}
if (unlikely(oseq < replay_esn->oseq)) {
XFRM_SKB_CB(skb)->seq.output.hi = ++oseq_hi;
xo->seq.hi = oseq_hi;
if (replay_esn->oseq_hi == 0) {
replay_esn->oseq--;
replay_esn->oseq_hi--;
xfrm_audit_state_replay_overflow(x, skb);
err = -EOVERFLOW;
return err;
}
}
replay_esn->oseq = oseq;
replay_esn->oseq_hi = oseq_hi;
if (xfrm_aevent_is_on(net))
x->repl->notify(x, XFRM_REPLAY_UPDATE);
}
return err;
}
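
Worked example of the ESN bookkeeping above, under the assumption of a GSO packet: N segments reserve N sequence numbers in one go, and a wrap of the low 32 bits carries into oseq_hi. A user-space illustration with invented values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t oseq = 0xfffffffeu, oseq_hi = 0;	/* replay_esn state */
	uint32_t gso_segs = 4;				/* segments in the packet */

	uint32_t first = oseq + 1;	/* low sequence number of segment 0 */
	uint32_t before = oseq;

	oseq += gso_segs;
	if (oseq < before)	/* low 32 bits wrapped: carry into the high word */
		oseq_hi++;

	/* prints: first=4294967295 oseq=2 oseq_hi=1 */
	printf("first=%u oseq=%u oseq_hi=%u\n", first, oseq, oseq_hi);
	return 0;
}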
static const struct xfrm_replay xfrm_replay_legacy = {
.advance = xfrm_replay_advance,
.check = xfrm_replay_check,
.recheck = xfrm_replay_check,
.notify = xfrm_replay_notify,
.overflow = xfrm_replay_overflow_offload,
};
static const struct xfrm_replay xfrm_replay_bmp = {
.advance = xfrm_replay_advance_bmp,
.check = xfrm_replay_check_bmp,
.recheck = xfrm_replay_check_bmp,
.notify = xfrm_replay_notify_bmp,
.overflow = xfrm_replay_overflow_offload_bmp,
};
static const struct xfrm_replay xfrm_replay_esn = {
.advance = xfrm_replay_advance_esn,
.check = xfrm_replay_check_esn,
.recheck = xfrm_replay_recheck_esn,
.notify = xfrm_replay_notify_esn,
.overflow = xfrm_replay_overflow_offload_esn,
};
#else
static const struct xfrm_replay xfrm_replay_legacy = {
	.advance = xfrm_replay_advance,
	.check = xfrm_replay_check,
@@ -581,6 +734,7 @@ static const struct xfrm_replay xfrm_replay_esn = {
	.notify = xfrm_replay_notify_esn,
	.overflow = xfrm_replay_overflow_esn,
};
#endif
int xfrm_init_replay(struct xfrm_state *x)
{
@@ -595,10 +749,12 @@ int xfrm_init_replay(struct xfrm_state *x)
			if (replay_esn->replay_window == 0)
				return -EINVAL;
			x->repl = &xfrm_replay_esn;
-		} else
+		} else {
			x->repl = &xfrm_replay_bmp;
+		}
-	} else
+	} else {
		x->repl = &xfrm_replay_legacy;
+	}

	return 0;
}
......
@@ -251,6 +251,75 @@ static void xfrm_put_type(const struct xfrm_type *type)
	module_put(type->owner);
}
static DEFINE_SPINLOCK(xfrm_type_offload_lock);
int xfrm_register_type_offload(const struct xfrm_type_offload *type,
unsigned short family)
{
struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
const struct xfrm_type_offload **typemap;
int err = 0;
if (unlikely(afinfo == NULL))
return -EAFNOSUPPORT;
typemap = afinfo->type_offload_map;
spin_lock_bh(&xfrm_type_offload_lock);
if (likely(typemap[type->proto] == NULL))
typemap[type->proto] = type;
else
err = -EEXIST;
spin_unlock_bh(&xfrm_type_offload_lock);
rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(xfrm_register_type_offload);
int xfrm_unregister_type_offload(const struct xfrm_type_offload *type,
unsigned short family)
{
struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
const struct xfrm_type_offload **typemap;
int err = 0;
if (unlikely(afinfo == NULL))
return -EAFNOSUPPORT;
typemap = afinfo->type_offload_map;
spin_lock_bh(&xfrm_type_offload_lock);
if (unlikely(typemap[type->proto] != type))
err = -ENOENT;
else
typemap[type->proto] = NULL;
spin_unlock_bh(&xfrm_type_offload_lock);
rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(xfrm_unregister_type_offload);
static const struct xfrm_type_offload *xfrm_get_type_offload(u8 proto, unsigned short family)
{
struct xfrm_state_afinfo *afinfo;
const struct xfrm_type_offload **typemap;
const struct xfrm_type_offload *type;
afinfo = xfrm_state_get_afinfo(family);
if (unlikely(afinfo == NULL))
return NULL;
typemap = afinfo->type_offload_map;
type = typemap[proto];
	if (type && !try_module_get(type->owner))
type = NULL;
rcu_read_unlock();
return type;
}
static void xfrm_put_type_offload(const struct xfrm_type_offload *type)
{
module_put(type->owner);
}
static DEFINE_SPINLOCK(xfrm_mode_lock);
int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
@@ -365,10 +434,13 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
	xfrm_put_mode(x->inner_mode_iaf);
	if (x->outer_mode)
		xfrm_put_mode(x->outer_mode);
+	if (x->type_offload)
+		xfrm_put_type_offload(x->type_offload);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
+	xfrm_dev_state_free(x);
	security_xfrm_state_free(x);
	kfree(x);
}
@@ -538,6 +610,8 @@ int __xfrm_state_delete(struct xfrm_state *x)
		net->xfrm.state_num--;
		spin_unlock(&net->xfrm.xfrm_state_lock);

+	xfrm_dev_state_delete(x);
+
	/* All xfrm_state objects are created by xfrm_state_alloc.
	 * The xfrm_state_alloc call gives a reference, and that
	 * is what we are dropping here.
@@ -582,12 +656,41 @@ xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
	return err;
}
static inline int
xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
{
int i, err = 0;
for (i = 0; i <= net->xfrm.state_hmask; i++) {
struct xfrm_state *x;
struct xfrm_state_offload *xso;
hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
xso = &x->xso;
if (xso->dev == dev &&
(err = security_xfrm_state_delete(x)) != 0) {
xfrm_audit_state_delete(x, 0, task_valid);
return err;
}
}
}
return err;
}
#else
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
{
	return 0;
}

+static inline int
+xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
+{
+	return 0;
+}
#endif

int xfrm_state_flush(struct net *net, u8 proto, bool task_valid)
@@ -630,6 +733,48 @@ int xfrm_state_flush(struct net *net, u8 proto, bool task_valid)
}
EXPORT_SYMBOL(xfrm_state_flush);
int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid)
{
int i, err = 0, cnt = 0;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
err = xfrm_dev_state_flush_secctx_check(net, dev, task_valid);
if (err)
goto out;
err = -ESRCH;
for (i = 0; i <= net->xfrm.state_hmask; i++) {
struct xfrm_state *x;
struct xfrm_state_offload *xso;
restart:
hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
xso = &x->xso;
if (!xfrm_state_kern(x) && xso->dev == dev) {
xfrm_state_hold(x);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
err = xfrm_state_delete(x);
xfrm_audit_state_delete(x, err ? 0 : 1,
task_valid);
xfrm_state_put(x);
if (!err)
cnt++;
spin_lock_bh(&net->xfrm.xfrm_state_lock);
goto restart;
}
}
}
if (cnt)
err = 0;
out:
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
return err;
}
EXPORT_SYMBOL(xfrm_dev_state_flush);
void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
{
	spin_lock_bh(&net->xfrm.xfrm_state_lock);
@@ -2077,6 +2222,8 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
	if (x->type == NULL)
		goto error;

+	x->type_offload = xfrm_get_type_offload(x->id.proto, family);
+
	err = x->type->init_state(x);
	if (err)
		goto error;
......
@@ -595,6 +595,10 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
		goto error;
	}

+	if (attrs[XFRMA_OFFLOAD_DEV] &&
+	    xfrm_dev_state_add(net, x, nla_data(attrs[XFRMA_OFFLOAD_DEV])))
+		goto error;
+
	if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
					       attrs[XFRMA_REPLAY_ESN_VAL])))
		goto error;
@@ -779,6 +783,23 @@ static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
	return 0;
}
static int copy_user_offload(struct xfrm_state_offload *xso, struct sk_buff *skb)
{
struct xfrm_user_offload *xuo;
struct nlattr *attr;
attr = nla_reserve(skb, XFRMA_OFFLOAD_DEV, sizeof(*xuo));
if (attr == NULL)
return -EMSGSIZE;
xuo = nla_data(attr);
xuo->ifindex = xso->dev->ifindex;
xuo->flags = xso->flags;
return 0;
}
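
On the configuration side, XFRMA_OFFLOAD_DEV carries a struct xfrm_user_offload (an ifindex plus direction flags). A hedged sketch of what a userspace tool would put into that attribute (netlink message assembly omitted; with a matching iproute2 this is expected to surface as something like `ip xfrm state add ... offload dev eth0 dir in`):

#include <net/if.h>		/* if_nametoindex() */
#include <linux/xfrm.h>		/* struct xfrm_user_offload, XFRM_OFFLOAD_* */

/* Fill the XFRMA_OFFLOAD_DEV payload for an inbound SA on eth0
 * (sketch; error handling and message construction omitted). */
static void fill_offload_attr(struct xfrm_user_offload *xuo)
{
	xuo->ifindex = if_nametoindex("eth0");
	xuo->flags   = XFRM_OFFLOAD_INBOUND;
}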
static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
{
	struct xfrm_algo *algo;
@@ -869,6 +890,10 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
			    &x->replay);
	if (ret)
		goto out;
+	if (x->xso.dev)
+		ret = copy_user_offload(&x->xso, skb);
+	if (ret)
+		goto out;
	if (x->security)
		ret = copy_sec_ctx(x->security, skb);
out:
@@ -2406,6 +2431,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
	[XFRMA_SA_EXTRA_FLAGS]	= { .type = NLA_U32 },
	[XFRMA_PROTO]		= { .type = NLA_U8 },
	[XFRMA_ADDRESS_FILTER]	= { .len = sizeof(struct xfrm_address_filter) },
+	[XFRMA_OFFLOAD_DEV]	= { .len = sizeof(struct xfrm_user_offload) },
};

static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
@@ -2623,6 +2649,8 @@ static inline size_t xfrm_sa_len(struct xfrm_state *x)
		l += nla_total_size(sizeof(*x->coaddr));
	if (x->props.extra_flags)
		l += nla_total_size(sizeof(x->props.extra_flags));
+	if (x->xso.dev)
+		l += nla_total_size(sizeof(x->xso));

	/* Must count x->lastused as it may become non-zero behind our back. */
	l += nla_total_size_64bit(sizeof(u64));
......