Commit d5b60c65 authored by Steffen Klassert

Merge branch 'Support IPsec crypto offload for IPv6 ESP and IPv4 UDP-encapsulated ESP data paths'

Mike Yu says:

====================
Currently, IPsec crypto offload is enabled only for the GRO code path. However,
there are other code paths where the XFRM stack is involved; for example, IPv6 ESP
packets are handled by xfrm6_esp_rcv() in the ESP layer, and IPv4 UDP-encapsulated
ESP packets by udp_rcv() in the UDP layer.

This patchset extends crypto offload support to cover these two cases. This is
useful for devices that do traffic accounting (e.g., Android), where GRO can lead
to inaccurate accounting on the underlying network. For example, VPN traffic might
not be counted against the Wi-Fi interface wlan0 if the packets are handled in the
GRO code path before entering the network stack for accounting.

Below is the RX data path scenario to which crypto offload can be applied.

  +-----------+   +-------+
  | HW Driver |-->| wlan0 |--------+
  +-----------+   +-------+        |
                                   v
                             +---------------+   +------+
                     +------>| Network Stack |-->| Apps |
                     |       +---------------+   +------+
                     |             |
                     |             v
                 +--------+   +------------+
                 | ipsec1 |<--| XFRM Stack |
                 +--------+   +------------+
====================
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
parents f531d13b 447bc4b1
net/ipv4/esp4.c:

@@ -349,6 +349,7 @@ static struct ip_esp_hdr *esp_output_udp_encap(struct sk_buff *skb,
 {
 	struct udphdr *uh;
 	unsigned int len;
+	struct xfrm_offload *xo = xfrm_offload(skb);
 
 	len = skb->len + esp->tailen - skb_transport_offset(skb);
 	if (len + sizeof(struct iphdr) > IP_MAX_MTU)
@@ -360,7 +361,12 @@ static struct ip_esp_hdr *esp_output_udp_encap(struct sk_buff *skb,
 	uh->len = htons(len);
 	uh->check = 0;
 
-	*skb_mac_header(skb) = IPPROTO_UDP;
+	/* For IPv4 ESP with UDP encapsulation, if xo is not null, the skb is in the crypto offload
+	 * data path, which means that esp_output_udp_encap is called outside of the XFRM stack.
+	 * In this case, the mac header doesn't point to the IPv4 protocol field, so don't set it.
+	 */
+	if (!xo || encap_type != UDP_ENCAP_ESPINUDP)
+		*skb_mac_header(skb) = IPPROTO_UDP;
 
 	return (struct ip_esp_hdr *)(uh + 1);
 }
...
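The comment above relies on an invariant worth spelling out: inside the XFRM stack, xfrm4_tunnel_encap_add() leaves skb->mac_header pointing at iphdr->protocol, so a one-byte write through skb_mac_header() switches the outer protocol to UDP; on the crypto offload path that invariant does not hold, hence the new guard. Below is a minimal compilable userspace model of the guard; the model_skb type and its field names are illustrative stand-ins, not kernel code.

/* Minimal userspace model of the guard above (illustrative, not kernel code). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define UDP_ENCAP_ESPINUDP 2	/* as in include/uapi/linux/udp.h */
#define IPPROTO_UDP 17

struct model_skb {
	uint8_t protocol;	/* stands in for iphdr->protocol */
	uint8_t *mac_header;	/* points at protocol only on the XFRM stack path */
	bool has_xo;		/* stands in for xfrm_offload(skb) != NULL */
};

static void set_outer_protocol(struct model_skb *skb, int encap_type)
{
	/* Mirror of the new condition: skip the write on the crypto offload
	 * path, where mac_header does not point at iphdr->protocol.
	 */
	if (!skb->has_xo || encap_type != UDP_ENCAP_ESPINUDP)
		*skb->mac_header = IPPROTO_UDP;
}

int main(void)
{
	struct model_skb skb = { .protocol = 0, .has_xo = false };

	skb.mac_header = &skb.protocol;	/* as xfrm4_tunnel_encap_add() arranges */
	set_outer_protocol(&skb, UDP_ENCAP_ESPINUDP);
	printf("outer protocol=%u\n", skb.protocol);	/* prints 17 (IPPROTO_UDP) */
	return 0;
}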
net/ipv4/esp4_offload.c:

@@ -264,6 +264,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
 	struct esp_info esp;
 	bool hw_offload = true;
 	__u32 seq;
+	int encap_type = 0;
 
 	esp.inplace = true;
 
@@ -296,8 +297,10 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
 
 	esp.esph = ip_esp_hdr(skb);
 
-	if (!hw_offload || !skb_is_gso(skb)) {
+	if (x->encap)
+		encap_type = x->encap->encap_type;
+
+	if (!hw_offload || !skb_is_gso(skb) || (hw_offload && encap_type == UDP_ENCAP_ESPINUDP)) {
 		esp.nfrags = esp_output_head(x, skb, &esp);
 		if (esp.nfrags < 0)
 			return esp.nfrags;
@@ -324,6 +327,18 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
 
 	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));
 
+	if (hw_offload && encap_type == UDP_ENCAP_ESPINUDP) {
+		/* In the XFRM stack, the encapsulation protocol is set to iphdr->protocol by
+		 * setting *skb_mac_header(skb) (see esp_output_udp_encap()) where skb->mac_header
+		 * points to iphdr->protocol (see xfrm4_tunnel_encap_add()).
+		 * However, in esp_xmit(), skb->mac_header doesn't point to iphdr->protocol.
+		 * Therefore, the protocol field needs to be corrected.
+		 */
+		ip_hdr(skb)->protocol = IPPROTO_UDP;
+
+		esph->seq_no = htonl(seq);
+	}
+
 	ip_hdr(skb)->tot_len = htons(skb->len);
 	ip_send_check(ip_hdr(skb));
...
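The reason esp_xmit() must patch iphdr->protocol on this path is the ESP-in-UDP wire format of RFC 3948: the IPv4 header, then the NAT-T UDP header, then the ESP header, then the ciphertext. A compilable sketch of that layout using the kernel uapi header types; the struct name and the port comment are illustrative assumptions.

/* Sketch of the ESP-in-UDP (RFC 3948) wire layout that esp_xmit() must emit. */
#include <stdio.h>
#include <linux/ip.h>	/* struct iphdr, struct ip_esp_hdr */
#include <linux/udp.h>	/* struct udphdr */

struct espinudp_hdrs {			/* name is illustrative */
	struct iphdr ip;		/* ip.protocol must be IPPROTO_UDP: the fixup above */
	struct udphdr udp;		/* NAT-T ports, typically 4500/4500 */
	struct ip_esp_hdr esp;		/* spi and seq_no; seq_no is what esph->seq_no sets */
	/* encrypted payload and the ESP trailer follow the headers */
} __attribute__((packed));

int main(void)
{
	/* 20 (IPv4) + 8 (UDP) + 8 (ESP spi + seq_no) = 36 bytes of headers */
	printf("header bytes before ciphertext: %zu\n", sizeof(struct espinudp_hdrs));
	return 0;
}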
net/xfrm/xfrm_device.c:

@@ -261,9 +261,9 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
 	is_packet_offload = xuo->flags & XFRM_OFFLOAD_PACKET;
 
-	/* We don't yet support UDP encapsulation and TFC padding. */
-	if ((!is_packet_offload && x->encap) || x->tfcpad) {
-		NL_SET_ERR_MSG(extack, "Encapsulation and TFC padding can't be offloaded");
+	/* We don't yet support TFC padding. */
+	if (x->tfcpad) {
+		NL_SET_ERR_MSG(extack, "TFC padding can't be offloaded");
 		return -EINVAL;
 	}
...
net/xfrm/xfrm_input.c:

@@ -471,7 +471,8 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 	struct xfrm_offload *xo = xfrm_offload(skb);
 	struct sec_path *sp;
 
-	if (encap_type < 0 || (xo && xo->flags & XFRM_GRO)) {
+	if (encap_type < 0 || (xo && (xo->flags & XFRM_GRO || encap_type == 0 ||
+				      encap_type == UDP_ENCAP_ESPINUDP))) {
 		x = xfrm_input_state(skb);
 
 		if (unlikely(x->dir && x->dir != XFRM_SA_DIR_IN)) {
...
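This widened test is the core RX change: previously only packets re-entering via GRO (encap_type < 0, or xo with XFRM_GRO set) took the offload branch; now an skb that already carries offload state (xo != NULL) also takes it when it arrives from the plain ESP layer (encap_type == 0, e.g. xfrm6_esp_rcv()) or from the UDP layer (encap_type == UDP_ENCAP_ESPINUDP, e.g. udp_rcv()). A small self-contained model of the predicate; the model_xo type is a stand-in and the XFRM_GRO bit value is assumed for illustration.

/* Model of the widened xfrm_input() dispatch test (stand-ins, not kernel code). */
#include <stdbool.h>
#include <stdio.h>

#define XFRM_GRO 32		/* illustrative bit value; the real one lives in include/net/xfrm.h */
#define UDP_ENCAP_ESPINUDP 2	/* as in include/uapi/linux/udp.h */

struct model_xo { unsigned int flags; };	/* stands in for struct xfrm_offload */

static bool takes_offload_branch(int encap_type, const struct model_xo *xo)
{
	return encap_type < 0 || (xo && (xo->flags & XFRM_GRO || encap_type == 0 ||
					 encap_type == UDP_ENCAP_ESPINUDP));
}

int main(void)
{
	struct model_xo xo = { .flags = 0 };

	printf("GRO resume:       %d\n", takes_offload_branch(-1, NULL));			/* 1 */
	printf("ESP layer + xo:   %d\n", takes_offload_branch(0, &xo));			/* 1: new */
	printf("UDP encap + xo:   %d\n", takes_offload_branch(UDP_ENCAP_ESPINUDP, &xo));	/* 1: new */
	printf("UDP encap, no xo: %d\n", takes_offload_branch(UDP_ENCAP_ESPINUDP, NULL));	/* 0 */
	return 0;
}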
net/xfrm/xfrm_policy.c:

@@ -3718,12 +3718,15 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
 		pol = xfrm_in_fwd_icmp(skb, &fl, family, if_id);
 
 	if (!pol) {
+		const bool is_crypto_offload = sp &&
+			(xfrm_input_state(skb)->xso.type == XFRM_DEV_OFFLOAD_CRYPTO);
+
 		if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
 			return 0;
 		}
 
-		if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
+		if (sp && secpath_has_nontransport(sp, 0, &xerr_idx) && !is_crypto_offload) {
 			xfrm_secpath_reject(xerr_idx, skb, &fl);
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
 			return 0;
...
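With no matching policy and a non-block default, __xfrm_policy_check() used to reject any secpath containing a non-transport entry; the new is_crypto_offload test exempts packets whose state was decrypted by a crypto-offload device. A minimal model of the relaxed reject decision; all model_ names are illustrative stand-ins, not kernel code.

/* Model of the relaxed __xfrm_policy_check() reject (stand-ins, not kernel code). */
#include <stdbool.h>
#include <stdio.h>

enum model_offload_type { MODEL_OFFLOAD_NONE, MODEL_OFFLOAD_CRYPTO, MODEL_OFFLOAD_PACKET };

struct model_input {
	bool has_secpath;			/* sp != NULL */
	bool has_nontransport;			/* secpath_has_nontransport(sp, 0, &xerr_idx) */
	enum model_offload_type xso_type;	/* xfrm_input_state(skb)->xso.type */
};

/* Returns true when the packet would be rejected in the !pol branch. */
static bool rejected_without_policy(const struct model_input *in)
{
	const bool is_crypto_offload = in->has_secpath &&
				       in->xso_type == MODEL_OFFLOAD_CRYPTO;

	return in->has_secpath && in->has_nontransport && !is_crypto_offload;
}

int main(void)
{
	struct model_input hw = { true, true, MODEL_OFFLOAD_CRYPTO };
	struct model_input sw = { true, true, MODEL_OFFLOAD_NONE };

	printf("crypto offload: rejected=%d\n", rejected_without_policy(&hw));	/* 0: now accepted */
	printf("software path:  rejected=%d\n", rejected_without_policy(&sw));	/* 1: still rejected */
	return 0;
}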