Commit b6e5d27e authored by Sharath Chandra Vurukala, committed by David S. Miller

net: ethernet: rmnet: Add support for MAPv5 egress packets

Adding support for MAPv5 egress packets.

This involves adding the MAPv5 header and setting csum_valid_required
in the checksum header to request that the hardware compute the checksum.

The corresponding stats are incremented based on whether the checksum is
computed in software or in hardware.

A new stat has been added that counts the packets whose checksum is
computed by the hardware.
Signed-off-by: Sharath Chandra Vurukala <sharathv@codeaurora.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e1d9a90a
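
For context, the on-the-wire layout this patch produces is the MAP header followed by the
MAPv5 checksum offload header. The sketch below reproduces the structures and flag masks the
diff relies on; they are defined in include/linux/if_rmnet.h (introduced by the parent MAPv5
ingress change), not in this diff, so the exact field names and bit positions here are
assumptions to be checked against that header.

/* Sketch (not part of this diff): assumed MAP / MAPv5 header layout and
 * masks from include/linux/if_rmnet.h. Verify names and bit positions
 * against the kernel tree before relying on them.
 */
#include <linux/bits.h>
#include <linux/types.h>

struct rmnet_map_header {
	u8 flags;		/* MAP_PAD_LEN_MASK | MAP_NEXT_HEADER_FLAG | MAP_CMD_FLAG */
	u8 mux_id;
	__be16 pkt_len;		/* payload length including padding */
} __aligned(1);

#define MAP_PAD_LEN_MASK	GENMASK(5, 0)
#define MAP_NEXT_HEADER_FLAG	BIT(6)	/* set on egress when a v5 header follows */
#define MAP_CMD_FLAG		BIT(7)

struct rmnet_map_v5_csum_header {
	u8 header_info;		/* header type, next-header flag */
	u8 csum_info;		/* checksum offload request/valid bit */
	__be16 reserved;
} __aligned(1);

#define MAPV5_HDRINFO_NXT_HDR_FLAG	BIT(0)
#define MAPV5_HDRINFO_HDR_TYPE_FMASK	GENMASK(7, 1)
#define MAPV5_CSUMINFO_VALID_FLAG	BIT(7)	/* egress: request HW checksum */

#define RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD 2

On egress the driver pushes the v5 header with the CSUM_OFFLOAD type, sets the valid flag
when it can locate the packet's L4 checksum field, and sets MAP_NEXT_HEADER_FLAG in the MAP
header so the device knows the extra header is present.
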
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2013-2014, 2016-2018 The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2014, 2016-2018, 2021 The Linux Foundation.
* All rights reserved.
*
* RMNET Data configuration engine
*/
@@ -56,6 +57,7 @@ struct rmnet_priv_stats {
u64 csum_fragmented_pkt;
u64 csum_skipped;
u64 csum_sw;
u64 csum_hw;
};
struct rmnet_priv {
@@ -133,7 +133,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
struct rmnet_port *port, u8 mux_id,
struct net_device *orig_dev)
{
int required_headroom, additional_header_len;
int required_headroom, additional_header_len, csum_type = 0;
struct rmnet_map_header *map_header;
additional_header_len = 0;
@@ -141,18 +141,23 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
required_headroom += additional_header_len;
csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
} else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) {
additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
}
if (skb_headroom(skb) < required_headroom) {
if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
return -ENOMEM;
}
required_headroom += additional_header_len;
if (skb_cow_head(skb, required_headroom) < 0)
return -ENOMEM;
if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
rmnet_map_checksum_uplink_packet(skb, orig_dev);
if (csum_type)
rmnet_map_checksum_uplink_packet(skb, port, orig_dev,
csum_type);
map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
map_header = rmnet_map_add_map_header(skb, additional_header_len,
port, 0);
if (!map_header)
return -ENOMEM;
@@ -43,11 +43,15 @@ enum rmnet_map_commands {
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
struct rmnet_port *port);
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
int hdrlen, int pad);
int hdrlen,
struct rmnet_port *port,
int pad);
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev);
struct rmnet_port *port,
struct net_device *orig_dev,
int csum_type);
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb, u16 len);
#endif /* _RMNET_MAP_H_ */
@@ -251,12 +251,69 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
}
#endif
static void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
struct rmnet_port *port,
struct net_device *orig_dev)
{
struct rmnet_priv *priv = netdev_priv(orig_dev);
struct rmnet_map_v5_csum_header *ul_header;
ul_header = skb_push(skb, sizeof(*ul_header));
memset(ul_header, 0, sizeof(*ul_header));
ul_header->header_info = u8_encode_bits(RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD,
MAPV5_HDRINFO_HDR_TYPE_FMASK);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
void *iph = ip_hdr(skb);
__sum16 *check;
void *trans;
u8 proto;
if (skb->protocol != htons(ETH_P_IP) &&
skb->protocol != htons(ETH_P_IPV6)) {
priv->stats.csum_err_invalid_ip_version++;
goto sw_csum;
}
if (skb->protocol == htons(ETH_P_IP)) {
u16 ip_len = ((struct iphdr *)iph)->ihl * 4;
proto = ((struct iphdr *)iph)->protocol;
trans = iph + ip_len;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
u16 ip_len = sizeof(struct ipv6hdr);
proto = ((struct ipv6hdr *)iph)->nexthdr;
trans = iph + ip_len;
#else
priv->stats.csum_err_invalid_ip_version++;
goto sw_csum;
#endif /* CONFIG_IPV6 */
}
check = rmnet_map_get_csum_field(proto, trans);
if (check) {
skb->ip_summed = CHECKSUM_NONE;
/* Ask for checksum offloading */
ul_header->csum_info |= MAPV5_CSUMINFO_VALID_FLAG;
priv->stats.csum_hw++;
return;
}
}
sw_csum:
priv->stats.csum_sw++;
}
/* Adds MAP header to front of skb->data
* Padding is calculated and set appropriately in MAP header. Mux ID is
* initialized to 0.
*/
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
int hdrlen, int pad)
int hdrlen,
struct rmnet_port *port,
int pad)
{
struct rmnet_map_header *map_header;
u32 padding, map_datalen;
@@ -267,6 +324,10 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
skb_push(skb, sizeof(struct rmnet_map_header));
memset(map_header, 0, sizeof(struct rmnet_map_header));
/* Set next_hdr bit for csum offload packets */
if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
map_header->flags |= MAP_NEXT_HEADER_FLAG;
if (pad == RMNET_MAP_NO_PAD_BYTES) {
map_header->pkt_len = htons(map_datalen);
return map_header;
@@ -393,11 +454,8 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
return 0;
}
/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
* packets that are supported for UL checksum offload.
*/
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev)
static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev)
{
struct rmnet_priv *priv = netdev_priv(orig_dev);
struct rmnet_map_ul_csum_header *ul_header;
@@ -416,10 +474,12 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
if (skb->protocol == htons(ETH_P_IP)) {
rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
priv->stats.csum_hw++;
return;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
priv->stats.csum_hw++;
return;
#else
priv->stats.csum_err_invalid_ip_version++;
@@ -436,6 +496,26 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
priv->stats.csum_sw++;
}
/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
* packets that are supported for UL checksum offload.
*/
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct rmnet_port *port,
struct net_device *orig_dev,
int csum_type)
{
switch (csum_type) {
case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
break;
case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev);
break;
default:
break;
}
}
/* Process a MAPv5 packet header */
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
u16 len)
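
The v5 uplink path above decides between the new csum_hw counter and the csum_sw fallback by
asking rmnet_map_get_csum_field() for the packet's L4 checksum field. That helper already
exists in rmnet_map_data.c and is not part of this diff; a minimal sketch of its assumed
behavior:

/* Sketch of the assumed existing helper (not part of this diff): return a
 * pointer to the L4 checksum field for protocols the hardware can offload,
 * or NULL so the caller falls back to the csum_sw path.
 */
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>

static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
					 void *txporthdr)
{
	if (protocol == IPPROTO_TCP)
		return &((struct tcphdr *)txporthdr)->check;

	if (protocol == IPPROTO_UDP)
		return &((struct udphdr *)txporthdr)->check;

	/* Other protocols (e.g. ICMP) are not offloaded */
	return NULL;
}

A NULL return sends the packet down the existing software-checksum accounting path (csum_sw).
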
@@ -174,6 +174,7 @@ static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
"Checksum skipped on ip fragment",
"Checksum skipped",
"Checksum computed in software",
"Checksum computed in hardware",
};
static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -1237,6 +1237,7 @@ enum {
#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2)
#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3)
#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 4)
#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 5)
enum {
IFLA_RMNET_UNSPEC,
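
Finally, the new RMNET_FLAGS_EGRESS_MAP_CKSUMV5 bit is selected from userspace through the
existing IFLA_RMNET_FLAGS attribute (struct ifla_rmnet_flags) when an rmnet link is created
or changed. Below is a hedged sketch of the attribute payload a configuration tool might
send; the surrounding RTM_NEWLINK / IFLA_LINKINFO netlink plumbing is omitted, and the
particular flag combination is only an example.

#include <linux/if_link.h>

/* Example payload for the IFLA_RMNET_FLAGS attribute (nested under
 * IFLA_LINKINFO / IFLA_INFO_DATA next to IFLA_RMNET_MUX_ID): enable
 * MAPv5 egress checksum offload and clear the MAPv4 egress bit.
 */
static const struct ifla_rmnet_flags rmnet_cfg = {
	.flags = RMNET_FLAGS_INGRESS_DEAGGREGATION |
		 RMNET_FLAGS_EGRESS_MAP_CKSUMV5,
	.mask  = RMNET_FLAGS_INGRESS_DEAGGREGATION |
		 RMNET_FLAGS_EGRESS_MAP_CKSUMV4 |
		 RMNET_FLAGS_EGRESS_MAP_CKSUMV5,
};

In the driver, rmnet_newlink()/rmnet_changelink() fold flags & mask into the port
configuration, which the egress handler above tests via port->data_format.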