Commit 56f0dcc5 authored by Michael S. Tsirkin

tun: TUN_VNET_LE support, fix sparse warnings for virtio headers

Pretty straight-forward: convert all fields to/from
virtio endian-ness.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Jason Wang <jasowang@redhat.com>


parent e999d6ea
...@@ -111,7 +111,7 @@ do { \ ...@@ -111,7 +111,7 @@ do { \
#define TUN_FASYNC IFF_ATTACH_QUEUE #define TUN_FASYNC IFF_ATTACH_QUEUE
#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \ #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
IFF_MULTI_QUEUE) IFF_VNET_LE | IFF_MULTI_QUEUE)
#define GOODCOPY_LEN 128 #define GOODCOPY_LEN 128
#define FLT_EXACT_COUNT 8 #define FLT_EXACT_COUNT 8
...@@ -205,6 +205,16 @@ struct tun_struct { ...@@ -205,6 +205,16 @@ struct tun_struct {
u32 flow_count; u32 flow_count;
}; };
/* Convert a 16-bit virtio field from the device's wire byte order to
 * CPU byte order.  The wire order is little-endian when userspace set
 * IFF_VNET_LE on this tun device; otherwise virtio's default rules apply. */
static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	int vnet_le = !!(tun->flags & IFF_VNET_LE);

	return __virtio16_to_cpu(vnet_le, val);
}
/* Convert a 16-bit CPU-order value to the wire byte order expected by
 * this tun device: little-endian when IFF_VNET_LE is set, otherwise
 * virtio's default byte-order rules. */
static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	int vnet_le = !!(tun->flags & IFF_VNET_LE);

	return __cpu_to_virtio16(vnet_le, val);
}
static inline u32 tun_hashfn(u32 rxhash) static inline u32 tun_hashfn(u32 rxhash)
{ {
return rxhash & 0x3ff; return rxhash & 0x3ff;
...@@ -1053,10 +1063,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, ...@@ -1053,10 +1063,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
return -EFAULT; return -EFAULT;
if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
gso.csum_start + gso.csum_offset + 2 > gso.hdr_len) tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
gso.hdr_len = gso.csum_start + gso.csum_offset + 2; gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
if (gso.hdr_len > len) if (tun16_to_cpu(tun, gso.hdr_len) > len)
return -EINVAL; return -EINVAL;
offset += tun->vnet_hdr_sz; offset += tun->vnet_hdr_sz;
} }
...@@ -1064,7 +1074,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, ...@@ -1064,7 +1074,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
align += NET_IP_ALIGN; align += NET_IP_ALIGN;
if (unlikely(len < ETH_HLEN || if (unlikely(len < ETH_HLEN ||
(gso.hdr_len && gso.hdr_len < ETH_HLEN))) (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
return -EINVAL; return -EINVAL;
} }
...@@ -1075,7 +1085,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, ...@@ -1075,7 +1085,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
* enough room for skb expand head in case it is used. * enough room for skb expand head in case it is used.
* The rest of the buffer is mapped from userspace. * The rest of the buffer is mapped from userspace.
*/ */
copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN; copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
if (copylen > good_linear) if (copylen > good_linear)
copylen = good_linear; copylen = good_linear;
linear = copylen; linear = copylen;
...@@ -1085,10 +1095,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, ...@@ -1085,10 +1095,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (!zerocopy) { if (!zerocopy) {
copylen = len; copylen = len;
if (gso.hdr_len > good_linear) if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
linear = good_linear; linear = good_linear;
else else
linear = gso.hdr_len; linear = tun16_to_cpu(tun, gso.hdr_len);
} }
skb = tun_alloc_skb(tfile, align, copylen, linear, noblock); skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
...@@ -1115,8 +1125,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, ...@@ -1115,8 +1125,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
} }
if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
if (!skb_partial_csum_set(skb, gso.csum_start, if (!skb_partial_csum_set(skb, tun16_to_cpu(tun, gso.csum_start),
gso.csum_offset)) { tun16_to_cpu(tun, gso.csum_offset))) {
tun->dev->stats.rx_frame_errors++; tun->dev->stats.rx_frame_errors++;
kfree_skb(skb); kfree_skb(skb);
return -EINVAL; return -EINVAL;
...@@ -1184,7 +1194,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, ...@@ -1184,7 +1194,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN) if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
skb_shinfo(skb)->gso_size = gso.gso_size; skb_shinfo(skb)->gso_size = tun16_to_cpu(tun, gso.gso_size);
if (skb_shinfo(skb)->gso_size == 0) { if (skb_shinfo(skb)->gso_size == 0) {
tun->dev->stats.rx_frame_errors++; tun->dev->stats.rx_frame_errors++;
kfree_skb(skb); kfree_skb(skb);
...@@ -1276,8 +1286,8 @@ static ssize_t tun_put_user(struct tun_struct *tun, ...@@ -1276,8 +1286,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
struct skb_shared_info *sinfo = skb_shinfo(skb); struct skb_shared_info *sinfo = skb_shinfo(skb);
/* This is a hint as to how much should be linear. */ /* This is a hint as to how much should be linear. */
gso.hdr_len = skb_headlen(skb); gso.hdr_len = cpu_to_tun16(tun, skb_headlen(skb));
gso.gso_size = sinfo->gso_size; gso.gso_size = cpu_to_tun16(tun, sinfo->gso_size);
if (sinfo->gso_type & SKB_GSO_TCPV4) if (sinfo->gso_type & SKB_GSO_TCPV4)
gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else if (sinfo->gso_type & SKB_GSO_TCPV6) else if (sinfo->gso_type & SKB_GSO_TCPV6)
...@@ -1285,12 +1295,12 @@ static ssize_t tun_put_user(struct tun_struct *tun, ...@@ -1285,12 +1295,12 @@ static ssize_t tun_put_user(struct tun_struct *tun,
else { else {
pr_err("unexpected GSO type: " pr_err("unexpected GSO type: "
"0x%x, gso_size %d, hdr_len %d\n", "0x%x, gso_size %d, hdr_len %d\n",
sinfo->gso_type, gso.gso_size, sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
gso.hdr_len); tun16_to_cpu(tun, gso.hdr_len));
print_hex_dump(KERN_ERR, "tun: ", print_hex_dump(KERN_ERR, "tun: ",
DUMP_PREFIX_NONE, DUMP_PREFIX_NONE,
16, 1, skb->head, 16, 1, skb->head,
min((int)gso.hdr_len, 64), true); min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
return -EINVAL; return -EINVAL;
} }
...@@ -1301,9 +1311,9 @@ static ssize_t tun_put_user(struct tun_struct *tun, ...@@ -1301,9 +1311,9 @@ static ssize_t tun_put_user(struct tun_struct *tun,
if (skb->ip_summed == CHECKSUM_PARTIAL) { if (skb->ip_summed == CHECKSUM_PARTIAL) {
gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
gso.csum_start = skb_checksum_start_offset(skb) + gso.csum_start = cpu_to_tun16(tun, skb_checksum_start_offset(skb) +
vlan_hlen; vlan_hlen);
gso.csum_offset = skb->csum_offset; gso.csum_offset = cpu_to_tun16(tun, skb->csum_offset);
} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
gso.flags = VIRTIO_NET_HDR_F_DATA_VALID; gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
} /* else everything is zero */ } /* else everything is zero */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment