Commit 32764c66 authored by Jiri Pirko's avatar Jiri Pirko Committed by David S. Miller

net: 8021q: move vlan offload registrations into vlan_core

Currently, the vlan packet offloads are registered only upon 8021q module
load. However, even without this module loaded, the offloads could be
utilized, for example by openvswitch datapath. As reported by Michael,
utilizing them there yields a 2x to 5x performance improvement, depending on the testcase.

So move the vlan offload registrations into vlan_core and make this
available even without 8021q module loaded.
Reported-by: Michael Shteinbok <michaelsh86@gmail.com>
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Tested-by: Michael Shteinbok <michaelsh86@gmail.com>
Reviewed-by: David Ahern <dsahern@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 99310e73
......@@ -648,93 +648,6 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
return err;
}
/* GRO receive handler for VLAN-tagged frames: validates that the VLAN
 * header is available, looks up the offload handler for the encapsulated
 * protocol, invalidates held packets whose VLAN headers differ, then pulls
 * the VLAN header and hands the skb to the inner protocol's gro_receive.
 * Returns the result of the inner handler (NULL if nothing to flush).
 */
static struct sk_buff *vlan_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	const struct packet_offload *ptype;
	unsigned int hlen, off_vlan;
	struct sk_buff *pp = NULL;
	struct vlan_hdr *vhdr;
	struct sk_buff *p;
	__be16 type;
	int flush = 1;	/* flush unless an inner-protocol handler is found */

	off_vlan = skb_gro_offset(skb);
	hlen = off_vlan + sizeof(*vhdr);

	/* Fast path assumes the header is already linear; fall back to the
	 * slow path (which may fail) when more data must be pulled in.
	 */
	vhdr = skb_gro_header_fast(skb, off_vlan);
	if (skb_gro_header_hard(skb, hlen)) {
		vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
		if (unlikely(!vhdr))
			goto out;
	}

	type = vhdr->h_vlan_encapsulated_proto;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

	flush = 0;

	/* Packets already held for GRO stay "same flow" only if their VLAN
	 * header matches this one; compare_vlan_header() is nonzero on
	 * mismatch.
	 */
	list_for_each_entry(p, head, list) {
		struct vlan_hdr *vhdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
		if (compare_vlan_header(vhdr, vhdr2))
			NAPI_GRO_CB(p)->same_flow = 0;
	}

	/* Consume the VLAN header and fix up the receive checksum before
	 * dispatching to the inner protocol's gro_receive callback.
	 */
	skb_gro_pull(skb, sizeof(*vhdr));
	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();
out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}
/* GRO completion for a VLAN-tagged frame: delegate to the offload handler
 * of the encapsulated protocol, with the header offset advanced past the
 * VLAN header. Returns -ENOENT when no handler is registered for the
 * inner ethertype.
 */
static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
	struct packet_offload *ptype;
	int ret = -ENOENT;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(vhdr->h_vlan_encapsulated_proto);
	if (ptype)
		ret = ptype->callbacks.gro_complete(skb,
						    nhoff + sizeof(*vhdr));
	rcu_read_unlock();

	return ret;
}
/* Offload registrations for both VLAN ethertypes (802.1Q and 802.1AD),
 * sharing the same GRO callbacks.
 * NOTE(review): priority 10 — presumably orders these relative to other
 * offloads for the same type; confirm against dev_add_offload()'s list
 * insertion before relying on it.
 */
static struct packet_offload vlan_packet_offloads[] __read_mostly = {
	{
		.type = cpu_to_be16(ETH_P_8021Q),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
	{
		.type = cpu_to_be16(ETH_P_8021AD),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
};
static int __net_init vlan_init_net(struct net *net)
{
struct vlan_net *vn = net_generic(net, vlan_net_id);
......@@ -762,7 +675,6 @@ static struct pernet_operations vlan_net_ops = {
static int __init vlan_proto_init(void)
{
int err;
unsigned int i;
pr_info("%s v%s\n", vlan_fullname, vlan_version);
......@@ -786,9 +698,6 @@ static int __init vlan_proto_init(void)
if (err < 0)
goto err5;
for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
dev_add_offload(&vlan_packet_offloads[i]);
vlan_ioctl_set(vlan_ioctl_handler);
return 0;
......@@ -806,13 +715,8 @@ static int __init vlan_proto_init(void)
static void __exit vlan_cleanup_module(void)
{
unsigned int i;
vlan_ioctl_set(NULL);
for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
dev_remove_offload(&vlan_packet_offloads[i]);
vlan_netlink_fini();
unregister_netdevice_notifier(&vlan_notifier_block);
......
......@@ -453,3 +453,102 @@ bool vlan_uses_dev(const struct net_device *dev)
return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);
/* GRO receive handler for VLAN-tagged frames: validates that the VLAN
 * header is available, looks up the offload handler for the encapsulated
 * protocol, invalidates held packets whose VLAN headers differ, then pulls
 * the VLAN header and hands the skb to the inner protocol's gro_receive.
 * Returns the result of the inner handler (NULL if nothing to flush).
 */
static struct sk_buff *vlan_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	const struct packet_offload *ptype;
	unsigned int hlen, off_vlan;
	struct sk_buff *pp = NULL;
	struct vlan_hdr *vhdr;
	struct sk_buff *p;
	__be16 type;
	int flush = 1;	/* flush unless an inner-protocol handler is found */

	off_vlan = skb_gro_offset(skb);
	hlen = off_vlan + sizeof(*vhdr);

	/* Fast path assumes the header is already linear; fall back to the
	 * slow path (which may fail) when more data must be pulled in.
	 */
	vhdr = skb_gro_header_fast(skb, off_vlan);
	if (skb_gro_header_hard(skb, hlen)) {
		vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
		if (unlikely(!vhdr))
			goto out;
	}

	type = vhdr->h_vlan_encapsulated_proto;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

	flush = 0;

	/* Packets already held for GRO stay "same flow" only if their VLAN
	 * header matches this one; compare_vlan_header() is nonzero on
	 * mismatch.
	 */
	list_for_each_entry(p, head, list) {
		struct vlan_hdr *vhdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
		if (compare_vlan_header(vhdr, vhdr2))
			NAPI_GRO_CB(p)->same_flow = 0;
	}

	/* Consume the VLAN header and fix up the receive checksum before
	 * dispatching to the inner protocol's gro_receive callback.
	 */
	skb_gro_pull(skb, sizeof(*vhdr));
	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();
out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}
/* GRO completion for a VLAN-tagged frame: delegates to the offload
 * handler of the encapsulated protocol, skipping past the VLAN header.
 * Returns -ENOENT when no handler is registered for the inner ethertype.
 */
static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
	__be16 type = vhdr->h_vlan_encapsulated_proto;
	struct packet_offload *ptype;
	int err = -ENOENT;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));
	rcu_read_unlock();

	return err;
}
/* Offload registrations for both VLAN ethertypes (802.1Q and 802.1AD),
 * sharing the same GRO callbacks.
 * NOTE(review): priority 10 — presumably orders these relative to other
 * offloads for the same type; confirm against dev_add_offload()'s list
 * insertion before relying on it.
 */
static struct packet_offload vlan_packet_offloads[] __read_mostly = {
	{
		.type = cpu_to_be16(ETH_P_8021Q),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
	{
		.type = cpu_to_be16(ETH_P_8021AD),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
};
/* Register the VLAN GRO offload handlers. Run as an fs_initcall so the
 * offloads are available even when the 8021q module is not loaded.
 * Always returns 0.
 */
static int __init vlan_offload_init(void)
{
	struct packet_offload *po;

	for (po = vlan_packet_offloads;
	     po < vlan_packet_offloads + ARRAY_SIZE(vlan_packet_offloads);
	     po++)
		dev_add_offload(po);

	return 0;
}
fs_initcall(vlan_offload_init);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment