Commit df6ce470 authored by David S. Miller

Merge branch 'vlan_action'

Jiri Pirko says:

====================
sched: introduce vlan action

Please see the individual patches for info
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents fe159122 c7e2b968
......@@ -475,12 +475,8 @@ static void rlb_update_client(struct rlb_client_info *client_info)
skb->dev = client_info->slave->dev;
if (client_info->vlan_id) {
skb = vlan_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id);
if (!skb) {
netdev_err(client_info->slave->bond->dev,
"failed to insert VLAN tag\n");
continue;
}
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
client_info->vlan_id);
}
arp_xmit(skb);
......@@ -951,13 +947,8 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
skb->priority = TC_PRIO_CONTROL;
skb->dev = slave->dev;
if (vid) {
skb = vlan_put_tag(skb, vlan_proto, vid);
if (!skb) {
netdev_err(slave->bond->dev, "failed to insert VLAN tag\n");
return;
}
}
if (vid)
__vlan_hwaccel_put_tag(skb, vlan_proto, vid);
dev_queue_xmit(skb);
}
......
......@@ -2146,7 +2146,7 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
netdev_dbg(slave_dev, "inner tag: proto %X vid %X\n",
ntohs(outer_tag->vlan_proto), tags->vlan_id);
skb = __vlan_put_tag(skb, tags->vlan_proto,
skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
tags->vlan_id);
if (!skb) {
net_err_ratelimited("failed to insert inner VLAN tag\n");
......@@ -2159,12 +2159,8 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
if (outer_tag->vlan_id) {
netdev_dbg(slave_dev, "outer tag: proto %X vid %X\n",
ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
skb = vlan_put_tag(skb, outer_tag->vlan_proto,
__vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
outer_tag->vlan_id);
if (!skb) {
net_err_ratelimited("failed to insert outer VLAN tag\n");
return;
}
}
xmit:
......
......@@ -887,7 +887,8 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
}
if (vlan_tag) {
skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
vlan_tag);
if (unlikely(!skb))
return skb;
skb->vlan_tci = 0;
......@@ -896,7 +897,8 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
/* Insert the outer VLAN, if any */
if (adapter->qnq_vid) {
vlan_tag = adapter->qnq_vid;
skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
vlan_tag);
if (unlikely(!skb))
return skb;
if (skip_hw_vlan)
......
......@@ -402,7 +402,7 @@ static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_
/* map MBIM session to VLAN */
if (tci)
vlan_put_tag(skb, htons(ETH_P_8021Q), tci);
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
err:
return skb;
}
......
......@@ -1599,15 +1599,10 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
if (unlikely(err))
return err;
if (vlan_tx_tag_present(skb)) {
if (WARN_ON(!__vlan_put_tag(skb,
skb->vlan_proto,
vlan_tx_tag_get(skb))))
skb = vlan_hwaccel_push_inside(skb);
if (WARN_ON(!skb))
return -ENOMEM;
skb->vlan_tci = 0;
}
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_FLAGS);
vxh->vx_vni = vni;
......@@ -1643,15 +1638,10 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
if (unlikely(err))
return err;
if (vlan_tx_tag_present(skb)) {
if (WARN_ON(!__vlan_put_tag(skb,
skb->vlan_proto,
vlan_tx_tag_get(skb))))
skb = vlan_hwaccel_push_inside(skb);
if (WARN_ON(!skb))
return -ENOMEM;
skb->vlan_tci = 0;
}
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_FLAGS);
vxh->vx_vni = vni;
......
......@@ -1669,10 +1669,8 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) {
/* must set skb->dev before calling vlan_put_tag */
skb->dev = fcoe->realdev;
skb = __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
vlan_dev_vlan_id(fcoe->netdev));
if (!skb)
return -ENOMEM;
} else
skb->dev = fcoe->netdev;
......
......@@ -282,28 +282,24 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features,
}
/**
* vlan_insert_tag - regular VLAN tag inserting
* __vlan_insert_tag - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
* Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
Returns error if skb_cow_head fails.
*
* Does not change skb->protocol so this function can be used during receive.
*/
static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
static inline int __vlan_insert_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
{
struct vlan_ethhdr *veth;
if (skb_cow_head(skb, VLAN_HLEN) < 0) {
dev_kfree_skb_any(skb);
return NULL;
}
if (skb_cow_head(skb, VLAN_HLEN) < 0)
return -ENOMEM;
veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
/* Move the mac addresses to the beginning of the new header. */
......@@ -316,12 +312,13 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
/* now, the TCI */
veth->h_vlan_TCI = htons(vlan_tci);
return skb;
return 0;
}
/**
* __vlan_put_tag - regular VLAN tag inserting
* vlan_insert_tag - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
......@@ -329,49 +326,91 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
*
* Does not change skb->protocol so this function can be used during receive.
*/
static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb,
static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
{
skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
if (skb)
skb->protocol = vlan_proto;
int err;
err = __vlan_insert_tag(skb, vlan_proto, vlan_tci);
if (err) {
dev_kfree_skb_any(skb);
return NULL;
}
return skb;
}
/**
* __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
* vlan_insert_tag_set_proto - regular VLAN tag inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
*
* Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
* Inserts the VLAN tag into @skb as part of the payload
* Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
*/
static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
__be16 vlan_proto,
u16 vlan_tci)
{
skb->vlan_proto = vlan_proto;
skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
if (skb)
skb->protocol = vlan_proto;
return skb;
}
/*
* __vlan_hwaccel_push_inside - pushes vlan tag to the payload
* @skb: skbuff to tag
*
* Pushes the VLAN tag from @skb->vlan_tci inside to the payload.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
*/
static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
{
skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
vlan_tx_tag_get(skb));
if (likely(skb))
skb->vlan_tci = 0;
return skb;
}
/*
* vlan_hwaccel_push_inside - pushes vlan tag to the payload
* @skb: skbuff to tag
*
Checks if a tag is present in @skb->vlan_tci and, if it is, pushes the
* VLAN tag from @skb->vlan_tci inside to the payload.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
*/
static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb)
{
if (vlan_tx_tag_present(skb))
skb = __vlan_hwaccel_push_inside(skb);
return skb;
}
/**
* vlan_put_tag - inserts VLAN tag according to device features
* __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
* @skb: skbuff to tag
* @vlan_proto: VLAN encapsulation protocol
* @vlan_tci: VLAN TCI to insert
*
* Assumes skb->dev is the target that will xmit this frame.
* Returns a VLAN tagged skb.
* Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
*/
static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb,
static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
{
if (vlan_hw_offload_capable(skb->dev->features, vlan_proto)) {
return __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
} else {
return __vlan_put_tag(skb, vlan_proto, vlan_tci);
}
skb->vlan_proto = vlan_proto;
skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
}
/**
......
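For illustration, here is a minimal sketch (not part of this series; the function name is hypothetical) of how a transmit path can use the reworked helpers. It simply mirrors the validate_xmit_vlan() change later in this merge: when the device cannot hardware-accelerate skb->vlan_proto, the accelerated tag is pushed into the payload with __vlan_hwaccel_push_inside(), which frees the skb and returns NULL on error.

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

/* Hypothetical helper mirroring the new validate_xmit_vlan() pattern:
 * software-insert the accelerated VLAN tag when the device lacks
 * hardware tag insertion for skb->vlan_proto.
 */
static struct sk_buff *example_push_vlan_if_needed(struct sk_buff *skb,
						   struct net_device *dev)
{
	/* __vlan_hwaccel_push_inside() inserts the tag into the payload,
	 * sets skb->protocol and clears skb->vlan_tci; it returns NULL
	 * (and frees the skb) on error, so callers only check for NULL.
	 */
	if (vlan_tx_tag_present(skb) &&
	    !vlan_hw_offload_capable(dev->features, skb->vlan_proto))
		skb = __vlan_hwaccel_push_inside(skb);
	return skb;
}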
......@@ -2678,6 +2678,9 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet);
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
struct skb_checksum_ops {
__wsum (*update)(const void *mem, int len, __wsum wsum);
......
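The three helpers declared above are implemented later in this merge. As a hedged sketch of the skb_ensure_writable() pattern (the helper name example_dec_ttl and the TTL use case are invented for illustration), a caller makes the bytes it is about to modify linear and private before writing:

#include <linux/skbuff.h>
#include <linux/ip.h>

/* Hypothetical example: decrement the IPv4 TTL in place.
 * skb_ensure_writable() pulls the needed headers into the linear data
 * area and unclones the skb if required, so the write below is safe.
 */
static int example_dec_ttl(struct sk_buff *skb)
{
	struct iphdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				       sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);
	if (nh->ttl)
		nh->ttl--;
	/* A real implementation would also fix up the IP checksum,
	 * e.g. with csum_replace2(); omitted here for brevity.
	 */
	return 0;
}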
/*
* Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef __NET_TC_VLAN_H
#define __NET_TC_VLAN_H
#include <net/act_api.h>
#define VLAN_F_POP 0x1
#define VLAN_F_PUSH 0x2
struct tcf_vlan {
struct tcf_common common;
int tcfv_action;
__be16 tcfv_push_vid;
__be16 tcfv_push_proto;
};
#define to_vlan(a) \
container_of(a->priv, struct tcf_vlan, common)
#endif /* __NET_TC_VLAN_H */
/*
* Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef __LINUX_TC_VLAN_H
#define __LINUX_TC_VLAN_H
#include <linux/pkt_cls.h>
#define TCA_ACT_VLAN 12
#define TCA_VLAN_ACT_POP 1
#define TCA_VLAN_ACT_PUSH 2
struct tc_vlan {
tc_gen;
int v_action;
};
enum {
TCA_VLAN_UNSPEC,
TCA_VLAN_TM,
TCA_VLAN_PARMS,
TCA_VLAN_PUSH_VLAN_ID,
TCA_VLAN_PUSH_VLAN_PROTOCOL,
__TCA_VLAN_MAX,
};
#define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1)
#endif
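For illustration only, a userspace-side sketch of the values a configuring program would hand to the new act_vlan module for a "push VLAN 100, 802.1Q" action. The netlink packing itself (nested attributes under the action options) is omitted, and the TC_ACT_PIPE verdict is just one plausible choice.

#include <linux/tc_act/tc_vlan.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>		/* htons() in this userspace sketch */

/* Hypothetical view of what act_vlan parses: TCA_VLAN_PARMS carries
 * struct tc_vlan; a push additionally needs TCA_VLAN_PUSH_VLAN_ID and,
 * optionally, TCA_VLAN_PUSH_VLAN_PROTOCOL (the kernel defaults to
 * 802.1Q when the protocol attribute is absent).
 */
static void example_fill_push_vlan(struct tc_vlan *parms,
				   __u16 *push_vid, __be16 *push_proto)
{
	parms->action   = TC_ACT_PIPE;		/* generic tc verdict */
	parms->v_action = TCA_VLAN_ACT_PUSH;	/* vlan sub-action */
	*push_vid   = 100;			/* TCA_VLAN_PUSH_VLAN_ID */
	*push_proto = htons(ETH_P_8021Q);	/* TCA_VLAN_PUSH_VLAN_PROTOCOL */
}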
......@@ -150,7 +150,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
u16 vlan_tci;
vlan_tci = vlan->vlan_id;
vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
skb = __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
__vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci);
}
skb->dev = vlan->real_dev;
......
......@@ -199,7 +199,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
if (skb->vlan_proto != proto) {
/* Protocol-mismatch, empty out vlan_tci for new tag */
skb_push(skb, ETH_HLEN);
skb = __vlan_put_tag(skb, skb->vlan_proto,
skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
vlan_tx_tag_get(skb));
if (unlikely(!skb))
return false;
......
......@@ -2644,12 +2644,8 @@ static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
netdev_features_t features)
{
if (vlan_tx_tag_present(skb) &&
!vlan_hw_offload_capable(features, skb->vlan_proto)) {
skb = __vlan_put_tag(skb, skb->vlan_proto,
vlan_tx_tag_get(skb));
if (skb)
skb->vlan_tci = 0;
}
!vlan_hw_offload_capable(features, skb->vlan_proto))
skb = __vlan_hwaccel_push_inside(skb);
return skb;
}
......
......@@ -79,8 +79,7 @@ static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
if (vlan_tx_tag_present(skb) &&
!vlan_hw_offload_capable(features, skb->vlan_proto)) {
skb = __vlan_put_tag(skb, skb->vlan_proto,
vlan_tx_tag_get(skb));
skb = __vlan_hwaccel_push_inside(skb);
if (unlikely(!skb)) {
/* This is actually a packet drop, but we
* don't want the code that calls this
......@@ -88,7 +87,6 @@ static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
*/
goto out;
}
skb->vlan_tci = 0;
}
status = netdev_start_xmit(skb, dev, txq, false);
......
......@@ -4151,6 +4151,113 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
}
EXPORT_SYMBOL(skb_vlan_untag);
int skb_ensure_writable(struct sk_buff *skb, int write_len)
{
if (!pskb_may_pull(skb, write_len))
return -ENOMEM;
if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
return 0;
return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
EXPORT_SYMBOL(skb_ensure_writable);
/* remove VLAN header from packet and update csum accordingly. */
static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
{
struct vlan_hdr *vhdr;
unsigned int offset = skb->data - skb_mac_header(skb);
int err;
__skb_push(skb, offset);
err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
if (unlikely(err))
goto pull;
skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
*vlan_tci = ntohs(vhdr->h_vlan_TCI);
memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
__skb_pull(skb, VLAN_HLEN);
vlan_set_encap_proto(skb, vhdr);
skb->mac_header += VLAN_HLEN;
if (skb_network_offset(skb) < ETH_HLEN)
skb_set_network_header(skb, ETH_HLEN);
skb_reset_mac_len(skb);
pull:
__skb_pull(skb, offset);
return err;
}
int skb_vlan_pop(struct sk_buff *skb)
{
u16 vlan_tci;
__be16 vlan_proto;
int err;
if (likely(vlan_tx_tag_present(skb))) {
skb->vlan_tci = 0;
} else {
if (unlikely((skb->protocol != htons(ETH_P_8021Q) &&
skb->protocol != htons(ETH_P_8021AD)) ||
skb->len < VLAN_ETH_HLEN))
return 0;
err = __skb_vlan_pop(skb, &vlan_tci);
if (err)
return err;
}
/* move next vlan tag to hw accel tag */
if (likely((skb->protocol != htons(ETH_P_8021Q) &&
skb->protocol != htons(ETH_P_8021AD)) ||
skb->len < VLAN_ETH_HLEN))
return 0;
vlan_proto = skb->protocol;
err = __skb_vlan_pop(skb, &vlan_tci);
if (unlikely(err))
return err;
__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
return 0;
}
EXPORT_SYMBOL(skb_vlan_pop);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
{
if (vlan_tx_tag_present(skb)) {
unsigned int offset = skb->data - skb_mac_header(skb);
int err;
/* __vlan_insert_tag expects skb->data to point to the mac header.
* So change skb->data before calling it and change back to
* original position later
*/
__skb_push(skb, offset);
err = __vlan_insert_tag(skb, skb->vlan_proto,
vlan_tx_tag_get(skb));
if (err)
return err;
skb->protocol = skb->vlan_proto;
skb->mac_len += VLAN_HLEN;
__skb_pull(skb, offset);
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_add(skb->csum, csum_partial(skb->data
+ (2 * ETH_ALEN), VLAN_HLEN, 0));
}
__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
return 0;
}
EXPORT_SYMBOL(skb_vlan_push);
/**
* alloc_skb_with_frags - allocate skb with page frags
*
......
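As a hedged illustration of the new helpers' semantics (this is not code from the series; the helper below is invented): skb_vlan_pop() removes one tag per call, either the accelerated tag if present or one in-payload tag, and then promotes a following in-payload tag into skb->vlan_tci, so stripping every tag is a bounded loop.

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

/* Hypothetical helper: strip all VLAN tags using the new skb_vlan_pop().
 * Each call removes at least one tag (hwaccel or in-payload) and moves a
 * following in-payload tag, if any, into skb->vlan_tci, so the loop
 * terminates once nothing VLAN-tagged remains.
 */
static int example_strip_all_vlans(struct sk_buff *skb)
{
	int err;

	while (vlan_tx_tag_present(skb) ||
	       ((skb->protocol == htons(ETH_P_8021Q) ||
		 skb->protocol == htons(ETH_P_8021AD)) &&
		skb->len >= VLAN_ETH_HLEN)) {
		err = skb_vlan_pop(skb);
		if (err)
			return err;
	}
	return 0;
}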
......@@ -131,15 +131,9 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
if (unlikely(err))
return err;
if (vlan_tx_tag_present(skb)) {
if (unlikely(!__vlan_put_tag(skb,
skb->vlan_proto,
vlan_tx_tag_get(skb)))) {
err = -ENOMEM;
return err;
}
skb->vlan_tci = 0;
}
skb = vlan_hwaccel_push_inside(skb);
if (unlikely(!skb))
return -ENOMEM;
gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
......
......@@ -119,17 +119,6 @@ static bool is_flow_key_valid(const struct sw_flow_key *key)
return !!key->eth.type;
}
static int make_writable(struct sk_buff *skb, int write_len)
{
if (!pskb_may_pull(skb, write_len))
return -ENOMEM;
if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
return 0;
return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
const struct ovs_action_push_mpls *mpls)
{
......@@ -171,14 +160,11 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
struct ethhdr *hdr;
int err;
err = make_writable(skb, skb->mac_len + MPLS_HLEN);
err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
if (unlikely(err))
return err;
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_sub(skb->csum,
csum_partial(skb_mpls_header(skb),
MPLS_HLEN, 0));
skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);
memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
skb->mac_len);
......@@ -204,7 +190,7 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *key,
__be32 *stack;
int err;
err = make_writable(skb, skb->mac_len + MPLS_HLEN);
err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
if (unlikely(err))
return err;
......@@ -220,100 +206,34 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *key,
return 0;
}
/* remove VLAN header from packet and update csum accordingly. */
static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
struct vlan_hdr *vhdr;
int err;
err = make_writable(skb, VLAN_ETH_HLEN);
if (unlikely(err))
return err;
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_sub(skb->csum, csum_partial(skb->data
+ (2 * ETH_ALEN), VLAN_HLEN, 0));
vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
*current_tci = vhdr->h_vlan_TCI;
memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
__skb_pull(skb, VLAN_HLEN);
vlan_set_encap_proto(skb, vhdr);
skb->mac_header += VLAN_HLEN;
if (skb_network_offset(skb) < ETH_HLEN)
skb_set_network_header(skb, ETH_HLEN);
/* Update mac_len for subsequent MPLS actions */
skb_reset_mac_len(skb);
return 0;
}
static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
__be16 tci;
int err;
if (likely(vlan_tx_tag_present(skb))) {
skb->vlan_tci = 0;
} else {
if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
skb->len < VLAN_ETH_HLEN))
return 0;
err = __pop_vlan_tci(skb, &tci);
if (err)
return err;
}
/* move next vlan tag to hw accel tag */
if (likely(skb->protocol != htons(ETH_P_8021Q) ||
skb->len < VLAN_ETH_HLEN)) {
key->eth.tci = 0;
return 0;
}
err = __pop_vlan_tci(skb, &tci);
if (unlikely(err))
return err;
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci));
return 0;
err = skb_vlan_pop(skb);
if (vlan_tx_tag_present(skb))
invalidate_flow_key(key);
else
key->eth.tci = 0;
return err;
}
static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
const struct ovs_action_push_vlan *vlan)
{
if (unlikely(vlan_tx_tag_present(skb))) {
u16 current_tag;
/* push down current VLAN tag */
current_tag = vlan_tx_tag_get(skb);
if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
return -ENOMEM;
/* Update mac_len for subsequent MPLS actions */
skb->mac_len += VLAN_HLEN;
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_add(skb->csum, csum_partial(skb->data
+ (2 * ETH_ALEN), VLAN_HLEN, 0));
} else {
key->eth.tci = vlan->vlan_tci;
}
__vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
return 0;
if (vlan_tx_tag_present(skb))
invalidate_flow_key(key);
else
key->eth.tci = vlan->vlan_tci;
return skb_vlan_push(skb, vlan->vlan_tpid,
ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}
static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *key,
const struct ovs_key_ethernet *eth_key)
{
int err;
err = make_writable(skb, ETH_HLEN);
err = skb_ensure_writable(skb, ETH_HLEN);
if (unlikely(err))
return err;
......@@ -415,7 +335,7 @@ static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *key,
struct iphdr *nh;
int err;
err = make_writable(skb, skb_network_offset(skb) +
err = skb_ensure_writable(skb, skb_network_offset(skb) +
sizeof(struct iphdr));
if (unlikely(err))
return err;
......@@ -453,7 +373,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *key,
__be32 *saddr;
__be32 *daddr;
err = make_writable(skb, skb_network_offset(skb) +
err = skb_ensure_writable(skb, skb_network_offset(skb) +
sizeof(struct ipv6hdr));
if (unlikely(err))
return err;
......@@ -496,7 +416,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *key,
return 0;
}
/* Must follow make_writable() since that can move the skb data. */
/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
__be16 new_port, __sum16 *check)
{
......@@ -526,7 +446,7 @@ static int set_udp(struct sk_buff *skb, struct sw_flow_key *key,
struct udphdr *uh;
int err;
err = make_writable(skb, skb_transport_offset(skb) +
err = skb_ensure_writable(skb, skb_transport_offset(skb) +
sizeof(struct udphdr));
if (unlikely(err))
return err;
......@@ -551,7 +471,7 @@ static int set_tcp(struct sk_buff *skb, struct sw_flow_key *key,
struct tcphdr *th;
int err;
err = make_writable(skb, skb_transport_offset(skb) +
err = skb_ensure_writable(skb, skb_transport_offset(skb) +
sizeof(struct tcphdr));
if (unlikely(err))
return err;
......@@ -577,7 +497,7 @@ static int set_sctp(struct sk_buff *skb, struct sw_flow_key *key,
int err;
unsigned int sctphoff = skb_transport_offset(skb);
err = make_writable(skb, sctphoff + sizeof(struct sctphdr));
err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
if (unlikely(err))
return err;
......@@ -872,8 +792,6 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
case OVS_ACTION_ATTR_PUSH_VLAN:
err = push_vlan(skb, key, nla_data(a));
if (unlikely(err)) /* skb already freed. */
return err;
break;
case OVS_ACTION_ATTR_POP_VLAN:
......
......@@ -425,11 +425,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
if (!nskb)
return -ENOMEM;
nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
nskb = __vlan_hwaccel_push_inside(nskb);
if (!nskb)
return -ENOMEM;
nskb->vlan_tci = 0;
skb = nskb;
}
......
......@@ -175,15 +175,11 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
goto err_free_rt;
}
if (vlan_tx_tag_present(skb)) {
if (unlikely(!__vlan_put_tag(skb,
skb->vlan_proto,
vlan_tx_tag_get(skb)))) {
skb = vlan_hwaccel_push_inside(skb);
if (unlikely(!skb)) {
err = -ENOMEM;
goto err_free_rt;
}
skb->vlan_tci = 0;
}
/* Push Tunnel header. */
skb = __build_header(skb, tunnel_hlen);
......
......@@ -686,6 +686,17 @@ config NET_ACT_CSUM
To compile this code as a module, choose M here: the
module will be called act_csum.
config NET_ACT_VLAN
tristate "Vlan manipulation"
depends on NET_CLS_ACT
---help---
Say Y here to push or pop vlan headers.
If unsure, say N.
To compile this code as a module, choose M here: the
module will be called act_vlan.
config NET_CLS_IND
bool "Incoming device classification"
depends on NET_CLS_U32 || NET_CLS_FW
......
......@@ -16,6 +16,7 @@ obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o
obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o
obj-$(CONFIG_NET_ACT_SKBEDIT) += act_skbedit.o
obj-$(CONFIG_NET_ACT_CSUM) += act_csum.o
obj-$(CONFIG_NET_ACT_VLAN) += act_vlan.o
obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o
obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
......
/*
* Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_vlan.h>
#include <net/tc_act/tc_vlan.h>
#define VLAN_TAB_MASK 15
static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
struct tcf_vlan *v = a->priv;
int action;
int err;
spin_lock(&v->tcf_lock);
v->tcf_tm.lastuse = jiffies;
bstats_update(&v->tcf_bstats, skb);
action = v->tcf_action;
switch (v->tcfv_action) {
case TCA_VLAN_ACT_POP:
err = skb_vlan_pop(skb);
if (err)
goto drop;
break;
case TCA_VLAN_ACT_PUSH:
err = skb_vlan_push(skb, v->tcfv_push_proto, v->tcfv_push_vid);
if (err)
goto drop;
break;
default:
BUG();
}
goto unlock;
drop:
action = TC_ACT_SHOT;
v->tcf_qstats.drops++;
unlock:
spin_unlock(&v->tcf_lock);
return action;
}
static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
[TCA_VLAN_PARMS] = { .len = sizeof(struct tc_vlan) },
[TCA_VLAN_PUSH_VLAN_ID] = { .type = NLA_U16 },
[TCA_VLAN_PUSH_VLAN_PROTOCOL] = { .type = NLA_U16 },
};
static int tcf_vlan_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action *a,
int ovr, int bind)
{
struct nlattr *tb[TCA_VLAN_MAX + 1];
struct tc_vlan *parm;
struct tcf_vlan *v;
int action;
__be16 push_vid = 0;
__be16 push_proto = 0;
int ret = 0;
int err;
if (!nla)
return -EINVAL;
err = nla_parse_nested(tb, TCA_VLAN_MAX, nla, vlan_policy);
if (err < 0)
return err;
if (!tb[TCA_VLAN_PARMS])
return -EINVAL;
parm = nla_data(tb[TCA_VLAN_PARMS]);
switch (parm->v_action) {
case TCA_VLAN_ACT_POP:
break;
case TCA_VLAN_ACT_PUSH:
if (!tb[TCA_VLAN_PUSH_VLAN_ID])
return -EINVAL;
push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
if (push_vid >= VLAN_VID_MASK)
return -ERANGE;
if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) {
push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]);
switch (push_proto) {
case htons(ETH_P_8021Q):
case htons(ETH_P_8021AD):
break;
default:
return -EPROTONOSUPPORT;
}
} else {
push_proto = htons(ETH_P_8021Q);
}
break;
default:
return -EINVAL;
}
action = parm->v_action;
if (!tcf_hash_check(parm->index, a, bind)) {
ret = tcf_hash_create(parm->index, est, a, sizeof(*v), bind);
if (ret)
return ret;
ret = ACT_P_CREATED;
} else {
if (bind)
return 0;
tcf_hash_release(a, bind);
if (!ovr)
return -EEXIST;
}
v = to_vlan(a);
spin_lock_bh(&v->tcf_lock);
v->tcfv_action = action;
v->tcfv_push_vid = push_vid;
v->tcfv_push_proto = push_proto;
v->tcf_action = parm->action;
spin_unlock_bh(&v->tcf_lock);
if (ret == ACT_P_CREATED)
tcf_hash_insert(a);
return ret;
}
static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_vlan *v = a->priv;
struct tc_vlan opt = {
.index = v->tcf_index,
.refcnt = v->tcf_refcnt - ref,
.bindcnt = v->tcf_bindcnt - bind,
.action = v->tcf_action,
.v_action = v->tcfv_action,
};
struct tcf_t t;
if (nla_put(skb, TCA_VLAN_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
if (v->tcfv_action == TCA_VLAN_ACT_PUSH &&
(nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, v->tcfv_push_vid) ||
nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL, v->tcfv_push_proto)))
goto nla_put_failure;
t.install = jiffies_to_clock_t(jiffies - v->tcf_tm.install);
t.lastuse = jiffies_to_clock_t(jiffies - v->tcf_tm.lastuse);
t.expires = jiffies_to_clock_t(v->tcf_tm.expires);
if (nla_put(skb, TCA_VLAN_TM, sizeof(t), &t))
goto nla_put_failure;
return skb->len;
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
}
static struct tc_action_ops act_vlan_ops = {
.kind = "vlan",
.type = TCA_ACT_VLAN,
.owner = THIS_MODULE,
.act = tcf_vlan,
.dump = tcf_vlan_dump,
.init = tcf_vlan_init,
};
static int __init vlan_init_module(void)
{
return tcf_register_action(&act_vlan_ops, VLAN_TAB_MASK);
}
static void __exit vlan_cleanup_module(void)
{
tcf_unregister_action(&act_vlan_ops);
}
module_init(vlan_init_module);
module_exit(vlan_cleanup_module);
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("vlan manipulation actions");
MODULE_LICENSE("GPL v2");