Commit 04c1e5a1 authored by David S. Miller's avatar David S. Miller

Major revamp of VLAN layer:

1) Add hw acceleration hooks for device drivers.
2) Move private declarations out of public includes.
3) Mark file local functions and data as static.
4) Use a small hash table for VLAN group lookups.
5) Correct all the locking and device ref counting.
6) No longer mark it as CONFIG_EXPERIMENTAL.
parent c41cbcb7
......@@ -52,70 +52,16 @@ struct vlan_hdr {
unsigned short h_vlan_encapsulated_proto; /* packet type ID field (or len) */
};
/* Find a VLAN device by the MAC address of its Ethernet device, and
* its VLAN ID. The default configuration is for VLAN scope
* to be box-wide, so the MAC will be ignored. The MAC will only be
* looked at if we are configured to have a separate set of VLANs per
* each MAC addressable interface. Note that this latter option does
* NOT follow the spec for VLANs, but may be useful for doing very
* large quantities of VLAN MUX/DEMUX onto FrameRelay or ATM PVCs.
*/
struct net_device *find_802_1Q_vlan_dev(struct net_device* real_dev,
unsigned short VID); /* vlan.c */
#define VLAN_VID_MASK 0xfff
/* found in af_inet.c */
extern int (*vlan_ioctl_hook)(unsigned long arg);
/* found in vlan_dev.c */
struct net_device_stats* vlan_dev_get_stats(struct net_device* dev);
int vlan_dev_rebuild_header(struct sk_buff *skb);
int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type* ptype);
int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, void *daddr, void *saddr,
unsigned len);
int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
int vlan_dev_change_mtu(struct net_device *dev, int new_mtu);
int vlan_dev_set_mac_address(struct net_device *dev, void* addr);
int vlan_dev_open(struct net_device* dev);
int vlan_dev_stop(struct net_device* dev);
int vlan_dev_init(struct net_device* dev);
void vlan_dev_destruct(struct net_device* dev);
void vlan_dev_copy_and_sum(struct sk_buff *dest, unsigned char *src,
int length, int base);
int vlan_dev_set_ingress_priority(char* dev_name, __u32 skb_prio, short vlan_prio);
int vlan_dev_set_egress_priority(char* dev_name, __u32 skb_prio, short vlan_prio);
int vlan_dev_set_vlan_flag(char* dev_name, __u32 flag, short flag_val);
/* VLAN multicast stuff */
/* Delete all of the MC list entries from this vlan device. Also deals
* with the underlying device...
*/
void vlan_flush_mc_list(struct net_device* dev);
/* copy the mc_list into the vlan_info structure. */
void vlan_copy_mc_list(struct dev_mc_list* mc_list, struct vlan_dev_info* vlan_info);
/** dmi is a single entry into a dev_mc_list, a single node. mc_list is
* an entire list, and we'll iterate through it.
*/
int vlan_should_add_mc(struct dev_mc_list *dmi, struct dev_mc_list *mc_list);
/** Taken from Gleb + Lennert's VLAN code, and modified... */
void vlan_dev_set_multicast_list(struct net_device *vlan_dev);
int vlan_collection_add_vlan(struct vlan_collection* vc, unsigned short vlan_id,
unsigned short flags);
int vlan_collection_remove_vlan(struct vlan_collection* vc,
struct net_device* vlan_dev);
int vlan_collection_remove_vlan_id(struct vlan_collection* vc, unsigned short vlan_id);
/* found in vlan.c */
/* Our listing of VLAN group(s) */
extern struct vlan_group* p802_1Q_vlan_list;
#define VLAN_NAME "vlan"
/* if this changes, algorithm will have to be reworked because this
* depends on completely exhausting the VLAN identifier space. Thus
* it gives constant time look-up, but it many cases it wastes memory.
* it gives constant time look-up, but in many cases it wastes memory.
*/
#define VLAN_GROUP_ARRAY_LEN 4096
......@@ -170,56 +116,73 @@ struct vlan_dev_info {
/* inline functions */
/* Used in vlan_skb_recv */
static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
static inline struct net_device_stats *vlan_dev_get_stats(struct net_device *dev)
{
if (VLAN_DEV_INFO(skb->dev)->flags & 1) {
skb = skb_share_check(skb, GFP_ATOMIC);
if (skb) {
/* Lifted from Gleb's VLAN code... */
memmove(skb->data - ETH_HLEN,
skb->data - VLAN_ETH_HLEN, 12);
skb->mac.raw += VLAN_HLEN;
}
}
return skb;
return &(VLAN_DEV_INFO(dev)->dev_stats);
}
static inline unsigned short vlan_dev_get_egress_qos_mask(struct net_device* dev,
struct sk_buff* skb)
static inline __u32 vlan_get_ingress_priority(struct net_device *dev,
unsigned short vlan_tag)
{
struct vlan_priority_tci_mapping *mp =
VLAN_DEV_INFO(dev)->egress_priority_map[(skb->priority & 0xF)];
while (mp) {
if (mp->priority == skb->priority) {
return mp->vlan_qos; /* This should already be shifted to mask
* correctly with the VLAN's TCI
*/
}
mp = mp->next;
}
return 0;
}
struct vlan_dev_info *vip = VLAN_DEV_INFO(dev);
static inline int vlan_dmi_equals(struct dev_mc_list *dmi1,
struct dev_mc_list *dmi2)
{
return ((dmi1->dmi_addrlen == dmi2->dmi_addrlen) &&
(memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0));
return vip->ingress_priority_map[(vlan_tag >> 13) & 0x7];
}
static inline void vlan_destroy_mc_list(struct dev_mc_list *mc_list)
/* VLAN tx hw acceleration helpers. */

/* Per-skb cookie a driver-aware transmit path stashes in skb->cb[] when
 * the device advertises hardware VLAN tag insertion (NETIF_F_HW_VLAN_TX).
 * The magic word distinguishes a tagged skb from unrelated cb[] contents.
 * NOTE(review): assumes this 8-byte struct fits in skb->cb[] — confirm
 * against the skb definition if this struct is ever extended.
 */
struct vlan_skb_tx_cookie {
u32 magic; /* VLAN_TX_COOKIE_MAGIC when a tag is present */
u32 vlan_tag; /* 802.1Q TCI for the hardware to insert on transmit */
};
#define VLAN_TX_COOKIE_MAGIC 0x564c414e /* "VLAN" in ascii. */
/* Reinterpret the skb control buffer as the tx cookie. */
#define VLAN_TX_SKB_CB(__skb) ((struct vlan_skb_tx_cookie *)&((__skb)->cb[0]))
/* True when the skb carries a VLAN tag for the driver to insert. */
#define vlan_tx_tag_present(__skb) \
(VLAN_TX_SKB_CB(__skb)->magic == VLAN_TX_COOKIE_MAGIC)
/* Fetch the tag; only meaningful when vlan_tx_tag_present() is true. */
#define vlan_tx_tag_get(__skb) (VLAN_TX_SKB_CB(__skb)->vlan_tag)
/* VLAN rx hw acceleration helper. This acts like netif_rx(). */
static inline int vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
unsigned short vlan_tag)
{
/* NOTE(review): the dmi/next declarations and the while(dmi) loop below
 * are stale lines from the removed vlan_destroy_mc_list() (see its
 * signature earlier in this diff) interleaved into this view — they are
 * not part of vlan_hwaccel_rx itself and should not be merged in.
 */
struct dev_mc_list *dmi = mc_list;
struct dev_mc_list *next;
struct net_device_stats *stats;
while(dmi) {
next = dmi->next;
kfree(dmi);
dmi = next;
/* Look up the VLAN net_device for the hardware-extracted tag's VID. */
skb->dev = grp->vlan_devices[vlan_tag & VLAN_VID_MASK];
if (skb->dev == NULL) {
/* No VLAN interface is configured for this VID: free the skb. */
kfree_skb(skb);
/* Not NET_RX_DROP, this is not being dropped
* due to congestion.
*/
return 0;
}
skb->dev->last_rx = jiffies;
/* Account the packet against the VLAN device's own statistics. */
stats = vlan_dev_get_stats(skb->dev);
stats->rx_packets++;
stats->rx_bytes += skb->len;
/* Map the 802.1p priority bits of the TCI (bits 13-15, see
 * vlan_get_ingress_priority) to an skb priority. */
skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tag);
switch (skb->pkt_type) {
case PACKET_BROADCAST:
break;
case PACKET_MULTICAST:
stats->multicast++;
break;
case PACKET_OTHERHOST:
/* Our lower layer thinks this is not local, let's make sure.
* This allows the VLAN to have a different MAC than the underlying
* device, and still route correctly.
*/
if (!memcmp(skb->mac.ethernet->h_dest, skb->dev->dev_addr, ETH_ALEN))
skb->pkt_type = PACKET_HOST;
break;
};
return netif_rx(skb);
}
#endif /* __KERNEL__ */
......
......@@ -40,6 +40,7 @@
#endif
struct divert_blk;
struct vlan_group;
#define HAVE_ALLOC_NETDEV /* feature macro: alloc_xxxdev
functions are available. */
......@@ -357,6 +358,10 @@ struct net_device
#define NETIF_F_DYNALLOC 16 /* Self-destructing device. */
#define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */
#define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
/* Called after device is detached from network. */
void (*uninit)(struct net_device *dev);
......@@ -398,6 +403,13 @@ struct net_device
#define HAVE_TX_TIMEOUT
void (*tx_timeout) (struct net_device *dev);
void (*vlan_rx_register)(struct net_device *dev,
struct vlan_group *grp);
void (*vlan_rx_add_vid)(struct net_device *dev,
unsigned short vid);
void (*vlan_rx_kill_vid)(struct net_device *dev,
unsigned short vid);
int (*hard_header_parse)(struct sk_buff *skb,
unsigned char *haddr);
int (*neigh_setup)(struct net_device *dev, struct neigh_parms *);
......
This diff is collapsed.
......@@ -30,14 +30,48 @@ I'll bet they might prove useful again... --Ben
extern unsigned short vlan_name_type;
/* Counter for how many NON-VLAN protos we've received on a VLAN. */
extern unsigned long vlan_bad_proto_recvd;
int vlan_ioctl_handler(unsigned long arg);
/* Add some headers for the public VLAN methods. */
int unregister_802_1Q_vlan_device(const char* vlan_IF_name);
struct net_device *register_802_1Q_vlan_device(const char* eth_IF_name,
unsigned short VID);
#define VLAN_GRP_HASH_SHIFT 5
#define VLAN_GRP_HASH_SIZE (1 << VLAN_GRP_HASH_SHIFT)
#define VLAN_GRP_HASH_MASK (VLAN_GRP_HASH_SIZE - 1)
extern struct vlan_group *vlan_group_hash[VLAN_GRP_HASH_SIZE];
extern spinlock_t vlan_group_lock;
/* Find a VLAN device by the MAC address of its Ethernet device, and
* its VLAN ID. The default configuration is for VLAN scope
* to be box-wide, so the MAC will be ignored. The MAC will only be
* looked at if we are configured to have a separate set of VLANs per
* each MAC addressable interface. Note that this latter option does
* NOT follow the spec for VLANs, but may be useful for doing very
* large quantities of VLAN MUX/DEMUX onto FrameRelay or ATM PVCs.
*
* Must be invoked with vlan_group_lock held and that lock MUST NOT
* be dropped until a reference is obtained on the returned device.
* You may drop the lock earlier if you are running under the RTNL
* semaphore, however.
*/
struct net_device *__find_vlan_dev(struct net_device* real_dev,
unsigned short VID); /* vlan.c */
/* found in vlan_dev.c */
int vlan_dev_rebuild_header(struct sk_buff *skb);
int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type* ptype);
int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, void *daddr, void *saddr,
unsigned len);
int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
int vlan_dev_change_mtu(struct net_device *dev, int new_mtu);
int vlan_dev_set_mac_address(struct net_device *dev, void* addr);
int vlan_dev_open(struct net_device* dev);
int vlan_dev_stop(struct net_device* dev);
int vlan_dev_init(struct net_device* dev);
void vlan_dev_destruct(struct net_device* dev);
int vlan_dev_set_ingress_priority(char* dev_name, __u32 skb_prio, short vlan_prio);
int vlan_dev_set_egress_priority(char* dev_name, __u32 skb_prio, short vlan_prio);
int vlan_dev_set_vlan_flag(char* dev_name, __u32 flag, short flag_val);
void vlan_dev_set_multicast_list(struct net_device *vlan_dev);
#endif /* !(__BEN_VLAN_802_1Q_INC__) */
This diff is collapsed.
......@@ -272,7 +272,7 @@ static int vlan_proc_get_vlan_info(char* buf, unsigned int cnt)
{
struct net_device *vlandev = NULL;
struct vlan_group *grp = NULL;
int i = 0;
int h, i;
char *nm_type = NULL;
struct vlan_dev_info *dev_info = NULL;
......@@ -292,46 +292,34 @@ static int vlan_proc_get_vlan_info(char* buf, unsigned int cnt)
nm_type = "UNKNOWN";
}
cnt += sprintf(buf + cnt, "Name-Type: %s bad_proto_recvd: %lu\n",
nm_type, vlan_bad_proto_recvd);
cnt += sprintf(buf + cnt, "Name-Type: %s\n", nm_type);
for (grp = p802_1Q_vlan_list; grp != NULL; grp = grp->next) {
/* loop through all devices for this device */
#ifdef VLAN_DEBUG
printk(VLAN_DBG __FUNCTION__ ": found a group, addr: %p\n",grp);
#endif
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
vlandev = grp->vlan_devices[i];
if (!vlandev)
continue;
#ifdef VLAN_DEBUG
printk(VLAN_DBG __FUNCTION__
": found a vlan_dev, addr: %p\n", vlandev);
#endif
if ((cnt + 100) > VLAN_PROC_BUFSZ) {
if ((cnt+strlen(term_msg)) < VLAN_PROC_BUFSZ)
cnt += sprintf(buf+cnt, "%s", term_msg);
spin_lock_bh(&vlan_group_lock);
for (h = 0; h < VLAN_GRP_HASH_SIZE; h++) {
for (grp = vlan_group_hash[h]; grp != NULL; grp = grp->next) {
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
vlandev = grp->vlan_devices[i];
if (!vlandev)
continue;
return cnt;
}
if (!vlandev->priv) {
printk(KERN_ERR __FUNCTION__
": ERROR: vlandev->priv is NULL\n");
continue;
}
if ((cnt + 100) > VLAN_PROC_BUFSZ) {
if ((cnt+strlen(term_msg)) < VLAN_PROC_BUFSZ)
cnt += sprintf(buf+cnt, "%s", term_msg);
dev_info = VLAN_DEV_INFO(vlandev);
goto out;
}
#ifdef VLAN_DEBUG
printk(VLAN_DBG __FUNCTION__
": got a good vlandev, addr: %p\n",
VLAN_DEV_INFO(vlandev));
#endif
cnt += sprintf(buf + cnt, "%-15s| %d | %s\n",
vlandev->name, dev_info->vlan_id,
dev_info->real_dev->name);
dev_info = VLAN_DEV_INFO(vlandev);
cnt += sprintf(buf + cnt, "%-15s| %d | %s\n",
vlandev->name,
dev_info->vlan_id,
dev_info->real_dev->name);
}
}
}
out:
spin_unlock_bh(&vlan_group_lock);
return cnt;
}
......@@ -365,11 +353,7 @@ static int vlandev_get_info(char *buf, char **start,
int cnt = 0;
int i;
#ifdef VLAN_DEBUG
printk(VLAN_DBG __FUNCTION__ ": vlandev: %p\n", vlandev);
#endif
if ((vlandev == NULL) || (!vlandev->priv_flags & IFF_802_1Q_VLAN))
if ((vlandev == NULL) || (!(vlandev->priv_flags & IFF_802_1Q_VLAN)))
return 0;
dev_info = VLAN_DEV_INFO(vlandev);
......@@ -426,7 +410,7 @@ static int vlandev_get_info(char *buf, char **start,
cnt += sprintf(buf + cnt, "EGRESSS priority Mappings: ");
for (i = 0; i<16; i++) {
for (i = 0; i < 16; i++) {
mp = dev_info->egress_priority_map[i];
while (mp) {
cnt += sprintf(buf + cnt, "%lu:%hu ",
......
......@@ -44,10 +44,8 @@ if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
tristate ' Multi-Protocol Over ATM (MPOA) support' CONFIG_ATM_MPOA
fi
fi
dep_tristate '802.1Q VLAN Support (EXPERIMENTAL)' CONFIG_VLAN_8021Q $CONFIG_EXPERIMENTAL
fi
tristate '802.1Q VLAN Support' CONFIG_VLAN_8021Q
comment ' '
tristate 'The IPX protocol' CONFIG_IPX
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment