Commit eb977d99 authored by Yunsheng Lin, committed by David S. Miller

net: hns3: clean up for vlan handling in hns3_fill_desc_vtags

This patch refactors the hns3_fill_desc_vtags function to avoid
passing too many parameters, reduce the indentation level and do
some other cleanup.

This patch also adds the hns3_fill_skb_desc function to fill the
first descriptor of a skb.
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Reviewed-by: Peng Li <lipeng321@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 13050921
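
Note on the new calling convention: hns3_handle_vtags() now reports where the
VLAN tag should go (0 for none, HNS3_INNER_VLAN_TAG, HNS3_OUTER_VLAN_TAG, or a
negative error) instead of writing through output pointers, and the new
hns3_fill_skb_desc() maps that result onto the TX descriptor. The stand-alone
C sketch below only models that flow; apart from the two tag constants and the
VLAN_PRIO_* macros, all names here (fake_skb, fake_desc, classify_vtag,
fill_desc) are hypothetical stand-ins and not driver code.

#include <stdint.h>
#include <stdio.h>

/* Tag-location codes taken from the patch. */
#define HNS3_INNER_VLAN_TAG	1
#define HNS3_OUTER_VLAN_TAG	2

/* VLAN priority layout as defined in <linux/if_vlan.h>. */
#define VLAN_PRIO_MASK		0xe000
#define VLAN_PRIO_SHIFT		13

/* Hypothetical stand-ins for the skb and descriptor state. */
struct fake_skb {
	int has_vlan_tag;	/* models skb_vlan_tag_present() */
	uint16_t vlan_tci;	/* models skb_vlan_tag_get() */
	int double_tagged;	/* simplified stand-in for the 802.1Q +
				 * port-based-VLAN case
				 */
	uint32_t priority;
};

struct fake_desc {
	uint16_t inner_vtag;
	uint16_t outer_vtag;
};

/* Models hns3_handle_vtags(): classify the tag instead of filling fields. */
static int classify_vtag(const struct fake_skb *skb)
{
	if (!skb->has_vlan_tag)
		return 0;
	return skb->double_tagged ? HNS3_OUTER_VLAN_TAG : HNS3_INNER_VLAN_TAG;
}

/* Models hns3_fill_skb_desc(): map the returned code onto the descriptor. */
static int fill_desc(const struct fake_skb *skb, struct fake_desc *desc)
{
	int ret = classify_vtag(skb);
	uint16_t vtag;

	if (ret < 0)
		return ret;

	vtag = skb->vlan_tci |
	       ((skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);

	if (ret == HNS3_INNER_VLAN_TAG)
		desc->inner_vtag = vtag;
	else if (ret == HNS3_OUTER_VLAN_TAG)
		desc->outer_vtag = vtag;

	return 0;
}

int main(void)
{
	struct fake_skb skb = { .has_vlan_tag = 1, .vlan_tci = 100,
				.double_tagged = 0, .priority = 5 };
	struct fake_desc desc = { 0 };

	fill_desc(&skb, &desc);
	printf("inner 0x%04x outer 0x%04x\n", desc.inner_vtag, desc.outer_vtag);
	return 0;
}
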
@@ -45,6 +45,9 @@ MODULE_PARM_DESC(debug, " Network interface message level setting");
 #define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                            NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
 
+#define HNS3_INNER_VLAN_TAG	1
+#define HNS3_OUTER_VLAN_TAG	2
+
 /* hns3_pci_tbl - PCI Device ID Table
  *
  * Last entry must be all 0s
@@ -961,16 +964,16 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
         hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1U);
 }
 
-static int hns3_fill_desc_vtags(struct sk_buff *skb,
-                                struct hns3_enet_ring *tx_ring,
-                                u32 *inner_vlan_flag,
-                                u32 *out_vlan_flag,
-                                u16 *inner_vtag,
-                                u16 *out_vtag)
+static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
+                             struct sk_buff *skb)
 {
-#define HNS3_TX_VLAN_PRIO_SHIFT 13
-
         struct hnae3_handle *handle = tx_ring->tqp->handle;
+        struct vlan_ethhdr *vhdr;
+        int rc;
+
+        if (!(skb->protocol == htons(ETH_P_8021Q) ||
+              skb_vlan_tag_present(skb)))
+                return 0;
 
         /* Since HW limitation, if port based insert VLAN enabled, only one VLAN
          * header is allowed in skb, otherwise it will cause RAS error.
@@ -981,8 +984,7 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
                 return -EINVAL;
 
         if (skb->protocol == htons(ETH_P_8021Q) &&
-            !(tx_ring->tqp->handle->kinfo.netdev->features &
-            NETIF_F_HW_VLAN_CTAG_TX)) {
+            !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
                 /* When HW VLAN acceleration is turned off, and the stack
                  * sets the protocol to 802.1q, the driver just need to
                  * set the protocol to the encapsulated ethertype.
@@ -992,45 +994,92 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
         }
 
         if (skb_vlan_tag_present(skb)) {
-                u16 vlan_tag;
-
-                vlan_tag = skb_vlan_tag_get(skb);
-                vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
-
                 /* Based on hw strategy, use out_vtag in two layer tag case,
                  * and use inner_vtag in one tag case.
                  */
-                if (skb->protocol == htons(ETH_P_8021Q)) {
-                        if (handle->port_base_vlan_state ==
-                            HNAE3_PORT_BASE_VLAN_DISABLE){
-                                hns3_set_field(*out_vlan_flag,
-                                               HNS3_TXD_OVLAN_B, 1);
-                                *out_vtag = vlan_tag;
-                        } else {
-                                hns3_set_field(*inner_vlan_flag,
-                                               HNS3_TXD_VLAN_B, 1);
-                                *inner_vtag = vlan_tag;
-                        }
-                } else {
-                        hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
-                        *inner_vtag = vlan_tag;
-                }
-        } else if (skb->protocol == htons(ETH_P_8021Q)) {
-                struct vlan_ethhdr *vhdr;
-                int rc;
-
-                rc = skb_cow_head(skb, 0);
-                if (unlikely(rc < 0))
-                        return rc;
-                vhdr = (struct vlan_ethhdr *)skb->data;
-                vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
-                                        << HNS3_TX_VLAN_PRIO_SHIFT);
+                if (skb->protocol == htons(ETH_P_8021Q) &&
+                    handle->port_base_vlan_state ==
+                    HNAE3_PORT_BASE_VLAN_DISABLE)
+                        rc = HNS3_OUTER_VLAN_TAG;
+                else
+                        rc = HNS3_INNER_VLAN_TAG;
+
+                skb->protocol = vlan_get_protocol(skb);
+                return rc;
         }
 
+        rc = skb_cow_head(skb, 0);
+        if (unlikely(rc < 0))
+                return rc;
+
+        vhdr = (struct vlan_ethhdr *)skb->data;
+        vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT)
+                                        & VLAN_PRIO_MASK);
+
         skb->protocol = vlan_get_protocol(skb);
         return 0;
 }
 
+static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
+                              struct sk_buff *skb, struct hns3_desc *desc)
+{
+        u32 ol_type_vlan_len_msec = 0;
+        u32 type_cs_vlan_tso = 0;
+        u32 paylen = skb->len;
+        u16 inner_vtag = 0;
+        u16 out_vtag = 0;
+        u16 mss = 0;
+        int ret;
+
+        ret = hns3_handle_vtags(ring, skb);
+        if (unlikely(ret < 0)) {
+                return ret;
+        } else if (ret == HNS3_INNER_VLAN_TAG) {
+                inner_vtag = skb_vlan_tag_get(skb);
+                inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+                              VLAN_PRIO_MASK;
+                hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
+        } else if (ret == HNS3_OUTER_VLAN_TAG) {
+                out_vtag = skb_vlan_tag_get(skb);
+                out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+                            VLAN_PRIO_MASK;
+                hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
+                               1);
+        }
+
+        if (skb->ip_summed == CHECKSUM_PARTIAL) {
+                u8 ol4_proto, il4_proto;
+
+                skb_reset_mac_len(skb);
+
+                ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
+                if (unlikely(ret))
+                        return ret;
+
+                ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
+                                      &type_cs_vlan_tso,
+                                      &ol_type_vlan_len_msec);
+                if (unlikely(ret))
+                        return ret;
+
+                ret = hns3_set_tso(skb, &paylen, &mss,
+                                   &type_cs_vlan_tso);
+                if (unlikely(ret))
+                        return ret;
+        }
+
+        /* Set txbd */
+        desc->tx.ol_type_vlan_len_msec =
+                cpu_to_le32(ol_type_vlan_len_msec);
+        desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
+        desc->tx.paylen = cpu_to_le32(paylen);
+        desc->tx.mss = cpu_to_le16(mss);
+        desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
+        desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
+
+        return 0;
+}
+
 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
                           unsigned int size, int frag_end,
                           enum hns_desc_type type)
@@ -1045,50 +1094,12 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 
         if (type == DESC_TYPE_SKB) {
                 struct sk_buff *skb = (struct sk_buff *)priv;
-                u32 ol_type_vlan_len_msec = 0;
-                u32 type_cs_vlan_tso = 0;
-                u32 paylen = skb->len;
-                u16 inner_vtag = 0;
-                u16 out_vtag = 0;
-                u16 mss = 0;
                 int ret;
 
-                ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
-                                           &ol_type_vlan_len_msec,
-                                           &inner_vtag, &out_vtag);
+                ret = hns3_fill_skb_desc(ring, skb, desc);
                 if (unlikely(ret))
                         return ret;
 
-                if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                        u8 ol4_proto, il4_proto;
-
-                        skb_reset_mac_len(skb);
-
-                        ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
-                        if (unlikely(ret))
-                                return ret;
-
-                        ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
-                                              &type_cs_vlan_tso,
-                                              &ol_type_vlan_len_msec);
-                        if (unlikely(ret))
-                                return ret;
-
-                        ret = hns3_set_tso(skb, &paylen, &mss,
-                                           &type_cs_vlan_tso);
-                        if (unlikely(ret))
-                                return ret;
-                }
-
-                /* Set txbd */
-                desc->tx.ol_type_vlan_len_msec =
-                        cpu_to_le32(ol_type_vlan_len_msec);
-                desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
-                desc->tx.paylen = cpu_to_le32(paylen);
-                desc->tx.mss = cpu_to_le16(mss);
-                desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
-                desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
-
                 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
         } else {
                 frag = (skb_frag_t *)priv;
...
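
A side note on the priority encoding: the old code used the driver-local
(skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT, while the new code uses
(skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK from <linux/if_vlan.h>;
both place the 3-bit PCP value in bits 15:13 of the VLAN tag. The small
stand-alone check below only illustrates that equivalence and is not part of
the patch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HNS3_TX_VLAN_PRIO_SHIFT	13	/* old, driver-local constant */
#define VLAN_PRIO_SHIFT		13	/* from <linux/if_vlan.h> */
#define VLAN_PRIO_MASK		0xe000	/* from <linux/if_vlan.h> */

int main(void)
{
	/* The PCP field is 3 bits wide, so 0..7 covers all valid values. */
	for (uint32_t prio = 0; prio <= 7; prio++) {
		uint16_t old_bits = (prio & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
		uint16_t new_bits = (prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;

		assert(old_bits == new_bits);
		printf("prio %u -> 0x%04x\n", (unsigned)prio, old_bits);
	}
	return 0;
}
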