Commit e032afc8 authored by Alexander Duyck, committed by Jeff Kirsher

igb: Create separate functions for generating cmd_type and olinfo

This change is meant to improve the readability of the driver by separating
out the cmd_type configuration and the olinfo configuration into their own
functions.  By doing this it is much easier to determine which ingredients
go into setting up these two portions of the descriptor.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 8542db05
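
Before the diff, a quick illustration of what the two new helpers produce. The standalone C sketch below mirrors their logic using the bit definitions added by this patch; it drops the kernel-only cpu_to_le32() conversion, and the IGB_TX_FLAGS_* values, E1000_ADVTXD_PAYLEN_SHIFT (14), and the POPTS constants (0x01/0x02) are assumptions for illustration, not copies of the driver headers.

```c
#include <stdint.h>
#include <stdio.h>

/* bit definitions taken from this patch */
#define E1000_ADVTXD_DTYP_DATA	0x00300000u /* Advanced Data Descriptor */
#define E1000_ADVTXD_DCMD_IFCS	0x02000000u /* Insert FCS (Ethernet CRC) */
#define E1000_ADVTXD_DCMD_DEXT	0x20000000u /* Descriptor extension (1=Adv) */
#define E1000_ADVTXD_DCMD_VLE	0x40000000u /* VLAN pkt enable */
#define E1000_ADVTXD_DCMD_TSE	0x80000000u /* TCP Seg enable */

/* assumed values, for illustration only */
#define E1000_ADVTXD_PAYLEN_SHIFT	14
#define E1000_TXD_POPTS_IXSM	0x01u /* IP checksum offload */
#define E1000_TXD_POPTS_TXSM	0x02u /* L4 checksum offload */
#define IGB_TX_FLAGS_CSUM	0x01u
#define IGB_TX_FLAGS_VLAN	0x02u
#define IGB_TX_FLAGS_TSO	0x04u
#define IGB_TX_FLAGS_IPV4	0x08u

/* mirrors igb_tx_cmd_type(): fixed descriptor type plus per-flag bits */
static uint32_t tx_cmd_type(uint32_t tx_flags)
{
	uint32_t cmd_type = E1000_ADVTXD_DTYP_DATA |
			    E1000_ADVTXD_DCMD_IFCS |
			    E1000_ADVTXD_DCMD_DEXT;

	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type |= E1000_ADVTXD_DCMD_VLE;
	if (tx_flags & IGB_TX_FLAGS_TSO)
		cmd_type |= E1000_ADVTXD_DCMD_TSE;
	return cmd_type;
}

/* mirrors igb_tx_olinfo_status(): payload length plus checksum options */
static uint32_t tx_olinfo_status(uint32_t tx_flags, unsigned int paylen)
{
	uint32_t olinfo = paylen << E1000_ADVTXD_PAYLEN_SHIFT;

	if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo |= E1000_TXD_POPTS_TXSM << 8;
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo |= E1000_TXD_POPTS_IXSM << 8;
	}
	return olinfo;
}

int main(void)
{
	/* a 1448-byte TCP payload with IPv4 + L4 checksum offload */
	uint32_t flags = IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_IPV4;

	printf("cmd_type      = 0x%08x\n", (unsigned)tx_cmd_type(flags));
	printf("olinfo_status = 0x%08x\n",
	       (unsigned)tx_olinfo_status(flags, 1448));
	return 0;
}
```

Splitting the build-up this way also lets cmd_type stay a __le32 end to end, so the hot per-descriptor loop only ORs in the already-converted buffer length instead of redoing byte swapping for the whole word.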
@@ -130,7 +130,9 @@ union e1000_adv_tx_desc {
 #define E1000_ADVTXD_MAC_TSTAMP	0x00080000 /* IEEE1588 Timestamp packet */
 #define E1000_ADVTXD_DTYP_CTXT	0x00200000 /* Advanced Context Descriptor */
 #define E1000_ADVTXD_DTYP_DATA	0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP	0x01000000 /* End of Packet */
 #define E1000_ADVTXD_DCMD_IFCS	0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RS	0x08000000 /* Report Status */
 #define E1000_ADVTXD_DCMD_DEXT	0x20000000 /* Descriptor extension (1=Adv) */
 #define E1000_ADVTXD_DCMD_VLE	0x40000000 /* VLAN pkt enable */
 #define E1000_ADVTXD_DCMD_TSE	0x80000000 /* TCP Seg enable */
...
@@ -231,7 +231,7 @@ struct igb_ring {
 #define IGB_RING_FLAG_TX_CTX_IDX	0x00000001 /* HW requires context index */
-#define IGB_ADVTXD_DCMD (E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS)
+#define IGB_TXD_DCMD	(E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
 #define IGB_RX_DESC(R, i)	\
	(&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
...
@@ -4103,6 +4103,50 @@ static inline bool igb_tx_csum(struct igb_ring *tx_ring,
 	return (skb->ip_summed == CHECKSUM_PARTIAL);
 }
 
+static __le32 igb_tx_cmd_type(u32 tx_flags)
+{
+	/* set type for advanced descriptor with frame checksum insertion */
+	__le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
+				      E1000_ADVTXD_DCMD_IFCS |
+				      E1000_ADVTXD_DCMD_DEXT);
+
+	/* set HW vlan bit if vlan is present */
+	if (tx_flags & IGB_TX_FLAGS_VLAN)
+		cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
+
+	/* set timestamp bit if present */
+	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
+		cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
+
+	/* set segmentation bits for TSO */
+	if (tx_flags & IGB_TX_FLAGS_TSO)
+		cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
+
+	return cmd_type;
+}
+
+static __le32 igb_tx_olinfo_status(u32 tx_flags, unsigned int paylen,
+				   struct igb_ring *tx_ring)
+{
+	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
+
+	/* 82575 requires a unique index per ring if any offload is enabled */
+	if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
+	    (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX))
+		olinfo_status |= tx_ring->reg_idx << 4;
+
+	/* insert L4 checksum */
+	if (tx_flags & IGB_TX_FLAGS_CSUM) {
+		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+
+		/* insert IPv4 checksum */
+		if (tx_flags & IGB_TX_FLAGS_IPV4)
+			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
+	}
+
+	return cpu_to_le32(olinfo_status);
+}
+
 #define IGB_MAX_TXD_PWR	16
 #define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)
@@ -4187,54 +4231,28 @@ static inline void igb_tx_queue(struct igb_ring *tx_ring,
 {
 	union e1000_adv_tx_desc *tx_desc;
 	struct igb_tx_buffer *buffer_info;
-	u32 olinfo_status = 0, cmd_type_len;
+	__le32 olinfo_status, cmd_type;
 	unsigned int i = tx_ring->next_to_use;
 
-	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
-			E1000_ADVTXD_DCMD_DEXT);
-
-	if (tx_flags & IGB_TX_FLAGS_VLAN)
-		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
-
-	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
-		cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
-
-	if (tx_flags & IGB_TX_FLAGS_TSO) {
-		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
-
-		/* insert tcp checksum */
-		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
-
-		/* insert ip checksum */
-		if (tx_flags & IGB_TX_FLAGS_IPV4)
-			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
-	} else if (tx_flags & IGB_TX_FLAGS_CSUM) {
-		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
-	}
-
-	if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
-	    (tx_flags & (IGB_TX_FLAGS_CSUM |
-			 IGB_TX_FLAGS_TSO |
-			 IGB_TX_FLAGS_VLAN)))
-		olinfo_status |= tx_ring->reg_idx << 4;
-
-	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
+	cmd_type = igb_tx_cmd_type(tx_flags);
+	olinfo_status = igb_tx_olinfo_status(tx_flags,
+					     paylen - hdr_len,
+					     tx_ring);
 
 	do {
 		buffer_info = &tx_ring->tx_buffer_info[i];
 		tx_desc = IGB_TX_DESC(tx_ring, i);
 		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
-		tx_desc->read.cmd_type_len =
-			cpu_to_le32(cmd_type_len | buffer_info->length);
-		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+		tx_desc->read.cmd_type_len = cmd_type |
+					     cpu_to_le32(buffer_info->length);
+		tx_desc->read.olinfo_status = olinfo_status;
 		count--;
 		i++;
 		if (i == tx_ring->count)
 			i = 0;
 	} while (count > 0);
 
-	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
+	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_TXD_DCMD);
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.  (Only
 	 * applicable for weak-ordered memory model archs,
@@ -4309,21 +4327,22 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
 	}
 
-	if (skb->protocol == htons(ETH_P_IP))
-		tx_flags |= IGB_TX_FLAGS_IPV4;
-
 	/* record the location of the first descriptor for this packet */
 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
 
 	tso = igb_tso(tx_ring, skb, tx_flags, &hdr_len);
-	if (tso < 0)
+	if (tso < 0) {
 		goto out_drop;
-	else if (tso)
-		tx_flags |= IGB_TX_FLAGS_TSO;
-	else if (igb_tx_csum(tx_ring, skb, tx_flags) &&
-		 (skb->ip_summed == CHECKSUM_PARTIAL))
+	} else if (tso) {
+		tx_flags |= IGB_TX_FLAGS_TSO | IGB_TX_FLAGS_CSUM;
+		if (skb->protocol == htons(ETH_P_IP))
+			tx_flags |= IGB_TX_FLAGS_IPV4;
+	} else if (igb_tx_csum(tx_ring, skb, tx_flags) &&
+		   (skb->ip_summed == CHECKSUM_PARTIAL)) {
 		tx_flags |= IGB_TX_FLAGS_CSUM;
+	}
 
 	/*
 	 * count reflects descriptors mapped, if 0 or less then mapping error
...