Commit 472148c3 authored by Alexander Duyck, committed by Jeff Kirsher

ixgbe: Update ixgbe Tx flags to improve code efficiency

This change is meant to improve the efficiency of the Tx flags in ixgbe by
aligning them with the values that will later be written into either the
cmd_type or olinfo.  By doing this we are able to reduce most of these
functions to either just a simple shift followed by an or in the case of
cmd_type, or an and followed by an or in the case of olinfo.

To do this I also needed to change the logic and/or drop some flags.  I
dropped the IXGBE_TX_FLAGS_FSO and it was replaced by IXGBE_TX_FLAGS_TSO since
the only place it was ever checked was in conjunction with IXGBE_TX_FLAGS_TSO.
I replaced IXGBE_TX_FLAGS_TXSW with IXGBE_TX_FLAGS_CC, this way we have a
clear point for what the flag is meant to do.  Finally the
IXGBE_TX_FLAGS_NO_IFCS was dropped since we are already carrying the data
for that flag in the skb.  Instead we can just check the bitflag in the skb.

In order to avoid type conversion errors I also adjusted the locations
where we were switching between CPU and little endian.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent c44f5f51
...@@ -96,16 +96,23 @@ ...@@ -96,16 +96,23 @@
/* How many Rx Buffers do we bundle into one write to the hardware ? */ /* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ #define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define IXGBE_TX_FLAGS_CSUM (u32)(1) enum ixgbe_tx_flags {
#define IXGBE_TX_FLAGS_HW_VLAN (u32)(1 << 1) /* cmd_type flags */
#define IXGBE_TX_FLAGS_SW_VLAN (u32)(1 << 2) IXGBE_TX_FLAGS_HW_VLAN = 0x01,
#define IXGBE_TX_FLAGS_TSO (u32)(1 << 3) IXGBE_TX_FLAGS_TSO = 0x02,
#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 4) IXGBE_TX_FLAGS_TSTAMP = 0x04,
#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5)
#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6) /* olinfo flags */
#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7) IXGBE_TX_FLAGS_CC = 0x08,
#define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8) IXGBE_TX_FLAGS_IPV4 = 0x10,
#define IXGBE_TX_FLAGS_NO_IFCS (u32)(1 << 9) IXGBE_TX_FLAGS_CSUM = 0x20,
/* software defined flags */
IXGBE_TX_FLAGS_SW_VLAN = 0x40,
IXGBE_TX_FLAGS_FCOE = 0x80,
};
/* VLAN info */
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 #define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
......
...@@ -544,11 +544,11 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, ...@@ -544,11 +544,11 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
skb_shinfo(skb)->gso_size); skb_shinfo(skb)->gso_size);
first->bytecount += (first->gso_segs - 1) * *hdr_len; first->bytecount += (first->gso_segs - 1) * *hdr_len;
first->tx_flags |= IXGBE_TX_FLAGS_FSO; first->tx_flags |= IXGBE_TX_FLAGS_TSO;
} }
/* set flag indicating FCOE to ixgbe_tx_map call */ /* set flag indicating FCOE to ixgbe_tx_map call */
first->tx_flags |= IXGBE_TX_FLAGS_FCOE; first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC;
/* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */ /* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */
mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
......
...@@ -5968,12 +5968,9 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, ...@@ -5968,12 +5968,9 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
u32 type_tucmd = 0; u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) { if (skb->ip_summed != CHECKSUM_PARTIAL) {
if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) { if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
if (unlikely(skb->no_fcs)) !(first->tx_flags & IXGBE_TX_FLAGS_CC))
first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS; return;
if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
return;
}
} else { } else {
u8 l4_hdr = 0; u8 l4_hdr = 0;
switch (first->protocol) { switch (first->protocol) {
...@@ -6031,30 +6028,32 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, ...@@ -6031,30 +6028,32 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
type_tucmd, mss_l4len_idx); type_tucmd, mss_l4len_idx);
} }
static __le32 ixgbe_tx_cmd_type(u32 tx_flags) #define IXGBE_SET_FLAG(_input, _flag, _result) \
((_flag <= _result) ? \
((u32)(_input & _flag) * (_result / _flag)) : \
((u32)(_input & _flag) / (_flag / _result)))
static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{ {
/* set type for advanced descriptor with frame checksum insertion */ /* set type for advanced descriptor with frame checksum insertion */
__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA | u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
IXGBE_ADVTXD_DCMD_DEXT); IXGBE_ADVTXD_DCMD_DEXT |
IXGBE_ADVTXD_DCMD_IFCS;
/* set HW vlan bit if vlan is present */ /* set HW vlan bit if vlan is present */
if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN) cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE); IXGBE_ADVTXD_DCMD_VLE);
if (tx_flags & IXGBE_TX_FLAGS_TSTAMP)
cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP);
/* set segmentation enable bits for TSO/FSO */ /* set segmentation enable bits for TSO/FSO */
#ifdef IXGBE_FCOE cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO)) IXGBE_ADVTXD_DCMD_TSE);
#else
if (tx_flags & IXGBE_TX_FLAGS_TSO) /* set timestamp bit if present */
#endif cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE); IXGBE_ADVTXD_MAC_TSTAMP);
/* insert frame checksum */ /* insert frame checksum */
if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS)) cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS);
return cmd_type; return cmd_type;
} }
...@@ -6062,28 +6061,27 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags) ...@@ -6062,28 +6061,27 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
u32 tx_flags, unsigned int paylen) u32 tx_flags, unsigned int paylen)
{ {
__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT); u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
/* enable L4 checksum for TSO and TX checksum offload */ /* enable L4 checksum for TSO and TX checksum offload */
if (tx_flags & IXGBE_TX_FLAGS_CSUM) olinfo_status |= IXGBE_SET_FLAG(tx_flags,
olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM); IXGBE_TX_FLAGS_CSUM,
IXGBE_ADVTXD_POPTS_TXSM);
/* enble IPv4 checksum for TSO */ /* enble IPv4 checksum for TSO */
if (tx_flags & IXGBE_TX_FLAGS_IPV4) olinfo_status |= IXGBE_SET_FLAG(tx_flags,
olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM); IXGBE_TX_FLAGS_IPV4,
IXGBE_ADVTXD_POPTS_IXSM);
/* /*
* Check Context must be set if Tx switch is enabled, which it * Check Context must be set if Tx switch is enabled, which it
* always is for case where virtual functions are running * always is for case where virtual functions are running
*/ */
#ifdef IXGBE_FCOE olinfo_status |= IXGBE_SET_FLAG(tx_flags,
if (tx_flags & (IXGBE_TX_FLAGS_TXSW | IXGBE_TX_FLAGS_FCOE)) IXGBE_TX_FLAGS_CC,
#else IXGBE_ADVTXD_CC);
if (tx_flags & IXGBE_TX_FLAGS_TXSW)
#endif
olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
tx_desc->read.olinfo_status = olinfo_status; tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
} }
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
...@@ -6102,13 +6100,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, ...@@ -6102,13 +6100,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
unsigned int size = skb_headlen(skb); unsigned int size = skb_headlen(skb);
unsigned int paylen = skb->len - hdr_len; unsigned int paylen = skb->len - hdr_len;
u32 tx_flags = first->tx_flags; u32 tx_flags = first->tx_flags;
__le32 cmd_type; u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
u16 i = tx_ring->next_to_use; u16 i = tx_ring->next_to_use;
tx_desc = IXGBE_TX_DESC(tx_ring, i); tx_desc = IXGBE_TX_DESC(tx_ring, i);
ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen); ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen);
cmd_type = ixgbe_tx_cmd_type(tx_flags);
#ifdef IXGBE_FCOE #ifdef IXGBE_FCOE
if (tx_flags & IXGBE_TX_FLAGS_FCOE) { if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
...@@ -6134,7 +6131,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, ...@@ -6134,7 +6131,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
for (;;) { for (;;) {
while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) { while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
tx_desc->read.cmd_type_len = tx_desc->read.cmd_type_len =
cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD); cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
i++; i++;
tx_desc++; tx_desc++;
...@@ -6153,7 +6150,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, ...@@ -6153,7 +6150,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
if (likely(!data_len)) if (likely(!data_len))
break; break;
tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
i++; i++;
tx_desc++; tx_desc++;
...@@ -6185,8 +6182,8 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, ...@@ -6185,8 +6182,8 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
} }
/* write last descriptor with RS and EOP bits */ /* write last descriptor with RS and EOP bits */
cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD); cmd_type |= size | IXGBE_TXD_CMD;
tx_desc->read.cmd_type_len = cmd_type; tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
...@@ -6447,7 +6444,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, ...@@ -6447,7 +6444,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
* Tx switch had been disabled. * Tx switch had been disabled.
*/ */
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
tx_flags |= IXGBE_TX_FLAGS_TXSW; tx_flags |= IXGBE_TX_FLAGS_CC;
#endif #endif
/* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */ /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment