Commit 3e587cf3 authored by Jesse Brandeburg, committed by Jeff Kirsher

i40e/i40evf: force inline transmit functions

Inlining these functions gives us about 15% more 64-byte packets per
second when using pktgen: from 13.3 million to 15 million with a single
queue.

Also fix the function names in i40evf to use the i40evf prefix rather
than i40e while we are touching the function headers.

Change-ID: I3294ae9b085cf438672b6db5f9af122490ead9d0
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Catherine Sullivan <catherine.sullivan@intel.com>
Tested-by: Jim Young <james.m.young@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 8f6a2b05
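The change itself is mechanical: each helper called once per packet on the
transmit path gains the inline keyword, static inline for the driver-local
definitions and plain inline for the I40E_FCOE builds that export the
symbols, so the compiler can fold the helpers into i40e_xmit_frame_ring()
instead of issuing a call per packet. A minimal sketch of the pattern under
simplified, assumed types (tx_ring, ring_unused() and ring_maybe_stop() are
illustrative stand-ins, not the driver's own symbols):

struct tx_ring {
        unsigned int next_to_use;       /* producer index */
        unsigned int next_to_clean;     /* consumer index */
        unsigned int count;             /* descriptors in the ring */
};

/*
 * "static inline" asks the compiler to expand the body at every call
 * site, so the per-packet fast path pays no call/return overhead.
 * Before this patch the equivalent i40e helpers were plain "static"
 * (or non-static under I40E_FCOE) and could be emitted out of line.
 */
static inline unsigned int ring_unused(const struct tx_ring *ring)
{
        /* free descriptors between the clean and use cursors */
        return ((ring->next_to_clean > ring->next_to_use) ? 0 : ring->count) +
               ring->next_to_clean - ring->next_to_use - 1;
}

static inline int ring_maybe_stop(const struct tx_ring *ring,
                                  unsigned int needed)
{
        /* nonzero means the caller should stop the tx queue */
        return ring_unused(ring) < needed;
}

Note that inline is only a hint to the compiler; the kernel also offers
__always_inline where expansion must be guaranteed, but this patch relies
on the plain keyword and lets the pktgen numbers above confirm the effect.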
@@ -2063,13 +2063,13 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
  * otherwise returns 0 to indicate the flags has been set properly.
  **/
 #ifdef I40E_FCOE
-int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-                               struct i40e_ring *tx_ring,
-                               u32 *flags)
+inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+                                      struct i40e_ring *tx_ring,
+                                      u32 *flags)
 #else
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-                                      struct i40e_ring *tx_ring,
-                                      u32 *flags)
+static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+                                             struct i40e_ring *tx_ring,
+                                             u32 *flags)
 #endif
 {
         __be16 protocol = skb->protocol;
@@ -2412,9 +2412,9 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  * Returns 0 if stop is not needed
  **/
 #ifdef I40E_FCOE
-int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 #else
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 #endif
 {
         if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
@@ -2494,13 +2494,13 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
  * @td_offset: offset for checksum or crc
  **/
 #ifdef I40E_FCOE
-void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                 struct i40e_tx_buffer *first, u32 tx_flags,
-                 const u8 hdr_len, u32 td_cmd, u32 td_offset)
+inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+                        struct i40e_tx_buffer *first, u32 tx_flags,
+                        const u8 hdr_len, u32 td_cmd, u32 td_offset)
 #else
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                        struct i40e_tx_buffer *first, u32 tx_flags,
-                        const u8 hdr_len, u32 td_cmd, u32 td_offset)
+static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+                               struct i40e_tx_buffer *first, u32 tx_flags,
+                               const u8 hdr_len, u32 td_cmd, u32 td_offset)
 #endif
 {
         unsigned int data_len = skb->data_len;
@@ -2661,11 +2661,11 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
  * one descriptor.
  **/
 #ifdef I40E_FCOE
-int i40e_xmit_descriptor_count(struct sk_buff *skb,
-                               struct i40e_ring *tx_ring)
+inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
+                                      struct i40e_ring *tx_ring)
 #else
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
-                                      struct i40e_ring *tx_ring)
+static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
+                                             struct i40e_ring *tx_ring)
 #endif
 {
         unsigned int f;
...
@@ -1347,7 +1347,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
 }
 
 /**
- * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
  * @skb: send buffer
  * @tx_ring: ring to send buffer on
  * @flags: the tx flags to be set
@@ -1358,9 +1358,9 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
  * Returns error code indicate the frame should be dropped upon error and the
  * otherwise returns 0 to indicate the flags has been set properly.
  **/
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-                                      struct i40e_ring *tx_ring,
-                                      u32 *flags)
+static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
+                                               struct i40e_ring *tx_ring,
+                                               u32 *flags)
 {
         __be16 protocol = skb->protocol;
         u32 tx_flags = 0;
@@ -1699,11 +1699,7 @@ static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  *
  * Returns 0 if stop is not needed
  **/
-#ifdef I40E_FCOE
-int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-#else
-static int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-#endif
+static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 {
         if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
                 return 0;
@@ -1711,7 +1707,7 @@ static int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 }
 
 /**
- * i40e_tx_map - Build the Tx descriptor
+ * i40evf_tx_map - Build the Tx descriptor
  * @tx_ring: ring to send buffer on
  * @skb: send buffer
  * @first: first buffer info buffer to use
@@ -1720,9 +1716,9 @@ static int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  * @td_cmd: the command field in the descriptor
  * @td_offset: offset for checksum or crc
  **/
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                        struct i40e_tx_buffer *first, u32 tx_flags,
-                        const u8 hdr_len, u32 td_cmd, u32 td_offset)
+static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+                                 struct i40e_tx_buffer *first, u32 tx_flags,
+                                 const u8 hdr_len, u32 td_cmd, u32 td_offset)
 {
         unsigned int data_len = skb->data_len;
         unsigned int size = skb_headlen(skb);
@@ -1874,7 +1870,7 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 }
 
 /**
- * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed
  * @skb: send buffer
  * @tx_ring: ring to send buffer on
  *
@@ -1882,8 +1878,8 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
  * there is not enough descriptors available in this ring since we need at least
  * one descriptor.
  **/
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
-                                      struct i40e_ring *tx_ring)
+static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb,
+                                               struct i40e_ring *tx_ring)
 {
         unsigned int f;
         int count = 0;
@@ -1924,11 +1920,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
         u32 td_cmd = 0;
         u8 hdr_len = 0;
         int tso;
-        if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
+        if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
                 return NETDEV_TX_BUSY;
 
         /* prepare the xmit flags */
-        if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+        if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
                 goto out_drop;
 
         /* obtain protocol of skb */
@@ -1971,8 +1967,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
         i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
                            cd_tunneling, cd_l2tag2);
 
-        i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
-                    td_cmd, td_offset);
+        i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+                      td_cmd, td_offset);
 
         return NETDEV_TX_OK;
...