Commit 14f662b4 authored by Alexander Lobakin, committed by Tony Nguyen

idpf: merge singleq and splitq &net_device_ops

It makes no sense to have a second &net_device_ops struct (800 bytes of
rodata) with only one difference in .ndo_start_xmit, which can easily
be just one `if`. This `if` is a drop in the ocean and you won't see
any difference.
Define a unified idpf_tx_start(). The preparation for sending is the
same, just call either idpf_tx_splitq_frame() or idpf_tx_singleq_frame()
depending on the active model to actually map and send the skb.
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent 5a816aae
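
In condensed form, the merged Tx entry point introduced by the diff below boils down to the following sketch (short-frame padding and the queue-stop checks of the real idpf_tx_start() are omitted here for brevity; all identifiers are taken from the patch itself):

netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
{
	struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
	struct idpf_tx_queue *tx_q;

	tx_q = vport->txqs[skb_get_queue_mapping(skb)];

	/* one ndo_start_xmit for both models; dispatch on the queue model */
	if (idpf_is_queue_model_split(vport->txq_model))
		return idpf_tx_splitq_frame(skb, tx_q);
	else
		return idpf_tx_singleq_frame(skb, tx_q);
}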
@@ -4,8 +4,7 @@
 #include "idpf.h"
 #include "idpf_virtchnl.h"
 
-static const struct net_device_ops idpf_netdev_ops_splitq;
-static const struct net_device_ops idpf_netdev_ops_singleq;
+static const struct net_device_ops idpf_netdev_ops;
 
 /**
  * idpf_init_vector_stack - Fill the MSIX vector stack with vector index
@@ -764,10 +763,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
 	}
 
 	/* assign netdev_ops */
-	if (idpf_is_queue_model_split(vport->txq_model))
-		netdev->netdev_ops = &idpf_netdev_ops_splitq;
-	else
-		netdev->netdev_ops = &idpf_netdev_ops_singleq;
+	netdev->netdev_ops = &idpf_netdev_ops;
 
 	/* setup watchdog timeout value to be 5 second */
 	netdev->watchdog_timeo = 5 * HZ;
@@ -2352,24 +2348,10 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
 	mem->pa = 0;
 }
 
-static const struct net_device_ops idpf_netdev_ops_splitq = {
-	.ndo_open = idpf_open,
-	.ndo_stop = idpf_stop,
-	.ndo_start_xmit = idpf_tx_splitq_start,
-	.ndo_features_check = idpf_features_check,
-	.ndo_set_rx_mode = idpf_set_rx_mode,
-	.ndo_validate_addr = eth_validate_addr,
-	.ndo_set_mac_address = idpf_set_mac,
-	.ndo_change_mtu = idpf_change_mtu,
-	.ndo_get_stats64 = idpf_get_stats64,
-	.ndo_set_features = idpf_set_features,
-	.ndo_tx_timeout = idpf_tx_timeout,
-};
-
-static const struct net_device_ops idpf_netdev_ops_singleq = {
+static const struct net_device_ops idpf_netdev_ops = {
 	.ndo_open = idpf_open,
 	.ndo_stop = idpf_stop,
-	.ndo_start_xmit = idpf_tx_singleq_start,
+	.ndo_start_xmit = idpf_tx_start,
 	.ndo_features_check = idpf_features_check,
 	.ndo_set_rx_mode = idpf_set_rx_mode,
 	.ndo_validate_addr = eth_validate_addr,
...
@@ -351,7 +351,7 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_tx_queue *txq,
  *
  * Returns NETDEV_TX_OK if sent, else an error code
  */
-static netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
-					 struct idpf_tx_queue *tx_q)
+netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
+				  struct idpf_tx_queue *tx_q)
 {
 	struct idpf_tx_offload_params offload = { };
@@ -408,33 +408,6 @@ static netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
 	return idpf_tx_drop_skb(tx_q, skb);
 }
 
-/**
- * idpf_tx_singleq_start - Selects the right Tx queue to send buffer
- * @skb: send buffer
- * @netdev: network interface device structure
- *
- * Returns NETDEV_TX_OK if sent, else an error code
- */
-netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
-				  struct net_device *netdev)
-{
-	struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
-	struct idpf_tx_queue *tx_q;
-
-	tx_q = vport->txqs[skb_get_queue_mapping(skb)];
-
-	/* hardware can't handle really short frames, hardware padding works
-	 * beyond this point
-	 */
-	if (skb_put_padto(skb, IDPF_TX_MIN_PKT_LEN)) {
-		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
-		return NETDEV_TX_OK;
-	}
-
-	return idpf_tx_singleq_frame(skb, tx_q);
-}
-
 /**
  * idpf_tx_singleq_clean - Reclaim resources from queue
  * @tx_q: Tx queue to clean
...
@@ -4,6 +4,9 @@
 #include "idpf.h"
 #include "idpf_virtchnl.h"
 
+static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
+			       unsigned int count);
+
 /**
  * idpf_buf_lifo_push - push a buffer pointer onto stack
  * @stack: pointer to stack struct
@@ -2702,7 +2705,7 @@ static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs)
  * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO
  * header, 1 for segment payload, and then 7 for the fragments.
  */
-bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
-			unsigned int count)
+static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
+			       unsigned int count)
 {
 	if (likely(count < max_bufs))
@@ -2849,14 +2852,13 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
 }
 
 /**
- * idpf_tx_splitq_start - Selects the right Tx queue to send buffer
+ * idpf_tx_start - Selects the right Tx queue to send buffer
  * @skb: send buffer
  * @netdev: network interface device structure
  *
  * Returns NETDEV_TX_OK if sent, else an error code
  */
-netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
-				 struct net_device *netdev)
+netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
 	struct idpf_tx_queue *tx_q;
@@ -2878,7 +2880,10 @@ netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
 		return NETDEV_TX_OK;
 	}
 
-	return idpf_tx_splitq_frame(skb, tx_q);
+	if (idpf_is_queue_model_split(vport->txq_model))
+		return idpf_tx_splitq_frame(skb, tx_q);
+	else
+		return idpf_tx_singleq_frame(skb, tx_q);
 }
 
 /**
...
@@ -1198,14 +1198,11 @@ void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
 			   struct idpf_tx_buf *first, u16 ring_idx);
 unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
 					 struct sk_buff *skb);
-bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
-			unsigned int count);
 int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size);
 void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
-netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
-				 struct net_device *netdev);
-netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
-				  struct net_device *netdev);
+netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
+				  struct idpf_tx_queue *tx_q);
+netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev);
 bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
 				      u16 cleaned_count);
 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
...