Commit 39a044f4 authored by David Arinzon, committed by Jakub Kicinski

net: ena: Pass ena_adapter instead of net_device to ena_xmit_common()

This change will enable the ability to use ena_xmit_common()
in functions that don't have a net_device pointer.
While it can be retrieved by dereferencing
ena_adapter (adapter->netdev), there's no reason to do it in
fast path code where this pointer is only needed for
debug prints.
Signed-off-by: Shay Agroskin <shayagr@amazon.com>
Signed-off-by: David Arinzon <darinzon@amazon.com>
Link: https://lore.kernel.org/r/20240101190855.18739-3-darinzon@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent d000574d
...@@ -88,19 +88,18 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu) ...@@ -88,19 +88,18 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu)
return ret; return ret;
} }
int ena_xmit_common(struct net_device *dev, int ena_xmit_common(struct ena_adapter *adapter,
struct ena_ring *ring, struct ena_ring *ring,
struct ena_tx_buffer *tx_info, struct ena_tx_buffer *tx_info,
struct ena_com_tx_ctx *ena_tx_ctx, struct ena_com_tx_ctx *ena_tx_ctx,
u16 next_to_use, u16 next_to_use,
u32 bytes) u32 bytes)
{ {
struct ena_adapter *adapter = netdev_priv(dev);
int rc, nb_hw_desc; int rc, nb_hw_desc;
if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq, if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
ena_tx_ctx))) { ena_tx_ctx))) {
netif_dbg(adapter, tx_queued, dev, netif_dbg(adapter, tx_queued, adapter->netdev,
"llq tx max burst size of queue %d achieved, writing doorbell to send burst\n", "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
ring->qid); ring->qid);
ena_ring_tx_doorbell(ring); ena_ring_tx_doorbell(ring);
...@@ -115,7 +114,7 @@ int ena_xmit_common(struct net_device *dev, ...@@ -115,7 +114,7 @@ int ena_xmit_common(struct net_device *dev,
* ena_com_prepare_tx() are fatal and therefore require a device reset. * ena_com_prepare_tx() are fatal and therefore require a device reset.
*/ */
if (unlikely(rc)) { if (unlikely(rc)) {
netif_err(adapter, tx_queued, dev, netif_err(adapter, tx_queued, adapter->netdev,
"Failed to prepare tx bufs\n"); "Failed to prepare tx bufs\n");
ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
&ring->syncp); &ring->syncp);
...@@ -2599,7 +2598,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -2599,7 +2598,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set flags and meta data */ /* set flags and meta data */
ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching); ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching);
rc = ena_xmit_common(dev, rc = ena_xmit_common(adapter,
tx_ring, tx_ring,
tx_info, tx_info,
&ena_tx_ctx, &ena_tx_ctx,
......
...@@ -426,7 +426,7 @@ static inline void ena_ring_tx_doorbell(struct ena_ring *tx_ring) ...@@ -426,7 +426,7 @@ static inline void ena_ring_tx_doorbell(struct ena_ring *tx_ring)
ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp); ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);
} }
int ena_xmit_common(struct net_device *dev, int ena_xmit_common(struct ena_adapter *adapter,
struct ena_ring *ring, struct ena_ring *ring,
struct ena_tx_buffer *tx_info, struct ena_tx_buffer *tx_info,
struct ena_com_tx_ctx *ena_tx_ctx, struct ena_com_tx_ctx *ena_tx_ctx,
......
...@@ -73,7 +73,7 @@ static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring, ...@@ -73,7 +73,7 @@ static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
} }
int ena_xdp_xmit_frame(struct ena_ring *xdp_ring, int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
struct net_device *dev, struct ena_adapter *adapter,
struct xdp_frame *xdpf, struct xdp_frame *xdpf,
int flags) int flags)
{ {
...@@ -93,7 +93,7 @@ int ena_xdp_xmit_frame(struct ena_ring *xdp_ring, ...@@ -93,7 +93,7 @@ int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
ena_tx_ctx.req_id = req_id; ena_tx_ctx.req_id = req_id;
rc = ena_xmit_common(dev, rc = ena_xmit_common(adapter,
xdp_ring, xdp_ring,
tx_info, tx_info,
&ena_tx_ctx, &ena_tx_ctx,
...@@ -141,7 +141,7 @@ int ena_xdp_xmit(struct net_device *dev, int n, ...@@ -141,7 +141,7 @@ int ena_xdp_xmit(struct net_device *dev, int n,
spin_lock(&xdp_ring->xdp_tx_lock); spin_lock(&xdp_ring->xdp_tx_lock);
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
if (ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0)) if (ena_xdp_xmit_frame(xdp_ring, adapter, frames[i], 0))
break; break;
nxmit++; nxmit++;
} }
......
...@@ -36,7 +36,7 @@ void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter, ...@@ -36,7 +36,7 @@ void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
int first, int count); int first, int count);
int ena_xdp_io_poll(struct napi_struct *napi, int budget); int ena_xdp_io_poll(struct napi_struct *napi, int budget);
int ena_xdp_xmit_frame(struct ena_ring *xdp_ring, int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
struct net_device *dev, struct ena_adapter *adapter,
struct xdp_frame *xdpf, struct xdp_frame *xdpf,
int flags); int flags);
int ena_xdp_xmit(struct net_device *dev, int n, int ena_xdp_xmit(struct net_device *dev, int n,
...@@ -108,7 +108,7 @@ static inline int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp ...@@ -108,7 +108,7 @@ static inline int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp
/* The XDP queues are shared between XDP_TX and XDP_REDIRECT */ /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
spin_lock(&xdp_ring->xdp_tx_lock); spin_lock(&xdp_ring->xdp_tx_lock);
if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf, if (ena_xdp_xmit_frame(xdp_ring, rx_ring->adapter, xdpf,
XDP_XMIT_FLUSH)) XDP_XMIT_FLUSH))
xdp_return_frame(xdpf); xdp_return_frame(xdpf);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment