Commit e399553d authored by Pavel Belous, committed by David S. Miller

net: ethernet: aquantia: Copying tx buffers is not needed.

This fix removes copying of tx buffers.
Now we use ring->buff_ring directly.
Signed-off-by: Pavel Belous <pavel.belous@aquantia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 89b64388
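For context on the diff below: the old code built tx descriptors in an on-stack array and copied them into the ring with aq_ring_tx_append_buffs(), splitting the memcpy when the span wrapped past the end of the ring; the new code writes each descriptor into ring->buff_ring in place and advances the index with aq_ring_next_dx(). A minimal sketch of the two patterns, using simplified hypothetical types (not the driver's real structures):

#include <string.h>

#define RING_SIZE 64U

struct buff { unsigned int len; };	/* stand-in for struct aq_ring_buff_s */

struct ring {
	struct buff buff_ring[RING_SIZE];
	unsigned int sw_tail;
};

/* Old pattern: descriptors are staged in a temporary array, then memcpy'd
 * into the ring, split into two copies when the span wraps past the end. */
static void append_by_copy(struct ring *r, const struct buff *tmp,
			   unsigned int n)
{
	unsigned int first = RING_SIZE - r->sw_tail;

	if (n <= first) {
		memcpy(&r->buff_ring[r->sw_tail], tmp, sizeof(tmp[0]) * n);
	} else {
		memcpy(&r->buff_ring[r->sw_tail], tmp, sizeof(tmp[0]) * first);
		memcpy(&r->buff_ring[0], &tmp[first],
		       sizeof(tmp[0]) * (n - first));
	}
	r->sw_tail = (r->sw_tail + n) % RING_SIZE;
}

/* New pattern: each descriptor is written into the ring directly and the
 * index wraps via modulo, so no staging array and no memcpy are needed. */
static void append_in_place(struct ring *r, unsigned int len)
{
	r->buff_ring[r->sw_tail].len = len;
	r->sw_tail = (r->sw_tail + 1U) % RING_SIZE;
}

Besides saving one memcpy per packet on the hot tx path, writing in place lets the new mapping_error path in the diff unwind DMA mappings for descriptors already written to the ring.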
@@ -468,95 +468,116 @@ int aq_nic_start(struct aq_nic_s *self)
 	return err;
 }
 
-static unsigned int aq_nic_map_skb_frag(struct aq_nic_s *self,
-					struct sk_buff *skb,
-					struct aq_ring_buff_s *dx)
+static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
+				   struct sk_buff *skb,
+				   struct aq_ring_s *ring)
 {
 	unsigned int ret = 0U;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int frag_count = 0U;
+	unsigned int dx = ring->sw_tail;
+	struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
+
+	if (unlikely(skb_is_gso(skb))) {
+		dx_buff->flags = 0U;
+		dx_buff->len_pkt = skb->len;
+		dx_buff->len_l2 = ETH_HLEN;
+		dx_buff->len_l3 = ip_hdrlen(skb);
+		dx_buff->len_l4 = tcp_hdrlen(skb);
+		dx_buff->mss = skb_shinfo(skb)->gso_size;
+		dx_buff->is_txc = 1U;
+
+		dx = aq_ring_next_dx(ring, dx);
+		dx_buff = &ring->buff_ring[dx];
+		++ret;
+	}
 
-	dx->flags = 0U;
-	dx->len = skb_headlen(skb);
-	dx->pa = dma_map_single(aq_nic_get_dev(self), skb->data, dx->len,
-				DMA_TO_DEVICE);
-	dx->len_pkt = skb->len;
-	dx->is_sop = 1U;
-	dx->is_mapped = 1U;
+	dx_buff->flags = 0U;
+	dx_buff->len = skb_headlen(skb);
+	dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
+				     skb->data,
+				     dx_buff->len,
+				     DMA_TO_DEVICE);
+
+	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
+		goto exit;
+
+	dx_buff->len_pkt = skb->len;
+	dx_buff->is_sop = 1U;
+	dx_buff->is_mapped = 1U;
 	++ret;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		dx->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? 1U : 0U;
-		dx->is_tcp_cso =
+		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
+			1U : 0U;
+		dx_buff->is_tcp_cso =
 			(ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U;
-		dx->is_udp_cso =
+		dx_buff->is_udp_cso =
 			(ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U;
 	}
 
 	for (; nr_frags--; ++frag_count) {
-		unsigned int frag_len;
+		unsigned int frag_len = 0U;
 		dma_addr_t frag_pa;
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];
 
 		frag_len = skb_frag_size(frag);
 		frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0,
 					   frag_len, DMA_TO_DEVICE);
 
+		if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa)))
+			goto mapping_error;
+
 		while (frag_len > AQ_CFG_TX_FRAME_MAX) {
-			++dx;
-			++ret;
-			dx->flags = 0U;
-			dx->len = AQ_CFG_TX_FRAME_MAX;
-			dx->pa = frag_pa;
-			dx->is_mapped = 1U;
+			dx = aq_ring_next_dx(ring, dx);
+			dx_buff = &ring->buff_ring[dx];
+
+			dx_buff->flags = 0U;
+			dx_buff->len = AQ_CFG_TX_FRAME_MAX;
+			dx_buff->pa = frag_pa;
+			dx_buff->is_mapped = 1U;
 
 			frag_len -= AQ_CFG_TX_FRAME_MAX;
 			frag_pa += AQ_CFG_TX_FRAME_MAX;
+			++ret;
 		}
 
-		++dx;
-		++ret;
-		dx->flags = 0U;
-		dx->len = frag_len;
-		dx->pa = frag_pa;
-		dx->is_mapped = 1U;
+		dx = aq_ring_next_dx(ring, dx);
+		dx_buff = &ring->buff_ring[dx];
+
+		dx_buff->flags = 0U;
+		dx_buff->len = frag_len;
+		dx_buff->pa = frag_pa;
+		dx_buff->is_mapped = 1U;
+		++ret;
 	}
 
-	dx->is_eop = 1U;
-	dx->skb = skb;
+	dx_buff->is_eop = 1U;
+	dx_buff->skb = skb;
+	goto exit;
 
-	return ret;
-}
-
-static unsigned int aq_nic_map_skb_lso(struct aq_nic_s *self,
-				       struct sk_buff *skb,
-				       struct aq_ring_buff_s *dx)
-{
-	dx->flags = 0U;
-	dx->len_pkt = skb->len;
-	dx->len_l2 = ETH_HLEN;
-	dx->len_l3 = ip_hdrlen(skb);
-	dx->len_l4 = tcp_hdrlen(skb);
-	dx->mss = skb_shinfo(skb)->gso_size;
-	dx->is_txc = 1U;
-	return 1U;
-}
-
-static unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
-				   struct aq_ring_buff_s *dx)
-{
-	unsigned int ret = 0U;
-
-	if (unlikely(skb_is_gso(skb))) {
-		ret = aq_nic_map_skb_lso(self, skb, dx);
-		++dx;
+mapping_error:
+	for (dx = ring->sw_tail;
+	     ret > 0;
+	     --ret, dx = aq_ring_next_dx(ring, dx)) {
+		dx_buff = &ring->buff_ring[dx];
+
+		if (!dx_buff->is_txc && dx_buff->pa) {
+			if (unlikely(dx_buff->is_sop)) {
+				dma_unmap_single(aq_nic_get_dev(self),
+						 dx_buff->pa,
+						 dx_buff->len,
+						 DMA_TO_DEVICE);
+			} else {
+				dma_unmap_page(aq_nic_get_dev(self),
+					       dx_buff->pa,
+					       dx_buff->len,
+					       DMA_TO_DEVICE);
+			}
+		}
 	}
 
-	ret += aq_nic_map_skb_frag(self, skb, dx);
-
+exit:
 	return ret;
 }
@@ -571,7 +592,6 @@ __acquires(&ring->lock)
 	unsigned int trys = AQ_CFG_LOCK_TRYS;
 	int err = NETDEV_TX_OK;
 	bool is_nic_in_bad_state;
-	struct aq_ring_buff_s buffers[AQ_CFG_SKB_FRAGS_MAX];
 
 	frags = skb_shinfo(skb)->nr_frags + 1;
@@ -595,23 +615,27 @@ __acquires(&ring->lock)
 	do {
 		if (spin_trylock(&ring->header.lock)) {
-			frags = aq_nic_map_skb(self, skb, &buffers[0]);
-			aq_ring_tx_append_buffs(ring, &buffers[0], frags);
-			err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
-							      ring, frags);
-			if (err >= 0) {
-				if (aq_ring_avail_dx(ring) <
-				    AQ_CFG_SKB_FRAGS_MAX + 1)
-					aq_nic_ndev_queue_stop(self, ring->idx);
-			}
-			spin_unlock(&ring->header.lock);
+			frags = aq_nic_map_skb(self, skb, ring);
 
-			if (err >= 0) {
-				++ring->stats.tx.packets;
-				ring->stats.tx.bytes += skb->len;
+			if (likely(frags)) {
+				err = self->aq_hw_ops.hw_ring_tx_xmit(
+								self->aq_hw,
+								ring, frags);
+				if (err >= 0) {
+					if (aq_ring_avail_dx(ring) <
+					    AQ_CFG_SKB_FRAGS_MAX + 1)
+						aq_nic_ndev_queue_stop(
+								self,
+								ring->idx);
+
+					++ring->stats.tx.packets;
+					ring->stats.tx.bytes += skb->len;
+				}
+			} else {
+				err = NETDEV_TX_BUSY;
 			}
+
+			spin_unlock(&ring->header.lock);
 			break;
 		}
 	} while (--trys);
@@ -104,25 +104,6 @@ int aq_ring_init(struct aq_ring_s *self)
 	return 0;
 }
 
-void aq_ring_tx_append_buffs(struct aq_ring_s *self,
-			     struct aq_ring_buff_s *buffer,
-			     unsigned int buffers)
-{
-	if (likely(self->sw_tail + buffers < self->size)) {
-		memcpy(&self->buff_ring[self->sw_tail], buffer,
-		       sizeof(buffer[0]) * buffers);
-	} else {
-		unsigned int first_part = self->size - self->sw_tail;
-		unsigned int second_part = buffers - first_part;
-
-		memcpy(&self->buff_ring[self->sw_tail], buffer,
-		       sizeof(buffer[0]) * first_part);
-
-		memcpy(&self->buff_ring[0], &buffer[first_part],
-		       sizeof(buffer[0]) * second_part);
-	}
-}
-
 void aq_ring_tx_clean(struct aq_ring_s *self)
 {
 	struct device *dev = aq_nic_get_dev(self->aq_nic);
@@ -146,9 +146,6 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
 int aq_ring_init(struct aq_ring_s *self);
 void aq_ring_rx_deinit(struct aq_ring_s *self);
 void aq_ring_free(struct aq_ring_s *self);
-void aq_ring_tx_append_buffs(struct aq_ring_s *ring,
-			     struct aq_ring_buff_s *buffer,
-			     unsigned int buffers);
 void aq_ring_tx_clean(struct aq_ring_s *self);
 int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget);
 int aq_ring_rx_fill(struct aq_ring_s *self);