Commit c3cf73c7 authored by David S. Miller

Merge branch 'aquantia-fixes'

Igor Russkikh says:

====================
net: aquantia: various fixes May, 2019

Here is a set of various bug fixes found on recent verification stage.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3e66b7cc 76f254d4
...@@ -223,10 +223,10 @@ void aq_ring_queue_stop(struct aq_ring_s *ring) ...@@ -223,10 +223,10 @@ void aq_ring_queue_stop(struct aq_ring_s *ring)
bool aq_ring_tx_clean(struct aq_ring_s *self) bool aq_ring_tx_clean(struct aq_ring_s *self)
{ {
struct device *dev = aq_nic_get_dev(self->aq_nic); struct device *dev = aq_nic_get_dev(self->aq_nic);
unsigned int budget = AQ_CFG_TX_CLEAN_BUDGET; unsigned int budget;
for (; self->sw_head != self->hw_head && budget--; for (budget = AQ_CFG_TX_CLEAN_BUDGET;
self->sw_head = aq_ring_next_dx(self, self->sw_head)) { budget && self->sw_head != self->hw_head; budget--) {
struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
if (likely(buff->is_mapped)) { if (likely(buff->is_mapped)) {
...@@ -251,6 +251,7 @@ bool aq_ring_tx_clean(struct aq_ring_s *self) ...@@ -251,6 +251,7 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
buff->pa = 0U; buff->pa = 0U;
buff->eop_index = 0xffffU; buff->eop_index = 0xffffU;
self->sw_head = aq_ring_next_dx(self, self->sw_head);
} }
return !!budget; return !!budget;
...@@ -298,35 +299,47 @@ int aq_ring_rx_clean(struct aq_ring_s *self, ...@@ -298,35 +299,47 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
unsigned int i = 0U; unsigned int i = 0U;
u16 hdr_len; u16 hdr_len;
if (buff->is_error)
continue;
if (buff->is_cleaned) if (buff->is_cleaned)
continue; continue;
if (!buff->is_eop) { if (!buff->is_eop) {
for (next_ = buff->next, buff_ = buff;
buff_ = &self->buff_ring[next_]; true; do {
next_ = buff_->next, next_ = buff_->next,
buff_ = &self->buff_ring[next_]) { buff_ = &self->buff_ring[next_];
is_rsc_completed = is_rsc_completed =
aq_ring_dx_in_range(self->sw_head, aq_ring_dx_in_range(self->sw_head,
next_, next_,
self->hw_head); self->hw_head);
if (unlikely(!is_rsc_completed)) { if (unlikely(!is_rsc_completed))
is_rsc_completed = false;
break; break;
}
if (buff_->is_eop) buff->is_error |= buff_->is_error;
break;
} } while (!buff_->is_eop);
if (!is_rsc_completed) { if (!is_rsc_completed) {
err = 0; err = 0;
goto err_exit; goto err_exit;
} }
if (buff->is_error) {
buff_ = buff;
do {
next_ = buff_->next,
buff_ = &self->buff_ring[next_];
buff_->is_cleaned = true;
} while (!buff_->is_eop);
++self->stats.rx.errors;
continue;
}
}
if (buff->is_error) {
++self->stats.rx.errors;
continue;
} }
dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic), dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
...@@ -389,6 +402,12 @@ int aq_ring_rx_clean(struct aq_ring_s *self, ...@@ -389,6 +402,12 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
AQ_CFG_RX_FRAME_MAX); AQ_CFG_RX_FRAME_MAX);
page_ref_inc(buff_->rxdata.page); page_ref_inc(buff_->rxdata.page);
buff_->is_cleaned = 1; buff_->is_cleaned = 1;
buff->is_ip_cso &= buff_->is_ip_cso;
buff->is_udp_cso &= buff_->is_udp_cso;
buff->is_tcp_cso &= buff_->is_tcp_cso;
buff->is_cso_err |= buff_->is_cso_err;
} while (!buff_->is_eop); } while (!buff_->is_eop);
} }
} }
......
...@@ -266,12 +266,11 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, ...@@ -266,12 +266,11 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
*/ */
hw_atl_rpo_lro_max_coalescing_interval_set(self, 50); hw_atl_rpo_lro_max_coalescing_interval_set(self, 50);
hw_atl_rpo_lro_qsessions_lim_set(self, 1U); hw_atl_rpo_lro_qsessions_lim_set(self, 1U);
hw_atl_rpo_lro_total_desc_lim_set(self, 2U); hw_atl_rpo_lro_total_desc_lim_set(self, 2U);
hw_atl_rpo_lro_patch_optimization_en_set(self, 0U); hw_atl_rpo_lro_patch_optimization_en_set(self, 1U);
hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U); hw_atl_rpo_lro_min_pay_of_first_pkt_set(self, 10U);
...@@ -713,38 +712,41 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, ...@@ -713,38 +712,41 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) { if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
/* MAC error or DMA error */ /* MAC error or DMA error */
buff->is_error = 1U; buff->is_error = 1U;
} else { }
if (self->aq_nic_cfg->is_rss) { if (self->aq_nic_cfg->is_rss) {
/* last 4 byte */ /* last 4 byte */
u16 rss_type = rxd_wb->type & 0xFU; u16 rss_type = rxd_wb->type & 0xFU;
if (rss_type && rss_type < 0x8U) { if (rss_type && rss_type < 0x8U) {
buff->is_hash_l4 = (rss_type == 0x4 || buff->is_hash_l4 = (rss_type == 0x4 ||
rss_type == 0x5); rss_type == 0x5);
buff->rss_hash = rxd_wb->rss_hash; buff->rss_hash = rxd_wb->rss_hash;
}
} }
}
if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) { if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
buff->len = rxd_wb->pkt_len % buff->len = rxd_wb->pkt_len %
AQ_CFG_RX_FRAME_MAX; AQ_CFG_RX_FRAME_MAX;
buff->len = buff->len ? buff->len = buff->len ?
buff->len : AQ_CFG_RX_FRAME_MAX; buff->len : AQ_CFG_RX_FRAME_MAX;
buff->next = 0U; buff->next = 0U;
buff->is_eop = 1U; buff->is_eop = 1U;
} else {
buff->len =
rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;
if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
rxd_wb->status) {
/* LRO */
buff->next = rxd_wb->next_desc_ptr;
++ring->stats.rx.lro_packets;
} else { } else {
if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT & /* jumbo */
rxd_wb->status) { buff->next =
/* LRO */ aq_ring_next_dx(ring,
buff->next = rxd_wb->next_desc_ptr; ring->hw_head);
++ring->stats.rx.lro_packets; ++ring->stats.rx.jumbo_packets;
} else {
/* jumbo */
buff->next =
aq_ring_next_dx(ring,
ring->hw_head);
++ring->stats.rx.jumbo_packets;
}
} }
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment