Commit beae0413 authored by David S. Miller

Merge branch 'aquantia-fixes'

Pavel Belous says:

====================
net:ethernet:aquantia: Atlantic driver Update 2017-08-23

This series contains updates for the aQuantia Atlantic driver:
bug fixes and a few small improvements.

Changes in v2:
 - "MCP state change" fix removed (will be sent as a separate fix
   after further investigation).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents edbd58be 6d3f58e0
drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -105,8 +105,7 @@ struct aq_hw_ops {
         int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
 
-        int (*hw_get_link_status)(struct aq_hw_s *self,
-                                  struct aq_hw_link_status_s *link_status);
+        int (*hw_get_link_status)(struct aq_hw_s *self);
 
         int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed);
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -103,6 +103,8 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
         else
                 cfg->vecs = 1U;
 
+        cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
+
         cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func);
 
         if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
@@ -123,33 +125,30 @@ static void aq_nic_service_timer_cb(unsigned long param)
         struct net_device *ndev = aq_nic_get_ndev(self);
         int err = 0;
         unsigned int i = 0U;
-        struct aq_hw_link_status_s link_status;
         struct aq_ring_stats_rx_s stats_rx;
         struct aq_ring_stats_tx_s stats_tx;
 
         if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
                 goto err_exit;
 
-        err = self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status);
+        err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
         if (err < 0)
                 goto err_exit;
 
-        self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
-                self->aq_nic_cfg.is_interrupt_moderation);
-
-        if (memcmp(&link_status, &self->link_status, sizeof(link_status))) {
-                if (link_status.mbps) {
-                        aq_utils_obj_set(&self->header.flags,
-                                         AQ_NIC_FLAG_STARTED);
-                        aq_utils_obj_clear(&self->header.flags,
-                                           AQ_NIC_LINK_DOWN);
-                        netif_carrier_on(self->ndev);
-                } else {
-                        netif_carrier_off(self->ndev);
-                        aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
-                }
-
-                self->link_status = link_status;
-        }
+        self->link_status = self->aq_hw->aq_link_status;
+
+        self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
+                self->aq_nic_cfg.is_interrupt_moderation);
+
+        if (self->link_status.mbps) {
+                aq_utils_obj_set(&self->header.flags,
+                                 AQ_NIC_FLAG_STARTED);
+                aq_utils_obj_clear(&self->header.flags,
+                                   AQ_NIC_LINK_DOWN);
+                netif_carrier_on(self->ndev);
+        } else {
+                netif_carrier_off(self->ndev);
+                aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
+        }
 
         memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
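
Taken together with the HAL hunks below, the contract change here is that hw_get_link_status() no longer fills a caller-supplied struct: the HAL stores its result in self->aq_hw->aq_link_status, and the NIC layer copies it into self->link_status before acting on it. A minimal sketch of the new flow (types and names as in this diff, error handling abridged):

/* Sketch only: the HAL now owns the authoritative link state. */
static void service_timer_sketch(struct aq_nic_s *self)
{
        /* the HAL decodes HW_ATL_MPI_STATE_ADR into aq_link_status */
        if (self->aq_hw_ops.hw_get_link_status(self->aq_hw) < 0)
                return;

        /* the NIC layer keeps a private copy for carrier decisions */
        self->link_status = self->aq_hw->aq_link_status;

        if (self->link_status.mbps)
                netif_carrier_on(self->ndev);
        else
                netif_carrier_off(self->ndev);
}

The memcmp()-based "did anything change?" guard is also gone; that is safe because netif_carrier_on()/netif_carrier_off() only act on an actual state transition, so calling them on every timer tick is harmless.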
@@ -597,14 +596,11 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
 }
 
 int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
-__releases(&ring->lock)
-__acquires(&ring->lock)
 {
         struct aq_ring_s *ring = NULL;
         unsigned int frags = 0U;
         unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
         unsigned int tc = 0U;
-        unsigned int trys = AQ_CFG_LOCK_TRYS;
         int err = NETDEV_TX_OK;
         bool is_nic_in_bad_state;
@@ -628,36 +624,21 @@ __acquires(&ring->lock)
                 goto err_exit;
         }
 
-        do {
-                if (spin_trylock(&ring->header.lock)) {
-                        frags = aq_nic_map_skb(self, skb, ring);
-
-                        if (likely(frags)) {
-                                err = self->aq_hw_ops.hw_ring_tx_xmit(
-                                                                self->aq_hw,
-                                                                ring, frags);
-                                if (err >= 0) {
-                                        if (aq_ring_avail_dx(ring) <
-                                            AQ_CFG_SKB_FRAGS_MAX + 1)
-                                                aq_nic_ndev_queue_stop(
-                                                                self,
-                                                                ring->idx);
-                                        ++ring->stats.tx.packets;
-                                        ring->stats.tx.bytes += skb->len;
-                                }
-                        } else {
-                                err = NETDEV_TX_BUSY;
-                        }
-
-                        spin_unlock(&ring->header.lock);
-                        break;
-                }
-        } while (--trys);
-
-        if (!trys) {
+        frags = aq_nic_map_skb(self, skb, ring);
+
+        if (likely(frags)) {
+                err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
+                                                      ring,
+                                                      frags);
+                if (err >= 0) {
+                        if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
+                                aq_nic_ndev_queue_stop(self, ring->idx);
+
+                        ++ring->stats.tx.packets;
+                        ring->stats.tx.bytes += skb->len;
+                }
+        } else {
                 err = NETDEV_TX_BUSY;
-                goto err_exit;
         }
 
 err_exit:
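
Dropping the trylock loop is safe because the core already serializes ndo_start_xmit() per TX queue: it takes the queue's xmit lock (HARD_TX_LOCK) before calling into the driver, and each ring here is bound to one queue. The old AQ_CFG_LOCK_TRYS loop could exhaust its retries under contention and return NETDEV_TX_BUSY without transmitting, so removing it also removes a spurious-requeue path. The resulting TX path, as a sketch with names from the diff (ring lookup and the bad-state check abridged):

/* Sketch of the lock-free TX path; serialization is provided by the
 * per-queue xmit lock the core holds around ndo_start_xmit().
 */
static int xmit_sketch(struct aq_nic_s *self, struct sk_buff *skb,
                       struct aq_ring_s *ring)
{
        unsigned int frags = aq_nic_map_skb(self, skb, ring);
        int err = NETDEV_TX_OK;

        if (likely(frags)) {
                err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
                                                      ring, frags);
                if (err >= 0) {
                        /* stop early so the next skb is guaranteed to fit */
                        if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
                                aq_nic_ndev_queue_stop(self, ring->idx);

                        ++ring->stats.tx.packets;
                        ring->stats.tx.bytes += skb->len;
                }
        } else {
                err = NETDEV_TX_BUSY;
        }

        return err;
}

Stopping the queue while AQ_CFG_SKB_FRAGS_MAX + 1 descriptors are still free guarantees that the next skb, which maps to at most AQ_CFG_SKB_FRAGS_MAX fragments, can always be placed.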
@@ -688,11 +669,26 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
         netdev_for_each_mc_addr(ha, ndev) {
                 ether_addr_copy(self->mc_list.ar[i++], ha->addr);
                 ++self->mc_list.count;
+
+                if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX)
+                        break;
         }
 
-        return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
-                                                     self->mc_list.ar,
-                                                     self->mc_list.count);
+        if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) {
+                /* Number of filters is too big: atlantic does not support this.
+                 * Force all multi filter to support this.
+                 * With this we disable all UC filters and setup "all pass"
+                 * multicast mask
+                 */
+                self->packet_filter |= IFF_ALLMULTI;
+                self->aq_hw->aq_nic_cfg->mc_list_count = 0;
+                return self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
+                                                            self->packet_filter);
+        } else {
+                return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
+                                                             self->mc_list.ar,
+                                                             self->mc_list.count);
+        }
 }
 
 int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
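
When the multicast list outgrows the hardware filter table, programming a truncated list would silently drop traffic for the groups that did not fit. Falling back to IFF_ALLMULTI instead accepts all multicast frames and leaves the filtering to the stack, which is the conventional driver behavior for this case. The same decision expressed against netdev_mc_count(), as a sketch (the diff reaches the same answer by counting inside netdev_for_each_mc_addr()):

/* Sketch: overflow-to-allmulti fallback using netdev_mc_count(). */
static int set_mc_sketch(struct aq_nic_s *self, struct net_device *ndev)
{
        if (netdev_mc_count(ndev) > AQ_CFG_MULTICAST_ADDRESS_MAX) {
                /* too many groups for the HW table: accept all
                 * multicast and let the stack filter in software
                 */
                self->packet_filter |= IFF_ALLMULTI;
                self->aq_hw->aq_nic_cfg->mc_list_count = 0;
                return self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
                                                            self->packet_filter);
        }

        /* otherwise program the exact-match list as before */
        return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
                                                     self->mc_list.ar,
                                                     self->mc_list.count);
}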
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -101,7 +101,6 @@ int aq_ring_init(struct aq_ring_s *self)
         self->hw_head = 0;
         self->sw_head = 0;
         self->sw_tail = 0;
-        spin_lock_init(&self->header.lock);
         return 0;
 }
drivers/net/ethernet/aquantia/atlantic/aq_utils.h
@@ -17,7 +17,6 @@
 #define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_)
 
 struct aq_obj_s {
-        spinlock_t lock; /* spinlock for nic/rings processing */
         atomic_t flags;
 };
drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -34,8 +34,6 @@ struct aq_vec_s {
 #define AQ_VEC_RX_ID 1
 
 static int aq_vec_poll(struct napi_struct *napi, int budget)
-__releases(&self->lock)
-__acquires(&self->lock)
 {
         struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
         struct aq_ring_s *ring = NULL;
@@ -47,7 +45,7 @@ __acquires(&self->lock)
         if (!self) {
                 err = -EINVAL;
-        } else if (spin_trylock(&self->header.lock)) {
+        } else {
                 for (i = 0U, ring = self->ring[0];
                      self->tx_rings > i; ++i, ring = self->ring[i]) {
                         if (self->aq_hw_ops->hw_ring_tx_head_update) {
@@ -105,11 +103,8 @@ __acquires(&self->lock)
                         self->aq_hw_ops->hw_irq_enable(self->aq_hw,
                                         1U << self->aq_ring_param.vec_idx);
                 }
-
-err_exit:
-                spin_unlock(&self->header.lock);
         }
+
+err_exit:
         return work_done;
 }
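
The removed spin_trylock() was worse than redundant: whenever the lock was contended, the poll routine returned without servicing any ring, deferring all RX/TX work to a later NAPI round. It is also unnecessary, since the core guarantees that a given napi_struct is scheduled on at most one CPU at a time (the NAPI_STATE_SCHED bit), so aq_vec_poll() can never race with itself. A minimal lock-free poll skeleton under that guarantee (per-ring servicing elided):

/* Sketch: NAPI itself serializes this function per instance,
 * so no driver-private lock is needed.
 */
static int poll_sketch(struct napi_struct *napi, int budget)
{
        struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
        int work_done = 0;

        /* per-ring TX completion and RX processing, bounded by budget,
         * would go here
         */

        if (work_done < budget) {
                napi_complete(napi);
                /* re-arm this vector's interrupt only after polling stops */
                self->aq_hw_ops->hw_irq_enable(self->aq_hw,
                                1U << self->aq_ring_param.vec_idx);
        }

        return work_done;
}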
@@ -185,8 +180,6 @@ int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
         self->aq_hw_ops = aq_hw_ops;
         self->aq_hw = aq_hw;
 
-        spin_lock_init(&self->header.lock);
-
         for (i = 0U, ring = self->ring[0];
              self->tx_rings > i; ++i, ring = self->ring[i]) {
                 err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -629,6 +629,12 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
                         buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1;
                 else if (0x0U == (pkt_type & 0x1CU))
                         buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1;
+
+                /* Checksum offload workaround for small packets */
+                if (rxd_wb->pkt_len <= 60) {
+                        buff->is_ip_cso = 0U;
+                        buff->is_cso_err = 0U;
+                }
         }
 
         is_err &= ~0x18U;
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -645,6 +645,12 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
                         buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
                 else if (0x0U == (pkt_type & 0x1CU))
                         buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;
+
+                /* Checksum offload workaround for small packets */
+                if (rxd_wb->pkt_len <= 60) {
+                        buff->is_ip_cso = 0U;
+                        buff->is_cso_err = 0U;
+                }
         }
 
         is_err &= ~0x18U;
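
A0 and B0 receive the same workaround: for frames with pkt_len <= 60 the descriptor's checksum verdict is discarded. 60 bytes is the minimum Ethernet frame size excluding the FCS, so short frames arrive padded to that length, and the hardware's checksum result is evidently not trustworthy for them; clearing is_ip_cso and is_cso_err makes the driver report "no checksum information" rather than a false error. Assuming these flags map onto the skb in the usual way (the actual mapping lives in the ring-to-skb path, not shown in this diff), the effect is:

/* Sketch: what cleared offload flags mean to the stack. */
static void rx_csum_sketch(struct sk_buff *skb, bool hw_verified)
{
        if (hw_verified)
                skb->ip_summed = CHECKSUM_UNNECESSARY; /* skip SW check */
        else
                skb->ip_summed = CHECKSUM_NONE; /* stack verifies itself */
}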
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -141,6 +141,12 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
         err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected,
                                      aq_hw_read_reg(self, 0x18U));
 
+        if (err < 0)
+                pr_err("%s: Bad FW version detected: expected=%x, actual=%x\n",
+                       AQ_CFG_DRV_NAME,
+                       aq_hw_caps->fw_ver_expected,
+                       aq_hw_read_reg(self, 0x18U));
+
         return err;
 }
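
The new pr_err() turns an otherwise silent probe failure into something diagnosable from the log. One small follow-up candidate: the firmware-version register (0x18U) is read twice on the failure path. A sketch with the value cached (purely illustrative; names as in the diff):

        u32 fw_ver = aq_hw_read_reg(self, 0x18U);

        err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected, fw_ver);
        if (err < 0)
                pr_err("%s: Bad FW version detected: expected=%x, actual=%x\n",
                       AQ_CFG_DRV_NAME,
                       aq_hw_caps->fw_ver_expected, fw_ver);

        return err;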
@@ -313,11 +319,11 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
 err_exit:;
 }
 
-int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
-                                     struct aq_hw_link_status_s *link_status)
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
 {
         u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
         u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT;
+        struct aq_hw_link_status_s *link_status = &self->aq_link_status;
 
         if (!link_speed_mask) {
                 link_status->mbps = 0U;
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -180,8 +180,7 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
 int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
                                enum hal_atl_utils_fw_state_e state);
 
-int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
-                                     struct aq_hw_link_status_s *link_status);
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self);
 
 int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
                                    struct aq_hw_caps_s *aq_hw_caps,