Commit 278175ab authored by Pavel Belous, committed by David S. Miller

net:ethernet:aquantia: Extra spinlocks removed.

This patch removes datapath spinlocks which do not perform any
useful work.

Fixes: 6e70637f9f1e ("net: ethernet: aquantia: Add ring support code")
Signed-off-by: Pavel Belous <Pavel.Belous@aquantia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent edbd58be
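Editorial note: the diff below drops a spin_trylock() retry loop around the TX path and the spinlock taken in the NAPI poll routine. A plausible reading (the commit message does not spell this out) is that these locks guard nothing: the networking core already serializes ndo_start_xmit() calls per TX queue for drivers that do not set NETIF_F_LLTX, and each ring is serviced by a single vector. The sketch below is an illustrative condensation of the post-patch aq_nic_xmit() hot path from the first aq_nic.c hunk; the function name and the rationale comments are editorial, not from the patch itself.

/* Condensed, editorial sketch of the TX path after this patch
 * (see the first hunk below); not the verbatim driver function.
 */
static int aq_nic_xmit_sketch(struct aq_nic_s *self, struct sk_buff *skb,
			      struct aq_ring_s *ring)
{
	unsigned int frags;
	int err = NETDEV_TX_OK;

	/* No spin_trylock(&ring->header.lock) retry loop anymore: the core
	 * already serializes xmit per TX queue, so the per-ring lock added
	 * no protection on this path (editorial reasoning).
	 */
	frags = aq_nic_map_skb(self, skb, ring);	/* map skb into TX descriptors */
	if (likely(frags)) {
		err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw, ring, frags);
		if (err >= 0) {
			/* stop the queue when the ring cannot take another max-frag skb */
			if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
				aq_nic_ndev_queue_stop(self, ring->idx);
			++ring->stats.tx.packets;
			ring->stats.tx.bytes += skb->len;
		}
	} else {
		err = NETDEV_TX_BUSY;
	}
	return err;
}

The same reasoning would apply to aq_vec_poll() further down: NAPI does not run a given vector's poll callback concurrently with itself, so the per-vector header.lock there was likewise redundant.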
@@ -597,14 +597,11 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
 }
 
 int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
-__releases(&ring->lock)
-__acquires(&ring->lock)
 {
 	struct aq_ring_s *ring = NULL;
 	unsigned int frags = 0U;
 	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
 	unsigned int tc = 0U;
-	unsigned int trys = AQ_CFG_LOCK_TRYS;
 	int err = NETDEV_TX_OK;
 	bool is_nic_in_bad_state;
 
@@ -628,36 +625,21 @@
 		goto err_exit;
 	}
 
-	do {
-		if (spin_trylock(&ring->header.lock)) {
-			frags = aq_nic_map_skb(self, skb, ring);
-
-			if (likely(frags)) {
-				err = self->aq_hw_ops.hw_ring_tx_xmit(
-								self->aq_hw,
-								ring, frags);
-				if (err >= 0) {
-					if (aq_ring_avail_dx(ring) <
-					    AQ_CFG_SKB_FRAGS_MAX + 1)
-						aq_nic_ndev_queue_stop(
-								self,
-								ring->idx);
-
-					++ring->stats.tx.packets;
-					ring->stats.tx.bytes += skb->len;
-				}
-			} else {
-				err = NETDEV_TX_BUSY;
-			}
+	frags = aq_nic_map_skb(self, skb, ring);
 
-			spin_unlock(&ring->header.lock);
-			break;
-		}
-	} while (--trys);
+	if (likely(frags)) {
+		err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
+						      ring,
+						      frags);
+		if (err >= 0) {
+			if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
+				aq_nic_ndev_queue_stop(self, ring->idx);
 
-	if (!trys) {
+			++ring->stats.tx.packets;
+			ring->stats.tx.bytes += skb->len;
+		}
+	} else {
 		err = NETDEV_TX_BUSY;
-		goto err_exit;
 	}
 
 err_exit:
@@ -101,7 +101,6 @@ int aq_ring_init(struct aq_ring_s *self)
 	self->hw_head = 0;
 	self->sw_head = 0;
 	self->sw_tail = 0;
-	spin_lock_init(&self->header.lock);
 	return 0;
 }
 
@@ -17,7 +17,6 @@
 #define AQ_DIMOF(_ARY_)  ARRAY_SIZE(_ARY_)
 
 struct aq_obj_s {
-	spinlock_t lock; /* spinlock for nic/rings processing */
 	atomic_t flags;
 };
 
@@ -34,8 +34,6 @@ struct aq_vec_s {
 #define AQ_VEC_RX_ID 1
 
 static int aq_vec_poll(struct napi_struct *napi, int budget)
-__releases(&self->lock)
-__acquires(&self->lock)
 {
 	struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
 	struct aq_ring_s *ring = NULL;
@@ -47,7 +45,7 @@ __acquires(&self->lock)
 
 	if (!self) {
 		err = -EINVAL;
-	} else if (spin_trylock(&self->header.lock)) {
+	} else {
 		for (i = 0U, ring = self->ring[0];
 			self->tx_rings > i; ++i, ring = self->ring[i]) {
 			if (self->aq_hw_ops->hw_ring_tx_head_update) {
@@ -105,11 +103,8 @@ __acquires(&self->lock)
 			self->aq_hw_ops->hw_irq_enable(self->aq_hw,
 					1U << self->aq_ring_param.vec_idx);
 		}
-
-err_exit:
-		spin_unlock(&self->header.lock);
 	}
-
+err_exit:
 	return work_done;
 }
 
@@ -185,8 +180,6 @@ int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
 	self->aq_hw_ops = aq_hw_ops;
 	self->aq_hw = aq_hw;
 
-	spin_lock_init(&self->header.lock);
-
 	for (i = 0U, ring = self->ring[0];
 		self->tx_rings > i; ++i, ring = self->ring[i]) {
 		err = aq_ring_init(&ring[AQ_VEC_TX_ID]);