Commit 25faa6a4 authored by Gerhard Engleder, committed by David S. Miller

tsnep: Replace TX spin_lock with __netif_tx_lock

The TX spin_lock can be eliminated because the normal TX path is already
protected by __netif_tx_lock, and this lock can also be used for accesses
to the queue outside of the normal TX path.
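
Outside the normal TX path the queue lock is taken explicitly. A minimal
sketch of the pattern, as applied in tsnep_tx_poll() and tsnep_tx_pending()
in the diff below:

	struct netdev_queue *nq;

	/* look up this ring's netdev TX queue and take its lock */
	nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
	__netif_tx_lock(nq, smp_processor_id());

	/* ... access tx->read, tx->write and the descriptor ring ... */

	__netif_tx_unlock(nq);

The TX path itself (tsnep_xmit_frame_ring()) then needs no locking of its
own, because the stack already holds this lock around ndo_start_xmit().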
Signed-off-by: Gerhard Engleder <gerhard@engleder-embedded.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 75943bc9
--- a/drivers/net/ethernet/engleder/tsnep.h
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -78,8 +78,6 @@ struct tsnep_tx {
 	void *page[TSNEP_RING_PAGE_COUNT];
 	dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
 
-	/* TX ring lock */
-	spinlock_t lock;
 	struct tsnep_tx_entry entry[TSNEP_RING_SIZE];
 	int write;
 	int read;
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -434,7 +434,6 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
 static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
 					 struct tsnep_tx *tx)
 {
-	unsigned long flags;
 	int count = 1;
 	struct tsnep_tx_entry *entry;
 	int length;
@@ -444,16 +443,12 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
 	if (skb_shinfo(skb)->nr_frags > 0)
 		count += skb_shinfo(skb)->nr_frags;
 
-	spin_lock_irqsave(&tx->lock, flags);
-
 	if (tsnep_tx_desc_available(tx) < count) {
 		/* ring full, shall not happen because queue is stopped if full
 		 * below
 		 */
 		netif_stop_queue(tx->adapter->netdev);
 
-		spin_unlock_irqrestore(&tx->lock, flags);
-
 		return NETDEV_TX_BUSY;
 	}
 
@@ -468,8 +463,6 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
 
 		tx->dropped++;
 
-		spin_unlock_irqrestore(&tx->lock, flags);
-
 		netdev_err(tx->adapter->netdev, "TX DMA map failed\n");
 
 		return NETDEV_TX_OK;
@@ -496,20 +489,19 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
 		netif_stop_queue(tx->adapter->netdev);
 	}
 
-	spin_unlock_irqrestore(&tx->lock, flags);
-
 	return NETDEV_TX_OK;
 }
 
 static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
 {
-	unsigned long flags;
-	int budget = 128;
 	struct tsnep_tx_entry *entry;
-	int count;
+	struct netdev_queue *nq;
+	int budget = 128;
 	int length;
+	int count;
 
-	spin_lock_irqsave(&tx->lock, flags);
+	nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+	__netif_tx_lock(nq, smp_processor_id());
 
 	do {
 		if (tx->read == tx->write)
@@ -568,18 +560,19 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
 		netif_wake_queue(tx->adapter->netdev);
 	}
 
-	spin_unlock_irqrestore(&tx->lock, flags);
+	__netif_tx_unlock(nq);
 
 	return (budget != 0);
 }
 
 static bool tsnep_tx_pending(struct tsnep_tx *tx)
 {
-	unsigned long flags;
 	struct tsnep_tx_entry *entry;
+	struct netdev_queue *nq;
 	bool pending = false;
 
-	spin_lock_irqsave(&tx->lock, flags);
+	nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+	__netif_tx_lock(nq, smp_processor_id());
 
 	if (tx->read != tx->write) {
 		entry = &tx->entry[tx->read];
@@ -589,7 +582,7 @@ static bool tsnep_tx_pending(struct tsnep_tx *tx)
 		pending = true;
 	}
 
-	spin_unlock_irqrestore(&tx->lock, flags);
+	__netif_tx_unlock(nq);
 
 	return pending;
 }
@@ -615,8 +608,6 @@ static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
 	tx->owner_counter = 1;
 	tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
 
-	spin_lock_init(&tx->lock);
-
 	return 0;
 }
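
For context: __netif_tx_lock() takes the queue's _xmit_lock and records the
owning CPU, which lets the stack detect recursive transmits; that is why the
call sites above pass smp_processor_id(). Simplified from
include/linux/netdevice.h (details vary between kernel versions):

	static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
	{
		spin_lock(&txq->_xmit_lock);
		/* remember which CPU holds the lock (recursion detection) */
		txq->xmit_lock_owner = cpu;
	}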