Commit 388665a9 authored by David S. Miller

Merge branch 'pxa168_eth-perf'

Jisheng Zhang says:

====================
net: pxa168_eth: improve performance

This series improves pxa168_eth driver performance by using
{readl|writel}_relaxed or appropriate memory barriers.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8be0cfa4 b17d1559
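
For background, readl()/writel() imply barriers strong enough to order MMIO against DMA memory, which is more than plain register reads and writes need; the _relaxed variants drop that implicit ordering. Below is a minimal sketch of the accessor pattern the series moves to, assuming a hypothetical foo_priv structure in place of the real pxa168_eth_private:

#include <linux/io.h>

/* Hypothetical driver-private structure; only the MMIO base matters here. */
struct foo_priv {
	void __iomem *base;	/* ioremap()'d register window */
};

/* Relaxed register read: no implicit barrier around the access. */
static inline u32 foo_rdl(struct foo_priv *priv, int offset)
{
	return readl_relaxed(priv->base + offset);
}

/* Relaxed register write: callers that hand descriptors in coherent
 * memory to the hardware must order those stores themselves (e.g. with
 * dma_wmb() or wmb()) before the register write that kicks the DMA engine.
 */
static inline void foo_wrl(struct foo_priv *priv, int offset, u32 data)
{
	writel_relaxed(data, priv->base + offset);
}
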
@@ -286,12 +286,12 @@ static int pxa168_eth_stop(struct net_device *dev);
 
 static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
 {
-	return readl(pep->base + offset);
+	return readl_relaxed(pep->base + offset);
 }
 
 static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
 {
-	writel(data, pep->base + offset);
+	writel_relaxed(data, pep->base + offset);
 }
 
 static void abort_dma(struct pxa168_eth_private *pep)
@@ -342,9 +342,9 @@ static void rxq_refill(struct net_device *dev)
 		pep->rx_skb[used_rx_desc] = skb;
 
 		/* Return the descriptor to DMA ownership */
-		wmb();
+		dma_wmb();
 		p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
-		wmb();
+		dma_wmb();
 
 		/* Move the used descriptor pointer to the next descriptor */
 		pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
@@ -794,7 +794,7 @@ static int rxq_process(struct net_device *dev, int budget)
 		rx_used_desc = pep->rx_used_desc_q;
 		rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
 		cmd_sts = rx_desc->cmd_sts;
-		rmb();
+		dma_rmb();
 		if (cmd_sts & (BUF_OWNED_BY_DMA))
 			break;
 		skb = pep->rx_skb[rx_curr_desc];
@@ -1287,7 +1287,7 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	skb_tx_timestamp(skb);
 
-	wmb();
+	dma_wmb();
 	desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
 			TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
 	wmb();
...
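
The barrier changes follow the usual descriptor-ring handoff: write the descriptor body, issue dma_wmb(), then flip ownership to the hardware; on the receive side, read the ownership word and issue dma_rmb() before touching the rest of the descriptor or buffer. A minimal sketch of that pattern, using a hypothetical foo_desc layout rather than the driver's real RX/TX descriptors:

#include <linux/types.h>
#include <asm/barrier.h>

#define FOO_BUF_OWNED_BY_DMA	(1U << 31)	/* hypothetical ownership bit */

/* Hypothetical descriptor living in coherent DMA memory. */
struct foo_desc {
	u32 cmd_sts;	/* ownership + status */
	u32 buf_ptr;	/* DMA address of the data buffer */
};

/* Hand a filled descriptor to the hardware (mirrors rxq_refill()/xmit). */
static void foo_give_to_hw(struct foo_desc *desc, u32 buf)
{
	desc->buf_ptr = buf;

	/* Both stores target coherent memory, so dma_wmb() suffices to
	 * make the buffer pointer visible before the ownership flip.
	 */
	dma_wmb();
	desc->cmd_sts = FOO_BUF_OWNED_BY_DMA;
}

/* Check whether the hardware has returned a descriptor (mirrors rxq_process()). */
static bool foo_owned_by_cpu(struct foo_desc *desc, u32 *cmd_sts)
{
	*cmd_sts = desc->cmd_sts;

	/* Keep subsequent descriptor/buffer reads after the status read. */
	dma_rmb();

	return !(*cmd_sts & FOO_BUF_OWNED_BY_DMA);
}

On ARM SoCs such as the PXA168, dma_wmb()/dma_rmb() typically map to lighter barriers than the full wmb()/rmb(), which is where the performance gain comes from. The trailing wmb() left in place in pxa168_eth_start_xmit() is presumably kept to order the coherent-memory descriptor update against the now-relaxed register write that starts the DMA.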