Commit 53bb20d1 authored by Troy Kisky, committed by David S. Miller

net: fec: add variable reg_desc_active to speed things up

There is no need to go through complex macros every time we need to activate
a queue. Also, there is no need to call skb_get_queue_mapping() when we
already know which queue the skb is using.
Signed-off-by: Troy Kisky <troy.kisky@boundarydevices.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7355f276
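The pattern behind the patch is simple: resolve each queue's *_DES_ACTIVE register address once, when the descriptor ring is set up, cache it in the per-ring bufdesc_prop state, and have the transmit/receive hot paths just dereference that pointer instead of mapping a queue number through nested ternary macros (which also removes the skb_get_queue_mapping() calls used to recover that number). Below is a minimal, userspace-style sketch of that idea only, not the driver code itself: the register offsets, the simulated register block, and the helper names are made up for illustration, while the real driver uses ioremap'd MMIO, writel(), and the offset_des_active_rxq/txq tables added by this commit.

/*
 * Sketch of "cache the activate-register address at init time".
 * All names and offsets here are hypothetical stand-ins; the register
 * block is a plain array instead of MMIO.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_TX_QUEUES 3

/* Hypothetical offsets standing in for FEC_X_DES_ACTIVE_0/1/2. */
static const unsigned short offset_des_active_txq[NUM_TX_QUEUES] = {
	0x004, 0x1a4, 0x1e4,
};

struct bufdesc_prop_sketch {
	volatile uint32_t *reg_desc_active;	/* cached per-queue register */
	unsigned short qid;
};

/* Resolve the activate register for a queue (done per packet before,
 * done once at init after this patch). */
static volatile uint32_t *desc_active_addr(uint32_t *hwp, int q)
{
	return (volatile uint32_t *)((char *)hwp + offset_des_active_txq[q]);
}

/* Hot path after the patch: no queue lookup, no offset recomputation. */
static void kick_queue(struct bufdesc_prop_sketch *bd)
{
	*bd->reg_desc_active = 0;	/* writel(0, txq->bd.reg_desc_active); */
}

int main(void)
{
	static uint32_t regs[0x200];		/* simulated register block */
	struct bufdesc_prop_sketch bd[NUM_TX_QUEUES];

	/* Init path: resolve each queue's activate register once. */
	for (int i = 0; i < NUM_TX_QUEUES; i++) {
		bd[i].qid = i;
		bd[i].reg_desc_active = desc_active_addr(regs, i);
	}

	/* Transmit path: just kick the cached register. */
	kick_queue(&bd[1]);
	printf("queue 1 activate register now holds %u\n",
	       regs[offset_des_active_txq[1] / sizeof(uint32_t)]);
	return 0;
}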
@@ -310,12 +310,6 @@ struct bufdesc_ex {
 #define FEC_R_BUFF_SIZE(X)      (((X) == 1) ? FEC_R_BUFF_SIZE_1 : \
                                 (((X) == 2) ? \
                                         FEC_R_BUFF_SIZE_2 : FEC_R_BUFF_SIZE_0))
-#define FEC_R_DES_ACTIVE(X)     (((X) == 1) ? FEC_R_DES_ACTIVE_1 : \
-                                (((X) == 2) ? \
-                                        FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0))
-#define FEC_X_DES_ACTIVE(X)     (((X) == 1) ? FEC_X_DES_ACTIVE_1 : \
-                                (((X) == 2) ? \
-                                        FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0))
 #define FEC_DMA_CFG(X)          (((X) == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
@@ -454,6 +448,7 @@ struct bufdesc_prop {
         struct bufdesc  *base;
         struct bufdesc  *last;
         struct bufdesc  *cur;
+        void __iomem    *reg_desc_active;
         dma_addr_t      dma;
         unsigned short  ring_size;
         unsigned char   dsize;
...
@@ -328,7 +328,6 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
         struct bufdesc *bdp = txq->bd.cur;
         struct bufdesc_ex *ebdp;
         int nr_frags = skb_shinfo(skb)->nr_frags;
-        unsigned short queue = skb_get_queue_mapping(skb);
         int frag, frag_len;
         unsigned short status;
         unsigned int estatus = 0;
@@ -361,7 +360,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
                 if (fep->bufdesc_ex) {
                         if (fep->quirks & FEC_QUIRK_HAS_AVB)
-                                estatus |= FEC_TX_BD_FTYPE(queue);
+                                estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
                         if (skb->ip_summed == CHECKSUM_PARTIAL)
                                 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
                         ebdp->cbd_bdu = 0;
@@ -415,7 +414,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
         dma_addr_t addr;
         unsigned short status;
         unsigned short buflen;
-        unsigned short queue;
         unsigned int estatus = 0;
         unsigned int index;
         int entries_free;
@@ -444,7 +442,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
         bufaddr = skb->data;
         buflen = skb_headlen(skb);
-        queue = skb_get_queue_mapping(skb);
         index = fec_enet_get_bd_index(bdp, &txq->bd);
         if (((unsigned long) bufaddr) & fep->tx_align ||
             fep->quirks & FEC_QUIRK_SWAP_FRAME) {
@@ -487,7 +484,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                 if (fep->quirks & FEC_QUIRK_HAS_AVB)
-                        estatus |= FEC_TX_BD_FTYPE(queue);
+                        estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
                 if (skb->ip_summed == CHECKSUM_PARTIAL)
                         estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
@@ -521,7 +518,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
         txq->bd.cur = bdp;
         /* Trigger transmission start */
-        writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
+        writel(0, txq->bd.reg_desc_active);
         return 0;
 }
@@ -534,7 +531,6 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
 {
         struct fec_enet_private *fep = netdev_priv(ndev);
         struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
-        unsigned short queue = skb_get_queue_mapping(skb);
         unsigned short status;
         unsigned int estatus = 0;
         dma_addr_t addr;
@@ -566,7 +562,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
         if (fep->bufdesc_ex) {
                 if (fep->quirks & FEC_QUIRK_HAS_AVB)
-                        estatus |= FEC_TX_BD_FTYPE(queue);
+                        estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
                 if (skb->ip_summed == CHECKSUM_PARTIAL)
                         estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
                 ebdp->cbd_bdu = 0;
@@ -595,7 +591,6 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
         struct fec_enet_private *fep = netdev_priv(ndev);
         int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
         struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
-        unsigned short queue = skb_get_queue_mapping(skb);
         void *bufaddr;
         unsigned long dmabuf;
         unsigned short status;
@@ -630,7 +625,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
         if (fep->bufdesc_ex) {
                 if (fep->quirks & FEC_QUIRK_HAS_AVB)
-                        estatus |= FEC_TX_BD_FTYPE(queue);
+                        estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
                 if (skb->ip_summed == CHECKSUM_PARTIAL)
                         estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
                 ebdp->cbd_bdu = 0;
@@ -650,7 +645,6 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
         int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
         int total_len, data_left;
         struct bufdesc *bdp = txq->bd.cur;
-        unsigned short queue = skb_get_queue_mapping(skb);
         struct tso_t tso;
         unsigned int index = 0;
         int ret;
@@ -715,11 +709,11 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
         /* Trigger transmission start */
         if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
-            !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
-            !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
-            !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
-            !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)))
-                writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
+            !readl(txq->bd.reg_desc_active) ||
+            !readl(txq->bd.reg_desc_active) ||
+            !readl(txq->bd.reg_desc_active) ||
+            !readl(txq->bd.reg_desc_active))
+                writel(0, txq->bd.reg_desc_active);
         return 0;
@@ -819,7 +813,7 @@ static void fec_enet_active_rxring(struct net_device *ndev)
         int i;
         for (i = 0; i < fep->num_rx_queues; i++)
-                writel(0, fep->hwp + FEC_R_DES_ACTIVE(i));
+                writel(0, fep->rx_queue[i]->bd.reg_desc_active);
 }

 static void fec_enet_enable_ring(struct net_device *ndev)
@@ -1255,8 +1249,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
         /* ERR006538: Keep the transmitter going */
         if (bdp != txq->bd.cur &&
-            readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0)
-                writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id));
+            readl(txq->bd.reg_desc_active) == 0)
+                writel(0, txq->bd.reg_desc_active);
 }

 static void
@@ -1498,7 +1492,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
                  * incoming frames.  On a heavily loaded network, we should be
                  * able to keep up at the expense of system resources.
                  */
-                writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id));
+                writel(0, rxq->bd.reg_desc_active);
         }
         rxq->bd.cur = bdp;
         return pkt_received;
@@ -3061,6 +3055,14 @@ static const struct net_device_ops fec_netdev_ops = {
         .ndo_set_features       = fec_set_features,
 };
+static const unsigned short offset_des_active_rxq[] = {
+        FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
+};
+
+static const unsigned short offset_des_active_txq[] = {
+        FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
+};
+
 /*
  *      XXX:  We need to clean up on failure exits here.
  *
@@ -3114,6 +3116,7 @@ static int fec_enet_init(struct net_device *ndev)
                 rxq->bd.dma = bd_dma;
                 rxq->bd.dsize = dsize;
                 rxq->bd.dsize_log2 = dsize_log2;
+                rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
                 bd_dma += size;
                 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
                 rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
@@ -3129,6 +3132,7 @@ static int fec_enet_init(struct net_device *ndev)
                 txq->bd.dma = bd_dma;
                 txq->bd.dsize = dsize;
                 txq->bd.dsize_log2 = dsize_log2;
+                txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
                 bd_dma += size;
                 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
                 txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
...