Commit 2c204c2b authored by Sunil Goutham, committed by David S. Miller

net: thunderx: Support for byte queue limits

This patch adds support for Byte Queue Limits (BQL): bytes queued to hardware are accounted at transmit time via netdev_tx_sent_queue(), completed packets/bytes are reported from the CQ handler via netdev_tx_completed_queue(), and the per-queue accounting is reset in nicvf_stop() via netdev_tx_reset_queue().
Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b4e28c1f
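
For readers unfamiliar with BQL, the sketch below shows the three touch points this patch wires up, in the order a driver typically hits them. It is a minimal illustration, not ThunderX code: struct my_ring and the my_drv_*() functions are hypothetical; only netdev_get_tx_queue() and the netdev_tx_*_queue() helpers are the real kernel API used in the diff.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical per-queue TX ring; just enough state for the sketch. */
struct my_ring {
	unsigned int qidx;
};

/* Transmit path: account the bytes handed to hardware before ringing
 * the doorbell, as nicvf_sq_doorbell() does in this patch.
 */
static void my_drv_xmit_one(struct net_device *netdev, struct my_ring *ring,
			    struct sk_buff *skb)
{
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, ring->qidx);

	netdev_tx_sent_queue(txq, skb->len);
	/* ... post SQ descriptors, smp_wmb(), write the doorbell register ... */
}

/* Completion path: batch packet/byte counts over one CQ/NAPI pass and
 * report them once, as nicvf_cq_intr_handler() does in this patch.
 */
static void my_drv_tx_clean(struct net_device *netdev, struct my_ring *ring,
			    unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, ring->qidx);

	if (pkts)
		netdev_tx_completed_queue(txq, pkts, bytes);
}

/* Teardown path: drop any outstanding accounting so the next ifup starts
 * from a clean state, as the loop added to nicvf_stop() does.
 */
static void my_drv_down(struct net_device *netdev)
{
	unsigned int qidx;

	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
}

With this accounting in place, the stack dynamically caps the bytes in flight per TX queue; the resulting state is visible under /sys/class/net/<dev>/queues/tx-<n>/byte_queue_limits/ (limit, inflight, limit_min, limit_max).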
@@ -516,7 +516,8 @@ static int nicvf_init_resources(struct nicvf *nic)
 static void nicvf_snd_pkt_handler(struct net_device *netdev,
 				  struct cmp_queue *cq,
 				  struct cqe_send_t *cqe_tx,
-				  int cqe_type, int budget)
+				  int cqe_type, int budget,
+				  unsigned int *tx_pkts, unsigned int *tx_bytes)
 {
 	struct sk_buff *skb = NULL;
 	struct nicvf *nic = netdev_priv(netdev);
@@ -547,6 +548,8 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
 		}
 		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
 		prefetch(skb);
+		(*tx_pkts)++;
+		*tx_bytes += skb->len;
 		napi_consume_skb(skb, budget);
 		sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
 	} else {
@@ -662,6 +665,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
 	struct cmp_queue *cq = &qs->cq[cq_idx];
 	struct cqe_rx_t *cq_desc;
 	struct netdev_queue *txq;
+	unsigned int tx_pkts = 0, tx_bytes = 0;

 	spin_lock_bh(&cq->lock);
 loop:
@@ -701,7 +705,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
 		case CQE_TYPE_SEND:
 			nicvf_snd_pkt_handler(netdev, cq,
 					      (void *)cq_desc, CQE_TYPE_SEND,
-					      budget);
+					      budget, &tx_pkts, &tx_bytes);
 			tx_done++;
 			break;
 		case CQE_TYPE_INVALID:
@@ -730,6 +734,9 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
 		netdev = nic->pnicvf->netdev;
 		txq = netdev_get_tx_queue(netdev,
 					  nicvf_netdev_qidx(nic, cq_idx));
+		if (tx_pkts)
+			netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+
 		nic = nic->pnicvf;
 		if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
 			netif_tx_start_queue(txq);
@@ -1160,6 +1167,9 @@ int nicvf_stop(struct net_device *netdev)
 	netif_tx_disable(netdev);

+	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
+		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
+
 	/* Free resources */
 	nicvf_config_data_transfer(nic, false);
@@ -1082,6 +1082,24 @@ static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
 	imm->len = 1;
 }

+static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
+				     int sq_num, int desc_cnt)
+{
+	struct netdev_queue *txq;
+
+	txq = netdev_get_tx_queue(nic->pnicvf->netdev,
+				  skb_get_queue_mapping(skb));
+
+	netdev_tx_sent_queue(txq, skb->len);
+
+	/* make sure all memory stores are done before ringing doorbell */
+	smp_wmb();
+
+	/* Inform HW to xmit all TSO segments */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+			      sq_num, desc_cnt);
+}
+
 /* Segment a TSO packet into 'gso_size' segments and append
  * them to SQ for transfer
  */
@@ -1141,12 +1159,8 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
 	/* Save SKB in the last segment for freeing */
 	sq->skbuff[hdr_qentry] = (u64)skb;

-	/* make sure all memory stores are done before ringing doorbell */
-	smp_wmb();
-
-	/* Inform HW to xmit all TSO segments */
-	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
-			      sq_num, desc_cnt);
+	nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
+
 	nic->drv_stats.tx_tso++;
 	return 1;
 }
@@ -1219,12 +1233,8 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
 		nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
 	}

-	/* make sure all memory stores are done before ringing doorbell */
-	smp_wmb();
-
-	/* Inform HW to xmit new packet */
-	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
-			      sq_num, subdesc_cnt);
+	nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
+
 	return 1;

 append_fail: