Commit d5b2d7a7 authored by Sunil Goutham, committed by David S. Miller

net: thunderx: Configure RED and backpressure levels

This patch enables moving-average calculation of Rx packet resource usage
and configures RED and backpressure levels for both the CQ and the RBDR.
It also initializes the SQ's CQ_LIMIT field properly.
Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1cc70259
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -809,6 +809,15 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
 	bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);
 
+	/* Enable moving average calculation.
+	 * Keep the LVL/AVG delay to the HW-enforced minimum so that not too
+	 * many packets sneak in between average calculations.
+	 */
+	nic_reg_write(nic, NIC_PF_CQ_AVG_CFG,
+		      (BIT_ULL(20) | 0x2ull << 14 | 0x1));
+	nic_reg_write(nic, NIC_PF_RRM_AVG_CFG,
+		      (BIT_ULL(20) | 0x3ull << 14 | 0x1));
+
 	return 0;
 }
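The two register values above bundle an enable bit with two small delay fields. As a rough standalone illustration, the following C program decodes them under the layout the shifts imply: an enable at bit 20, a level delay beginning at bit 14, and the low bits as the averaging delay. The field names and widths here are assumptions for illustration, not taken from the ThunderX hardware manual.

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))

/* Decode one AVG_CFG value under the assumed field layout:
 * bit 20 = enable, bits 19:14 = level delay, bits 13:0 = averaging delay.
 */
static void decode_avg_cfg(const char *reg, uint64_t val)
{
        printf("%s: ena=%llu lvl_dly=%llu avg_dly=%llu\n", reg,
               (unsigned long long)((val >> 20) & 0x1),  /* enable bit   */
               (unsigned long long)((val >> 14) & 0x3f), /* level delay  */
               (unsigned long long)(val & 0x3fff));      /* avg delay    */
}

int main(void)
{
        /* The two values written in nic_config_loopback() above */
        decode_avg_cfg("NIC_PF_CQ_AVG_CFG",  BIT_ULL(20) | 0x2ULL << 14 | 0x1);
        decode_avg_cfg("NIC_PF_RRM_AVG_CFG", BIT_ULL(20) | 0x3ULL << 14 | 0x1);
        return 0;
}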
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -544,14 +544,18 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
 	nicvf_send_msg_to_pf(nic, &mbx);
 
 	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
-	mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
+	mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
+		     (RQ_PASS_RBDR_LVL << 16) | (RQ_PASS_CQ_LVL << 8) |
+		     (qs->vnic_id << 0);
 	nicvf_send_msg_to_pf(nic, &mbx);
 
 	/* RQ drop config
 	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
 	 */
 	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
-	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
+	mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
+		     (RQ_PASS_RBDR_LVL << 40) | (RQ_DROP_RBDR_LVL << 32) |
+		     (RQ_PASS_CQ_LVL << 16) | (RQ_DROP_CQ_LVL << 8);
 	nicvf_send_msg_to_pf(nic, &mbx);
 
 	if (!nic->sqs_mode && (qidx == 0)) {
@@ -650,6 +654,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
 	sq_cfg.ldwb = 0;
 	sq_cfg.qsize = SND_QSIZE;
 	sq_cfg.tstmp_bgx_intf = 0;
+	sq_cfg.cq_limit = 0;
 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
 
 	/* Set threshold value for interrupt generation */
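For readers tracing the mailbox traffic: the cfg word sent to the PF is a plain 64-bit integer with the pass/drop levels at fixed byte offsets. The sketch below reproduces the packing in ordinary C, assuming (as the patch suggests but does not state) that bits 63 and 62 are the RBDR-side and CQ-side enables; the helper names are hypothetical.

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n)          (1ULL << (n))
#define RQ_PASS_CQ_LVL      128ULL
#define RQ_DROP_CQ_LVL      96ULL
#define RQ_PASS_RBDR_LVL    8ULL
#define RQ_DROP_RBDR_LVL    0ULL

/* NIC_MBOX_MSG_RQ_BP_CFG payload: enables, pass levels, VNIC id */
static uint64_t rq_bp_cfg(uint64_t vnic_id)
{
        return BIT_ULL(63) | BIT_ULL(62) |
               (RQ_PASS_RBDR_LVL << 16) | (RQ_PASS_CQ_LVL << 8) |
               (vnic_id << 0);
}

/* NIC_MBOX_MSG_RQ_DROP_CFG payload: enables, RED pass/drop levels */
static uint64_t rq_drop_cfg(void)
{
        return BIT_ULL(63) | BIT_ULL(62) |
               (RQ_PASS_RBDR_LVL << 40) | (RQ_DROP_RBDR_LVL << 32) |
               (RQ_PASS_CQ_LVL << 16) | (RQ_DROP_CQ_LVL << 8);
}

int main(void)
{
        printf("RQ_BP_CFG   = 0x%016llx\n", (unsigned long long)rq_bp_cfg(0));
        printf("RQ_DROP_CFG = 0x%016llx\n", (unsigned long long)rq_drop_cfg());
        return 0;
}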
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -85,12 +85,26 @@
 #define MAX_CQES_FOR_TX		((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
				 MAX_CQE_PER_PKT_XMIT)
 
-/* Calculate number of CQEs to reserve for all SQEs.
- * Its 1/256th level of CQ size.
- * '+ 1' to account for pipelining
- */
-#define RQ_CQ_DROP	((256 / (CMP_QUEUE_LEN / \
-			 (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
+/* RED and backpressure levels of CQ for pkt reception
+ * For CQ, level is a measure of emptiness, i.e. 0x0 means full.
+ * eg: For a CQ of size 4K, and for pass/drop levels of 128/96,
+ * HW accepts pkt if unused CQE >= 2048
+ * RED accepts pkt if unused CQE < 2048 & >= 1536
+ * DROPs pkts if unused CQE < 1536
+ */
+#define RQ_PASS_CQ_LVL		128ULL
+#define RQ_DROP_CQ_LVL		96ULL
+
+/* RED and backpressure levels of RBDR for pkt reception
+ * For RBDR, level is a measure of fullness, i.e. 0x0 means empty.
+ * eg: For an RBDR of size 8K, and for pass/drop levels of 8/0,
+ * HW accepts pkt if unused RBs >= 256
+ * RED accepts pkt if unused RBs < 256 & >= 0
+ * DROPs pkts if unused RBs < 0
+ */
+#define RQ_PASS_RBDR_LVL	8ULL
+#define RQ_DROP_RBDR_LVL	0ULL
 
 /* Descriptor size in bytes */
 #define SND_QUEUE_DESC_SIZE	16
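The numbers quoted in the comments above are consistent with a level being an 8-bit fraction of the ring size in 1/256 units, i.e. threshold = qlen * level / 256. That scaling is inferred from the examples rather than stated in the patch; the short standalone program below simply re-derives the quoted figures.

#include <stdio.h>

/* Convert a pass/drop level to an entry count, assuming levels are
 * expressed in 1/256ths of the queue size.
 */
static unsigned int lvl_to_entries(unsigned int qlen, unsigned int lvl)
{
        return qlen * lvl / 256;
}

int main(void)
{
        /* CQ of 4096 entries, pass/drop levels 128/96 -> 2048/1536 CQEs */
        printf("CQ   pass=%u drop=%u\n",
               lvl_to_entries(4096, 128), lvl_to_entries(4096, 96));

        /* RBDR of 8192 entries, pass/drop levels 8/0 -> 256/0 free RBs */
        printf("RBDR pass=%u drop=%u\n",
               lvl_to_entries(8192, 8), lvl_to_entries(8192, 0));
        return 0;
}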
--- a/drivers/net/ethernet/cavium/thunder/q_struct.h
+++ b/drivers/net/ethernet/cavium/thunder/q_struct.h
@@ -624,7 +624,9 @@ struct cq_cfg {
 struct sq_cfg {
 #if defined(__BIG_ENDIAN_BITFIELD)
-	u64 reserved_20_63:44;
+	u64 reserved_32_63:32;
+	u64 cq_limit:8;
+	u64 reserved_20_23:4;
 	u64 ena:1;
 	u64 reserved_18_18:1;
 	u64 reset:1;
@@ -642,7 +644,9 @@ struct sq_cfg {
 	u64 reset:1;
 	u64 reserved_18_18:1;
 	u64 ena:1;
-	u64 reserved_20_63:44;
+	u64 reserved_20_23:4;
+	u64 cq_limit:8;
+	u64 reserved_32_63:32;
 #endif
 };
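This hunk relies on the driver's existing pattern of blitting struct sq_cfg into the register as a raw u64 (the *(u64 *)&sq_cfg cast in the nicvf_queues.c hunk); the mirrored big-endian field list keeps cq_limit at bits 31:24 regardless of host byte order. Below is a trimmed-down little-endian sketch of that cast, with every field not visible in the hunk collapsed into one reserved run; bitfield layout is compiler-defined, so this matches GCC on a little-endian host only.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for struct sq_cfg with only the fields visible in the hunk,
 * padded so each lands at the bit position its name implies
 * (ena at bit 19, cq_limit at bits 31:24). Little-endian order only.
 */
struct sq_cfg_sketch {
        uint64_t reserved_0_17:18;   /* real struct: qsize, tstmp_bgx_intf, ... */
        uint64_t reserved_18_18:1;
        uint64_t ena:1;
        uint64_t reserved_20_23:4;
        uint64_t cq_limit:8;
        uint64_t reserved_32_63:32;
};

int main(void)
{
        struct sq_cfg_sketch cfg = { 0 };
        uint64_t raw;

        cfg.ena = 1;
        cfg.cq_limit = 0;            /* the patch initializes this to 0 */

        raw = *(uint64_t *)&cfg;     /* same cast the driver uses */
        printf("SQ_CFG raw = 0x%016llx\n", (unsigned long long)raw);
        return 0;
}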