Commit 2e8ef77e authored by Michael Chan, committed by David S. Miller

bnxt_en: Add TC to hardware QoS queue mapping logic.

The current driver maps MQPRIO traffic classes directly 1:1 to the
internal hardware queues (TC0 maps to hardware queue 0, etc.).  This
direct mapping requires the internal hardware queues to be reconfigured
from lossless to lossy and vice versa when necessary.  This
involves reconfiguring internal buffer thresholds, which is
disruptive and not always reliable.

Implement a new scheme to map TCs to internal hardware queues by
matching up their PFC requirements.  This eliminates the need
to reconfigure a hardware queue's internal buffers at run time.  After
remapping, the NIC is closed and reopened for the new TC to hardware
queue mapping to take effect.

This patch only adds the basic mapping logic.
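
[Editor's illustration, not part of the patch: a minimal, standalone C sketch of the indirection this series introduces. A TC index no longer selects the hardware queue table entry directly; it goes through a tc_to_qidx[] table, and the reverse lookup mirrors the bnxt_queue_to_tc() helper added in the diff below. The fake_bnxt struct, the sample queue ids, and the helper names here are hypothetical stand-ins, not driver API.]

#include <stdio.h>

#define MAX_QUEUE 8

struct queue_info {
	unsigned char queue_id;       /* hardware CoS queue id */
	unsigned char queue_profile;  /* e.g. lossy vs. lossless */
};

/* Simplified stand-in for the relevant fields of struct bnxt. */
struct fake_bnxt {
	int max_tc;
	struct queue_info q_info[MAX_QUEUE];
	unsigned char tc_to_qidx[MAX_QUEUE]; /* TC -> q_info[] index */
};

/* TC -> hardware queue id, via the new indirection table. */
static unsigned char tc_to_queue_id(struct fake_bnxt *bp, int tc)
{
	unsigned char qidx = bp->tc_to_qidx[tc];

	return bp->q_info[qidx].queue_id;
}

/* Reverse lookup, mirroring the bnxt_queue_to_tc() helper in the patch. */
static int queue_id_to_tc(struct fake_bnxt *bp, unsigned char queue_id)
{
	int i, j;

	for (i = 0; i < bp->max_tc; i++) {
		if (bp->q_info[i].queue_id != queue_id)
			continue;
		for (j = 0; j < bp->max_tc; j++) {
			if (bp->tc_to_qidx[j] == i)
				return j;
		}
	}
	return -1;
}

int main(void)
{
	struct fake_bnxt bp = {
		.max_tc = 4,
		/* hypothetical hardware queue ids */
		.q_info = { {0}, {2}, {4}, {6} },
		/* identity mapping, as set up by this patch; the remap
		 * by PFC requirement described above would permute these
		 * entries in a later step
		 */
		.tc_to_qidx = { 0, 1, 2, 3 },
	};

	printf("TC1 -> hw queue %d\n", tc_to_queue_id(&bp, 1));
	printf("hw queue 4 -> TC %d\n", queue_id_to_tc(&bp, 4));
	return 0;
}
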
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c347b927
drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2383,6 +2383,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
 	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
 		struct bnxt_ring_struct *ring;
+		u8 qidx;
 
 		ring = &txr->tx_ring_struct;
@@ -2411,7 +2412,8 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
 			memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
 		}
-		ring->queue_id = bp->q_info[j].queue_id;
+		qidx = bp->tc_to_qidx[j];
+		ring->queue_id = bp->q_info[qidx].queue_id;
 		if (i < bp->tx_nr_rings_xdp)
 			continue;
 		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
@@ -5309,6 +5311,7 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 	for (i = 0; i < bp->max_tc; i++) {
 		bp->q_info[i].queue_id = *qptr++;
 		bp->q_info[i].queue_profile = *qptr++;
+		bp->tc_to_qidx[i] = i;
 	}
 
 qportcfg_exit:
...
drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1242,6 +1242,7 @@ struct bnxt {
 	u8			max_tc;
 	u8			max_lltc;	/* lossless TCs */
 	struct bnxt_queue_info	q_info[BNXT_MAX_QUEUE];
+	u8			tc_to_qidx[BNXT_MAX_QUEUE];
 
 	unsigned int		current_interval;
 #define BNXT_TIMER_INTERVAL	HZ
...
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -21,6 +21,21 @@
 #include "bnxt_dcb.h"
 
 #ifdef CONFIG_BNXT_DCB
+static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id)
+{
+	int i, j;
+
+	for (i = 0; i < bp->max_tc; i++) {
+		if (bp->q_info[i].queue_id == queue_id) {
+			for (j = 0; j < bp->max_tc; j++) {
+				if (bp->tc_to_qidx[j] == i)
+					return j;
+			}
+		}
+	}
+	return -EINVAL;
+}
+
 static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
 {
 	struct hwrm_queue_pri2cos_cfg_input req = {0};
@@ -33,10 +48,13 @@ static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
 	pri2cos = &req.pri0_cos_queue_id;
 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		u8 qidx;
+
 		req.enables |= cpu_to_le32(
 			QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);
 
-		pri2cos[i] = bp->q_info[ets->prio_tc[i]].queue_id;
+		qidx = bp->tc_to_qidx[ets->prio_tc[i]];
+		pri2cos[i] = bp->q_info[qidx].queue_id;
 	}
 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	return rc;
@@ -55,17 +73,15 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (!rc) {
 		u8 *pri2cos = &resp->pri0_cos_queue_id;
-		int i, j;
+		int i;
 
 		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
 			u8 queue_id = pri2cos[i];
+			int tc;
 
-			for (j = 0; j < bp->max_tc; j++) {
-				if (bp->q_info[j].queue_id == queue_id) {
-					ets->prio_tc[i] = j;
-					break;
-				}
-			}
+			tc = bnxt_queue_to_tc(bp, queue_id);
+			if (tc >= 0)
+				ets->prio_tc[i] = tc;
 		}
 	}
 	mutex_unlock(&bp->hwrm_cmd_lock);
@@ -81,13 +97,15 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
 	void *data;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
-	data = &req.unused_0;
-	for (i = 0; i < max_tc; i++, data += sizeof(cos2bw) - 4) {
+	for (i = 0; i < max_tc; i++) {
+		u8 qidx;
+
 		req.enables |= cpu_to_le32(
 			QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);
 
 		memset(&cos2bw, 0, sizeof(cos2bw));
-		cos2bw.queue_id = bp->q_info[i].queue_id;
+		qidx = bp->tc_to_qidx[i];
+		cos2bw.queue_id = bp->q_info[qidx].queue_id;
 		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
 			cos2bw.tsa =
 				QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP;
@@ -103,8 +121,9 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
 			cpu_to_le32((ets->tc_tx_bw[i] * 100) |
 				    BW_VALUE_UNIT_PERCENT1_100);
 		}
+		data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4);
 		memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
-		if (i == 0) {
+		if (qidx == 0) {
 			req.queue_id0 = cos2bw.queue_id;
 			req.unused_0 = 0;
 		}
@@ -132,22 +151,22 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
 	data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
 	for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
-		int j;
+		int tc;
 
 		memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4);
 		if (i == 0)
 			cos2bw.queue_id = resp->queue_id0;
 
-		for (j = 0; j < bp->max_tc; j++) {
-			if (bp->q_info[j].queue_id != cos2bw.queue_id)
-				continue;
-			if (cos2bw.tsa ==
-			    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
-				ets->tc_tsa[j] = IEEE_8021QAZ_TSA_STRICT;
-			} else {
-				ets->tc_tsa[j] = IEEE_8021QAZ_TSA_ETS;
-				ets->tc_tx_bw[j] = cos2bw.bw_weight;
-			}
+		tc = bnxt_queue_to_tc(bp, cos2bw.queue_id);
+		if (tc < 0)
+			continue;
+
+		if (cos2bw.tsa ==
+		    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
+			ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_STRICT;
+		} else {
+			ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_ETS;
+			ets->tc_tx_bw[tc] = cos2bw.bw_weight;
 		}
 	}
 	mutex_unlock(&bp->hwrm_cmd_lock);
...