Commit 8dcf69a6 authored by Daniel Machon, committed by Paolo Abeni

net: microchip: sparx5: add support for offloading dscp table

Add support for offloading dscp app entries. Dscp values are global for
all ports on the sparx5 switch. Therefore, we replicate each dscp app
entry per-port.
Signed-off-by: Daniel Machon <daniel.machon@microchip.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parent 23f8382c
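For reference, a DCB APP entry with the DSCP selector uses IEEE_8021QAZ_APP_SEL_DSCP, carries the DSCP codepoint (0-63) in app->protocol, and the traffic priority (0-7 on sparx5, bounded by SPX5_PRIOS) in app->priority; this is what the validation and replication code below operates on. A minimal sketch of such an entry, built around the kernel's dcb_ieee_setapp() API; the port netdev `ndev` and the wrapper function are placeholders for illustration, not part of the patch:

	#include <linux/netdevice.h>
	#include <net/dcbnl.h>

	/* Sketch only: map DSCP 46 (EF) to priority 7 on one port. The driver
	 * code below replicates this kind of entry to every sparx5 port,
	 * because the hardware DSCP table is shared by all ports.
	 */
	static int example_map_dscp46(struct net_device *ndev)
	{
		struct dcb_app app = {
			.selector = IEEE_8021QAZ_APP_SEL_DSCP, /* DSCP selector */
			.protocol = 46,                        /* DSCP codepoint */
			.priority = 7,                         /* QoS class */
		};

		return dcb_ieee_setapp(ndev, &app);
	}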
@@ -49,6 +49,13 @@ static int sparx5_dcb_app_validate(struct net_device *dev,
int err = 0;
switch (app->selector) {
/* Dscp checks */
case IEEE_8021QAZ_APP_SEL_DSCP:
if (app->protocol >= SPARX5_PORT_QOS_DSCP_COUNT)
err = -EINVAL;
else if (app->priority >= SPX5_PRIOS)
err = -ERANGE;
break;
/* Pcp checks */
case DCB_APP_SEL_PCP:
if (app->protocol >= SPARX5_PORT_QOS_PCP_DEI_COUNT)
@@ -119,17 +126,27 @@ static bool sparx5_dcb_apptrust_contains(int portno, u8 selector)
static int sparx5_dcb_app_update(struct net_device *dev)
{
struct dcb_app app_itr = { .selector = DCB_APP_SEL_PCP };
struct sparx5_port *port = netdev_priv(dev);
struct sparx5_port_qos_dscp_map *dscp_map;
struct sparx5_port_qos_pcp_map *pcp_map;
struct sparx5_port_qos qos = {0};
struct dcb_app app_itr = {0};
int portno = port->portno;
int i;
dscp_map = &qos.dscp.map;
pcp_map = &qos.pcp.map;
/* Get dscp ingress mapping */
for (i = 0; i < ARRAY_SIZE(dscp_map->map); i++) {
app_itr.selector = IEEE_8021QAZ_APP_SEL_DSCP;
app_itr.protocol = i;
dscp_map->map[i] = dcb_getapp(dev, &app_itr);
}
/* Get pcp ingress mapping */
for (i = 0; i < ARRAY_SIZE(pcp_map->map); i++) {
app_itr.selector = DCB_APP_SEL_PCP;
app_itr.protocol = i;
pcp_map->map[i] = dcb_getapp(dev, &app_itr);
}
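Note on the loops above: dcb_getapp() returns the priority configured for the given selector/protocol pair, or 0 when no matching APP entry exists, so any DSCP codepoint without an APP entry falls back to QoS class 0 in the map that is later written to hardware. Purely illustrative sketch of the resulting table, assuming a single entry "DSCP 46 -> priority 7" on this port:

	/* Hypothetical contents of qos.dscp.map after the first loop,
	 * assuming only DSCP 46 has been mapped (to priority 7).
	 */
	u8 expected_map[SPARX5_PORT_QOS_DSCP_COUNT] = { [46] = 7 }; /* others 0 */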
@@ -140,9 +157,44 @@ static int sparx5_dcb_app_update(struct net_device *dev)
qos.pcp.dp_enable = qos.pcp.qos_enable;
}
/* Enable use of dscp for queue classification ? */
if (sparx5_dcb_apptrust_contains(portno, IEEE_8021QAZ_APP_SEL_DSCP)) {
qos.dscp.qos_enable = true;
qos.dscp.dp_enable = qos.dscp.qos_enable;
}
return sparx5_port_qos_set(port, &qos);
}
/* Set or delete dscp app entry.
*
* Dscp mapping is global for all ports, so set and delete app entries are
* replicated for each port.
*/
static int sparx5_dcb_ieee_dscp_setdel_app(struct net_device *dev,
struct dcb_app *app, bool del)
{
struct sparx5_port *port = netdev_priv(dev);
struct dcb_app apps[SPX5_PORTS];
struct sparx5_port *port_itr;
int err, i;
for (i = 0; i < SPX5_PORTS; i++) {
port_itr = port->sparx5->ports[i];
if (!port_itr)
continue;
memcpy(&apps[i], app, sizeof(struct dcb_app));
if (del)
err = dcb_ieee_delapp(port_itr->ndev, &apps[i]);
else
err = dcb_ieee_setapp(port_itr->ndev, &apps[i]);
if (err)
return err;
}
return 0;
}
static int sparx5_dcb_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
struct dcb_app app_itr;
@@ -161,7 +213,11 @@ static int sparx5_dcb_ieee_setapp(struct net_device *dev, struct dcb_app *app)
dcb_ieee_delapp(dev, &app_itr);
}
if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
err = sparx5_dcb_ieee_dscp_setdel_app(dev, app, false);
else
err = dcb_ieee_setapp(dev, app);
if (err)
goto out;
@@ -175,7 +231,11 @@ static int sparx5_dcb_ieee_delapp(struct net_device *dev, struct dcb_app *app)
{
int err;
if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
err = sparx5_dcb_ieee_dscp_setdel_app(dev, app, true);
else
err = dcb_ieee_delapp(dev, app);
if (err < 0)
return err;
@@ -1149,6 +1149,7 @@ void sparx5_port_enable(struct sparx5_port *port, bool enable)
int sparx5_port_qos_set(struct sparx5_port *port,
struct sparx5_port_qos *qos)
{
sparx5_port_qos_dscp_set(port, &qos->dscp);
sparx5_port_qos_pcp_set(port, &qos->pcp);
return 0;
@@ -1181,3 +1182,41 @@ int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
return 0;
}
int sparx5_port_qos_dscp_set(const struct sparx5_port *port,
struct sparx5_port_qos_dscp *qos)
{
struct sparx5 *sparx5 = port->sparx5;
u8 *dscp = qos->map.map;
int i;
/* Enable/disable dscp and dp for qos classification.
* Disable rewrite of dscp values for now.
*/
spx5_rmw(ANA_CL_QOS_CFG_DSCP_QOS_ENA_SET(qos->qos_enable) |
ANA_CL_QOS_CFG_DSCP_DP_ENA_SET(qos->dp_enable) |
ANA_CL_QOS_CFG_DSCP_KEEP_ENA_SET(1),
ANA_CL_QOS_CFG_DSCP_QOS_ENA | ANA_CL_QOS_CFG_DSCP_DP_ENA |
ANA_CL_QOS_CFG_DSCP_KEEP_ENA, sparx5,
ANA_CL_QOS_CFG(port->portno));
/* Map each dscp value to priority and dp */
for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
spx5_rmw(ANA_CL_DSCP_CFG_DSCP_QOS_VAL_SET(*(dscp + i)) |
ANA_CL_DSCP_CFG_DSCP_DP_VAL_SET(0),
ANA_CL_DSCP_CFG_DSCP_QOS_VAL |
ANA_CL_DSCP_CFG_DSCP_DP_VAL, sparx5,
ANA_CL_DSCP_CFG(i));
}
/* Set per-dscp trust */
for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
if (qos->qos_enable) {
spx5_rmw(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_SET(1),
ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, sparx5,
ANA_CL_DSCP_CFG(i));
}
}
return 0;
}
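The register split above is what forces the per-port replication done in sparx5_dcb_ieee_dscp_setdel_app(): ANA_CL_QOS_CFG is indexed by port number, so the DSCP/DP enable and keep bits are per-port, while ANA_CL_DSCP_CFG is indexed by the DSCP value alone, so the codepoint-to-priority table itself is shared by every port. A minimal sketch, using only macros introduced above, of a hypothetical helper (not part of the patch) that reprograms one global mapping:

	/* Hypothetical helper, for illustration only: remap a single DSCP
	 * codepoint to a new QoS class. Because ANA_CL_DSCP_CFG is not
	 * per-port, this affects every port that trusts DSCP.
	 */
	static void sparx5_dscp_map_one(struct sparx5 *sparx5, u8 dscp, u8 prio)
	{
		spx5_rmw(ANA_CL_DSCP_CFG_DSCP_QOS_VAL_SET(prio),
			 ANA_CL_DSCP_CFG_DSCP_QOS_VAL, sparx5,
			 ANA_CL_DSCP_CFG(dscp));
	}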
@@ -99,14 +99,26 @@ struct sparx5_port_qos_pcp_map {
u8 map[SPARX5_PORT_QOS_PCP_DEI_COUNT];
};
#define SPARX5_PORT_QOS_DSCP_COUNT 64
struct sparx5_port_qos_dscp_map {
u8 map[SPARX5_PORT_QOS_DSCP_COUNT];
};
struct sparx5_port_qos_pcp {
struct sparx5_port_qos_pcp_map map;
bool qos_enable;
bool dp_enable;
};
struct sparx5_port_qos_dscp {
struct sparx5_port_qos_dscp_map map;
bool qos_enable;
bool dp_enable;
};
struct sparx5_port_qos {
struct sparx5_port_qos_pcp pcp;
struct sparx5_port_qos_dscp dscp;
};
int sparx5_port_qos_set(struct sparx5_port *port, struct sparx5_port_qos *qos);
@@ -114,4 +126,6 @@ int sparx5_port_qos_set(struct sparx5_port *port, struct sparx5_port_qos *qos);
int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
struct sparx5_port_qos_pcp *qos);
int sparx5_port_qos_dscp_set(const struct sparx5_port *port,
struct sparx5_port_qos_dscp *qos);
#endif /* __SPARX5_PORT_H__ */