Commit 58f9f9b5 authored by Jakub Kicinski

Merge branch 'configuring-congestion-watermarks-on-ocelot-switch-using-devlink-sb'

Vladimir Oltean says:

====================
Configuring congestion watermarks on ocelot switch using devlink-sb

In some applications, it is important to create resource reservations in
the Ethernet switches, to prevent background traffic or deliberate
attacks from causing denial of service to the high-priority traffic.

These patches give the user some knobs to turn. The ocelot switches
support per-port and per-port-tc reservations, on ingress and on egress.
The resources that are monitored are packet buffers (in cells of 60
bytes each) and frame references.

Frames that exceed the reservations can optionally consume from the
sharing watermarks, which are not per-port but global across the switch.
There are 10 sharing watermarks: 8 are per traffic class and 2 are per
drop priority.

I am configuring the hardware to the best of my knowledge, and mostly
through trial and error. The same goes for the devlink-sb integration.
Feedback is welcome.
====================
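To make the cell bookkeeping concrete before the diff, here is a small
self-contained C sketch; the 60-byte cell size and the 128 KiB VSC9959
packet buffer come from the patches below, while the 3000-byte
reservation is an assumed value for illustration:

#include <stdio.h>

#define OCELOT_BUFFER_CELL_SZ 60 /* bytes per packet buffer cell */

int main(void)
{
	unsigned int pool_bytes = 128 * 1024; /* VSC9959 packet buffer size */
	unsigned int rsrv_bytes = 3000; /* assumed per-port reservation */

	/* devlink-sb talks in bytes; the watermarks count 60-byte cells */
	printf("pool: %u cells\n", pool_bytes / OCELOT_BUFFER_CELL_SZ);
	printf("reservation: %u cells\n", rsrv_bytes / OCELOT_BUFFER_CELL_SZ);

	return 0;
}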

Link: https://lore.kernel.org/r/20210115021120.3055988-1-olteanv@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 2d9116be f59fd9ca
......@@ -299,7 +299,7 @@ static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
ANA_PORT_QOS_CFG,
port);
for (i = 0; i < FELIX_NUM_TC * 2; i++) {
for (i = 0; i < OCELOT_NUM_TC * 2; i++) {
ocelot_rmw_ix(ocelot,
(ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) |
ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i),
......@@ -422,12 +422,12 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
ocelot->map = felix->info->map;
ocelot->stats_layout = felix->info->stats_layout;
ocelot->num_stats = felix->info->num_stats;
ocelot->shared_queue_sz = felix->info->shared_queue_sz;
ocelot->num_mact_rows = felix->info->num_mact_rows;
ocelot->vcap = felix->info->vcap;
ocelot->ops = felix->info->ops;
ocelot->inj_prefix = OCELOT_TAG_PREFIX_SHORT;
ocelot->xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
ocelot->devlink = felix->ds->devlink;
port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
GFP_KERNEL);
......@@ -589,6 +589,10 @@ static int felix_setup(struct dsa_switch *ds)
felix_port_qos_map_init(ocelot, port);
}
err = ocelot_devlink_sb_register(ocelot);
if (err)
return err;
/* Include the CPU port module in the forwarding mask for unknown
* unicast - the hardware default value for ANA_FLOODING_FLD_UNICAST
* excludes BIT(ocelot->num_phys_ports), and so does ocelot_init, since
......@@ -610,14 +614,15 @@ static void felix_teardown(struct dsa_switch *ds)
struct felix *felix = ocelot_to_felix(ocelot);
int port;
if (felix->info->mdio_bus_free)
felix->info->mdio_bus_free(ocelot);
ocelot_devlink_sb_unregister(ocelot);
ocelot_deinit_timestamp(ocelot);
ocelot_deinit(ocelot);
for (port = 0; port < ocelot->num_phys_ports; port++)
ocelot_deinit_port(ocelot, port);
ocelot_deinit_timestamp(ocelot);
/* stop workqueue thread */
ocelot_deinit(ocelot);
if (felix->info->mdio_bus_free)
felix->info->mdio_bus_free(ocelot);
}
static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
......@@ -751,44 +756,156 @@ static int felix_port_setup_tc(struct dsa_switch *ds, int port,
return -EOPNOTSUPP;
}
static int felix_sb_pool_get(struct dsa_switch *ds, unsigned int sb_index,
u16 pool_index,
struct devlink_sb_pool_info *pool_info)
{
struct ocelot *ocelot = ds->priv;
return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
}
static int felix_sb_pool_set(struct dsa_switch *ds, unsigned int sb_index,
u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
threshold_type, extack);
}
static int felix_sb_port_pool_get(struct dsa_switch *ds, int port,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold)
{
struct ocelot *ocelot = ds->priv;
return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
p_threshold);
}
static int felix_sb_port_pool_set(struct dsa_switch *ds, int port,
unsigned int sb_index, u16 pool_index,
u32 threshold, struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
threshold, extack);
}
static int felix_sb_tc_pool_bind_get(struct dsa_switch *ds, int port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 *p_pool_index, u32 *p_threshold)
{
struct ocelot *ocelot = ds->priv;
return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
pool_type, p_pool_index,
p_threshold);
}
static int felix_sb_tc_pool_bind_set(struct dsa_switch *ds, int port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 pool_index, u32 threshold,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
pool_type, pool_index, threshold,
extack);
}
static int felix_sb_occ_snapshot(struct dsa_switch *ds,
unsigned int sb_index)
{
struct ocelot *ocelot = ds->priv;
return ocelot_sb_occ_snapshot(ocelot, sb_index);
}
static int felix_sb_occ_max_clear(struct dsa_switch *ds,
unsigned int sb_index)
{
struct ocelot *ocelot = ds->priv;
return ocelot_sb_occ_max_clear(ocelot, sb_index);
}
static int felix_sb_occ_port_pool_get(struct dsa_switch *ds, int port,
unsigned int sb_index, u16 pool_index,
u32 *p_cur, u32 *p_max)
{
struct ocelot *ocelot = ds->priv;
return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
p_cur, p_max);
}
static int felix_sb_occ_tc_port_bind_get(struct dsa_switch *ds, int port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max)
{
struct ocelot *ocelot = ds->priv;
return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, tc_index,
pool_type, p_cur, p_max);
}
const struct dsa_switch_ops felix_switch_ops = {
.get_tag_protocol = felix_get_tag_protocol,
.setup = felix_setup,
.teardown = felix_teardown,
.set_ageing_time = felix_set_ageing_time,
.get_strings = felix_get_strings,
.get_ethtool_stats = felix_get_ethtool_stats,
.get_sset_count = felix_get_sset_count,
.get_ts_info = felix_get_ts_info,
.phylink_validate = felix_phylink_validate,
.phylink_mac_config = felix_phylink_mac_config,
.phylink_mac_link_down = felix_phylink_mac_link_down,
.phylink_mac_link_up = felix_phylink_mac_link_up,
.port_enable = felix_port_enable,
.port_disable = felix_port_disable,
.port_fdb_dump = felix_fdb_dump,
.port_fdb_add = felix_fdb_add,
.port_fdb_del = felix_fdb_del,
.port_mdb_add = felix_mdb_add,
.port_mdb_del = felix_mdb_del,
.port_bridge_join = felix_bridge_join,
.port_bridge_leave = felix_bridge_leave,
.port_stp_state_set = felix_bridge_stp_state_set,
.port_vlan_filtering = felix_vlan_filtering,
.port_vlan_add = felix_vlan_add,
.port_vlan_del = felix_vlan_del,
.port_hwtstamp_get = felix_hwtstamp_get,
.port_hwtstamp_set = felix_hwtstamp_set,
.port_rxtstamp = felix_rxtstamp,
.port_txtstamp = felix_txtstamp,
.port_change_mtu = felix_change_mtu,
.port_max_mtu = felix_get_max_mtu,
.port_policer_add = felix_port_policer_add,
.port_policer_del = felix_port_policer_del,
.cls_flower_add = felix_cls_flower_add,
.cls_flower_del = felix_cls_flower_del,
.cls_flower_stats = felix_cls_flower_stats,
.port_setup_tc = felix_port_setup_tc,
.get_tag_protocol = felix_get_tag_protocol,
.setup = felix_setup,
.teardown = felix_teardown,
.set_ageing_time = felix_set_ageing_time,
.get_strings = felix_get_strings,
.get_ethtool_stats = felix_get_ethtool_stats,
.get_sset_count = felix_get_sset_count,
.get_ts_info = felix_get_ts_info,
.phylink_validate = felix_phylink_validate,
.phylink_mac_config = felix_phylink_mac_config,
.phylink_mac_link_down = felix_phylink_mac_link_down,
.phylink_mac_link_up = felix_phylink_mac_link_up,
.port_enable = felix_port_enable,
.port_disable = felix_port_disable,
.port_fdb_dump = felix_fdb_dump,
.port_fdb_add = felix_fdb_add,
.port_fdb_del = felix_fdb_del,
.port_mdb_add = felix_mdb_add,
.port_mdb_del = felix_mdb_del,
.port_bridge_join = felix_bridge_join,
.port_bridge_leave = felix_bridge_leave,
.port_stp_state_set = felix_bridge_stp_state_set,
.port_vlan_filtering = felix_vlan_filtering,
.port_vlan_add = felix_vlan_add,
.port_vlan_del = felix_vlan_del,
.port_hwtstamp_get = felix_hwtstamp_get,
.port_hwtstamp_set = felix_hwtstamp_set,
.port_rxtstamp = felix_rxtstamp,
.port_txtstamp = felix_txtstamp,
.port_change_mtu = felix_change_mtu,
.port_max_mtu = felix_get_max_mtu,
.port_policer_add = felix_port_policer_add,
.port_policer_del = felix_port_policer_del,
.cls_flower_add = felix_cls_flower_add,
.cls_flower_del = felix_cls_flower_del,
.cls_flower_stats = felix_cls_flower_stats,
.port_setup_tc = felix_port_setup_tc,
.devlink_sb_pool_get = felix_sb_pool_get,
.devlink_sb_pool_set = felix_sb_pool_set,
.devlink_sb_port_pool_get = felix_sb_port_pool_get,
.devlink_sb_port_pool_set = felix_sb_port_pool_set,
.devlink_sb_tc_pool_bind_get = felix_sb_tc_pool_bind_get,
.devlink_sb_tc_pool_bind_set = felix_sb_tc_pool_bind_set,
.devlink_sb_occ_snapshot = felix_sb_occ_snapshot,
.devlink_sb_occ_max_clear = felix_sb_occ_max_clear,
.devlink_sb_occ_port_pool_get = felix_sb_occ_port_pool_get,
.devlink_sb_occ_tc_port_bind_get= felix_sb_occ_tc_port_bind_get,
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
......
......@@ -5,7 +5,6 @@
#define _MSCC_FELIX_H
#define ocelot_to_felix(o) container_of((o), struct felix, ocelot)
#define FELIX_NUM_TC 8
/* Platform-specific information */
struct felix_info {
......@@ -15,7 +14,6 @@ struct felix_info {
const struct reg_field *regfields;
const u32 *const *map;
const struct ocelot_ops *ops;
int shared_queue_sz;
int num_mact_rows;
const struct ocelot_stat_layout *stats_layout;
unsigned int num_stats;
......
......@@ -1006,9 +1006,27 @@ static u16 vsc9959_wm_enc(u16 value)
return value;
}
static u16 vsc9959_wm_dec(u16 wm)
{
WARN_ON(wm & ~GENMASK(8, 0));
if (wm & BIT(8))
return (wm & GENMASK(7, 0)) * 16;
return wm;
}
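/* Worked example of the decode rule above (illustrative values): 0x15A
 * has BIT(8) set, so it decodes to (0x15A & GENMASK(7, 0)) * 16 =
 * 90 * 16 = 1440 cells; an encoding of 200 has BIT(8) clear and is
 * returned verbatim. wm_enc performs the inverse mapping.
 */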
static void vsc9959_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
{
*inuse = (val & GENMASK(23, 12)) >> 12;
*maxuse = val & GENMASK(11, 0);
}
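/* Example (illustrative value): for val = (100 << 12) | 80, the fields
 * decode to inuse = 100 and maxuse = 80, counted in watermark units
 * (60-byte cells for buffers, references for the frame reference pool).
 */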
static const struct ocelot_ops vsc9959_ops = {
.reset = vsc9959_reset,
.wm_enc = vsc9959_wm_enc,
.wm_dec = vsc9959_wm_dec,
.wm_stat = vsc9959_wm_stat,
.port_to_netdev = felix_port_to_netdev,
.netdev_to_port = felix_netdev_to_port,
};
......@@ -1356,10 +1374,9 @@ static const struct felix_info felix_info_vsc9959 = {
.stats_layout = vsc9959_stats_layout,
.num_stats = ARRAY_SIZE(vsc9959_stats_layout),
.vcap = vsc9959_vcap_props,
.shared_queue_sz = 128 * 1024,
.num_mact_rows = 2048,
.num_ports = 6,
.num_tx_queues = FELIX_NUM_TC,
.num_tx_queues = OCELOT_NUM_TC,
.switch_pci_bar = 4,
.imdio_pci_bar = 0,
.ptp_caps = &vsc9959_ptp_caps,
......@@ -1418,7 +1435,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, felix);
ocelot = &felix->ocelot;
ocelot->dev = &pdev->dev;
ocelot->num_flooding_pgids = FELIX_NUM_TC;
ocelot->num_flooding_pgids = OCELOT_NUM_TC;
felix->info = &felix_info_vsc9959;
felix->switch_base = pci_resource_start(pdev,
felix->info->switch_pci_bar);
......
......@@ -1057,9 +1057,27 @@ static u16 vsc9953_wm_enc(u16 value)
return value;
}
static u16 vsc9953_wm_dec(u16 wm)
{
WARN_ON(wm & ~GENMASK(9, 0));
if (wm & BIT(9))
return (wm & GENMASK(8, 0)) * 16;
return wm;
}
static void vsc9953_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
{
*inuse = (val & GENMASK(25, 13)) >> 13;
*maxuse = val & GENMASK(12, 0);
}
static const struct ocelot_ops vsc9953_ops = {
.reset = vsc9953_reset,
.wm_enc = vsc9953_wm_enc,
.wm_dec = vsc9953_wm_dec,
.wm_stat = vsc9953_wm_stat,
.port_to_netdev = felix_port_to_netdev,
.netdev_to_port = felix_netdev_to_port,
};
......@@ -1181,9 +1199,9 @@ static const struct felix_info seville_info_vsc9953 = {
.stats_layout = vsc9953_stats_layout,
.num_stats = ARRAY_SIZE(vsc9953_stats_layout),
.vcap = vsc9953_vcap_props,
.shared_queue_sz = 256 * 1024,
.num_mact_rows = 2048,
.num_ports = 10,
.num_tx_queues = OCELOT_NUM_TC,
.mdio_bus_alloc = vsc9953_mdio_bus_alloc,
.mdio_bus_free = vsc9953_mdio_bus_free,
.phylink_validate = vsc9953_phylink_validate,
......
......@@ -6,7 +6,8 @@ mscc_ocelot_switch_lib-y := \
ocelot_police.o \
ocelot_vcap.o \
ocelot_flower.o \
ocelot_ptp.o
ocelot_ptp.o \
ocelot_devlink.o
obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot.o
mscc_ocelot-y := \
ocelot_vsc7514.o \
......
......@@ -1354,7 +1354,7 @@ void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
pause_stop);
/* Tail dropping watermarks */
atop_tot = (ocelot->shared_queue_sz - 9 * maxlen) /
atop_tot = (ocelot->packet_buffer_size - 9 * maxlen) /
OCELOT_BUFFER_CELL_SZ;
atop = (9 * maxlen) / OCELOT_BUFFER_CELL_SZ;
ocelot_write_rix(ocelot, ocelot->ops->wm_enc(atop), SYS_ATOP, port);
......@@ -1467,6 +1467,21 @@ static void ocelot_cpu_port_init(struct ocelot *ocelot)
ANA_PORT_VLAN_CFG, cpu);
}
static void ocelot_detect_features(struct ocelot *ocelot)
{
int mmgt, eq_ctrl;
/* For Ocelot, Felix, Seville, Serval etc, SYS:MMGT:MMGT:FREECNT holds
* the number of 240-byte free memory words (aka 4-cell chunks) and not
* 192 bytes as the documentation incorrectly says.
*/
mmgt = ocelot_read(ocelot, SYS_MMGT);
ocelot->packet_buffer_size = 240 * SYS_MMGT_FREECNT(mmgt);
eq_ctrl = ocelot_read(ocelot, QSYS_EQ_CTRL);
ocelot->num_frame_refs = QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(eq_ctrl);
}
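/* Illustrative numbers (assumed readout): a FREECNT of 1024 words gives
 * packet_buffer_size = 240 * 1024 = 245760 bytes, which corresponds to
 * 245760 / 60 = 4096 watermark cells shared by all ports.
 */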
int ocelot_init(struct ocelot *ocelot)
{
char queue_name[32];
......@@ -1509,6 +1524,7 @@ int ocelot_init(struct ocelot *ocelot)
INIT_LIST_HEAD(&ocelot->multicast);
INIT_LIST_HEAD(&ocelot->pgids);
ocelot_detect_features(ocelot);
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
ocelot_vcap_init(ocelot);
......
......@@ -121,13 +121,15 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
struct phy_device *phy);
void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
enum ocelot_tag_prefix injection,
enum ocelot_tag_prefix extraction);
int ocelot_devlink_init(struct ocelot *ocelot);
void ocelot_devlink_teardown(struct ocelot *ocelot);
int ocelot_port_devlink_init(struct ocelot *ocelot, int port,
enum devlink_port_flavour flavour);
void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port);
extern struct notifier_block ocelot_netdevice_nb;
extern struct notifier_block ocelot_switchdev_nb;
extern struct notifier_block ocelot_switchdev_blocking_nb;
extern const struct devlink_ops ocelot_devlink_ops;
#endif
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Copyright 2020-2021 NXP Semiconductors
*/
#include <net/devlink.h>
#include "ocelot.h"
/* The queue system tracks four resource consumptions:
* Resource 0: Memory tracked per source port
* Resource 1: Frame references tracked per source port
* Resource 2: Memory tracked per destination port
* Resource 3: Frame references tracked per destination port
*/
#define OCELOT_RESOURCE_SZ 256
#define OCELOT_NUM_RESOURCES 4
#define BUF_xxxx_I (0 * OCELOT_RESOURCE_SZ)
#define REF_xxxx_I (1 * OCELOT_RESOURCE_SZ)
#define BUF_xxxx_E (2 * OCELOT_RESOURCE_SZ)
#define REF_xxxx_E (3 * OCELOT_RESOURCE_SZ)
/* For each resource type there are 4 types of watermarks:
* Q_RSRV: reservation per QoS class per port
* PRIO_SHR: sharing watermark per QoS class across all ports
* P_RSRV: reservation per port
* COL_SHR: sharing watermark per color (drop precedence) across all ports
*/
#define xxx_Q_RSRV_x 0
#define xxx_PRIO_SHR_x 216
#define xxx_P_RSRV_x 224
#define xxx_COL_SHR_x 254
/* Reservation Watermarks
* ----------------------
*
* For setting up the reserved areas, watermarks exist per port and per
* QoS class for both ingress and egress.
*/
/* Amount of packet buffer
* | per QoS class
* | | reserved
* | | | per egress port
* | | | |
* V V v v
* BUF_Q_RSRV_E
*/
#define BUF_Q_RSRV_E(port, prio) \
(BUF_xxxx_E + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
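/* Worked example (illustrative): the egress buffer reservation for
 * traffic class 5 of port 3 is BUF_Q_RSRV_E(3, 5) = 2 * 256 + 0 +
 * 8 * 3 + 5 = 541, the watermark index passed to the QSYS_RES_CFG
 * accessors below.
 */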
/* Amount of packet buffer
* | for all port's traffic classes
* | | reserved
* | | | per egress port
* | | | |
* V V v v
* BUF_P_RSRV_E
*/
#define BUF_P_RSRV_E(port) \
(BUF_xxxx_E + xxx_P_RSRV_x + (port))
/* Amount of packet buffer
* | per QoS class
* | | reserved
* | | | per ingress port
* | | | |
* V V v v
* BUF_Q_RSRV_I
*/
#define BUF_Q_RSRV_I(port, prio) \
(BUF_xxxx_I + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
/* Amount of packet buffer
* | for all port's traffic classes
* | | reserved
* | | | per ingress port
* | | | |
* V V v v
* BUF_P_RSRV_I
*/
#define BUF_P_RSRV_I(port) \
(BUF_xxxx_I + xxx_P_RSRV_x + (port))
/* Amount of frame references
* | per QoS class
* | | reserved
* | | | per egress port
* | | | |
* V V v v
* REF_Q_RSRV_E
*/
#define REF_Q_RSRV_E(port, prio) \
(REF_xxxx_E + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
/* Amount of frame references
* | for all port's traffic classes
* | | reserved
* | | | per egress port
* | | | |
* V V v v
* REF_P_RSRV_E
*/
#define REF_P_RSRV_E(port) \
(REF_xxxx_E + xxx_P_RSRV_x + (port))
/* Amount of frame references
* | per QoS class
* | | reserved
* | | | per ingress port
* | | | |
* V V v v
* REF_Q_RSRV_I
*/
#define REF_Q_RSRV_I(port, prio) \
(REF_xxxx_I + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
/* Amount of frame references
* | for all port's traffic classes
* | | reserved
* | | | per ingress port
* | | | |
* V V v v
* REF_P_RSRV_I
*/
#define REF_P_RSRV_I(port) \
(REF_xxxx_I + xxx_P_RSRV_x + (port))
/* Sharing Watermarks
* ------------------
*
* The shared memory area is shared between all ports.
*/
/* Amount of buffer
* | per QoS class
* | | from the shared memory area
* | | | for egress traffic
* | | | |
* V V v v
* BUF_PRIO_SHR_E
*/
#define BUF_PRIO_SHR_E(prio) \
(BUF_xxxx_E + xxx_PRIO_SHR_x + (prio))
/* Amount of buffer
* | per color (drop precedence level)
* | | from the shared memory area
* | | | for egress traffic
* | | | |
* V V v v
* BUF_COL_SHR_E
*/
#define BUF_COL_SHR_E(dp) \
(BUF_xxxx_E + xxx_COL_SHR_x + (1 - (dp)))
/* Amount of buffer
* | per QoS class
* | | from the shared memory area
* | | | for ingress traffic
* | | | |
* V V v v
* BUF_PRIO_SHR_I
*/
#define BUF_PRIO_SHR_I(prio) \
(BUF_xxxx_I + xxx_PRIO_SHR_x + (prio))
/* Amount of buffer
* | per color (drop precedence level)
* | | from the shared memory area
* | | | for ingress traffic
* | | | |
* V V v v
* BUF_COL_SHR_I
*/
#define BUF_COL_SHR_I(dp) \
(BUF_xxxx_I + xxx_COL_SHR_x + (1 - (dp)))
/* Amount of frame references
* | per QoS class
* | | from the shared area
* | | | for egress traffic
* | | | |
* V V v v
* REF_PRIO_SHR_E
*/
#define REF_PRIO_SHR_E(prio) \
(REF_xxxx_E + xxx_PRIO_SHR_x + (prio))
/* Amount of frame references
* | per color (drop precedence level)
* | | from the shared area
* | | | for egress traffic
* | | | |
* V V v v
* REF_COL_SHR_E
*/
#define REF_COL_SHR_E(dp) \
(REF_xxxx_E + xxx_COL_SHR_x + (1 - (dp)))
/* Amount of frame references
* | per QoS class
* | | from the shared area
* | | | for ingress traffic
* | | | |
* V V v v
* REF_PRIO_SHR_I
*/
#define REF_PRIO_SHR_I(prio) \
(REF_xxxx_I + xxx_PRIO_SHR_x + (prio))
/* Amount of frame references
* | per color (drop precedence level)
* | | from the shared area
* | | | for ingress traffic
* | | | |
* V V v v
* REF_COL_SHR_I
*/
#define REF_COL_SHR_I(dp) \
(REF_xxxx_I + xxx_COL_SHR_x + (1 - (dp)))
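/* Note the (1 - (dp)) term in all COL_SHR macros: drop precedence 0
 * maps to the higher of the two per-color entries, e.g. BUF_COL_SHR_I(0)
 * is index 254 + 1 = 255 while BUF_COL_SHR_I(1) is index 254.
 */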
static u32 ocelot_wm_read(struct ocelot *ocelot, int index)
{
int wm = ocelot_read_gix(ocelot, QSYS_RES_CFG, index);
return ocelot->ops->wm_dec(wm);
}
static void ocelot_wm_write(struct ocelot *ocelot, int index, u32 val)
{
u32 wm = ocelot->ops->wm_enc(val);
ocelot_write_gix(ocelot, wm, QSYS_RES_CFG, index);
}
static void ocelot_wm_status(struct ocelot *ocelot, int index, u32 *inuse,
u32 *maxuse)
{
int res_stat = ocelot_read_gix(ocelot, QSYS_RES_STAT, index);
return ocelot->ops->wm_stat(res_stat, inuse, maxuse);
}
/* The hardware comes out of reset with strange defaults: the sum of all
* reservations for frame memory is larger than the total buffer size.
* One has to wonder how the reservation watermarks can still guarantee
* anything under congestion.
* Bring some sense into the hardware by changing the defaults to disable all
* reservations and rely only on the sharing watermark for frames with drop
* precedence 0. The user can still explicitly request reservations per port
* and per port-tc through devlink-sb.
*/
static void ocelot_disable_reservation_watermarks(struct ocelot *ocelot,
int port)
{
int prio;
for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
ocelot_wm_write(ocelot, BUF_Q_RSRV_I(port, prio), 0);
ocelot_wm_write(ocelot, BUF_Q_RSRV_E(port, prio), 0);
ocelot_wm_write(ocelot, REF_Q_RSRV_I(port, prio), 0);
ocelot_wm_write(ocelot, REF_Q_RSRV_E(port, prio), 0);
}
ocelot_wm_write(ocelot, BUF_P_RSRV_I(port), 0);
ocelot_wm_write(ocelot, BUF_P_RSRV_E(port), 0);
ocelot_wm_write(ocelot, REF_P_RSRV_I(port), 0);
ocelot_wm_write(ocelot, REF_P_RSRV_E(port), 0);
}
/* We want the sharing watermarks to consume all nonreserved resources, for
* efficient resource utilization (a single traffic flow should be able to use
* up the entire buffer space and frame resources as long as there's no
* interference).
* The switch has 10 sharing watermarks per lookup: 8 per traffic class and 2
* per color (drop precedence).
* The trouble with configuring these sharing watermarks is that:
* (1) There's a risk that we overcommit the resources if we configure
* (a) all 8 per-TC sharing watermarks to the max
* (b) all 2 per-color sharing watermarks to the max
* (2) There's a risk that we undercommit the resources if we configure
* (a) all 8 per-TC sharing watermarks to "max / 8"
* (b) all 2 per-color sharing watermarks to "max / 2"
* So for Linux, let's just disable the sharing watermarks per traffic class
* (setting them to 0 will make them always exceeded), and rely only on the
* sharing watermark for drop priority 0. So frames with drop priority set to 1
* by QoS classification or policing will still be allowed, but only as long as
* the port and port-TC reservations are not exceeded.
*/
static void ocelot_disable_tc_sharing_watermarks(struct ocelot *ocelot)
{
int prio;
for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
ocelot_wm_write(ocelot, BUF_PRIO_SHR_I(prio), 0);
ocelot_wm_write(ocelot, BUF_PRIO_SHR_E(prio), 0);
ocelot_wm_write(ocelot, REF_PRIO_SHR_I(prio), 0);
ocelot_wm_write(ocelot, REF_PRIO_SHR_E(prio), 0);
}
}
static void ocelot_get_buf_rsrv(struct ocelot *ocelot, u32 *buf_rsrv_i,
u32 *buf_rsrv_e)
{
int port, prio;
*buf_rsrv_i = 0;
*buf_rsrv_e = 0;
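/* Iterate with <= to include the CPU port module as well, which sits at
 * index ocelot->num_phys_ports (see the forwarding mask comment in
 * felix_setup above).
 */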
for (port = 0; port <= ocelot->num_phys_ports; port++) {
for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
*buf_rsrv_i += ocelot_wm_read(ocelot,
BUF_Q_RSRV_I(port, prio));
*buf_rsrv_e += ocelot_wm_read(ocelot,
BUF_Q_RSRV_E(port, prio));
}
*buf_rsrv_i += ocelot_wm_read(ocelot, BUF_P_RSRV_I(port));
*buf_rsrv_e += ocelot_wm_read(ocelot, BUF_P_RSRV_E(port));
}
*buf_rsrv_i *= OCELOT_BUFFER_CELL_SZ;
*buf_rsrv_e *= OCELOT_BUFFER_CELL_SZ;
}
static void ocelot_get_ref_rsrv(struct ocelot *ocelot, u32 *ref_rsrv_i,
u32 *ref_rsrv_e)
{
int port, prio;
*ref_rsrv_i = 0;
*ref_rsrv_e = 0;
for (port = 0; port <= ocelot->num_phys_ports; port++) {
for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
*ref_rsrv_i += ocelot_wm_read(ocelot,
REF_Q_RSRV_I(port, prio));
*ref_rsrv_e += ocelot_wm_read(ocelot,
REF_Q_RSRV_E(port, prio));
}
*ref_rsrv_i += ocelot_wm_read(ocelot, REF_P_RSRV_I(port));
*ref_rsrv_e += ocelot_wm_read(ocelot, REF_P_RSRV_E(port));
}
}
/* Calculate all reservations, then set up the sharing watermark for DP=0 to
* consume the remaining resources up to the pool's configured size.
*/
static void ocelot_setup_sharing_watermarks(struct ocelot *ocelot)
{
u32 buf_rsrv_i, buf_rsrv_e;
u32 ref_rsrv_i, ref_rsrv_e;
u32 buf_shr_i, buf_shr_e;
u32 ref_shr_i, ref_shr_e;
ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e);
ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e);
buf_shr_i = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] -
buf_rsrv_i;
buf_shr_e = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] -
buf_rsrv_e;
ref_shr_i = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] -
ref_rsrv_i;
ref_shr_e = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] -
ref_rsrv_e;
buf_shr_i /= OCELOT_BUFFER_CELL_SZ;
buf_shr_e /= OCELOT_BUFFER_CELL_SZ;
ocelot_wm_write(ocelot, BUF_COL_SHR_I(0), buf_shr_i);
ocelot_wm_write(ocelot, BUF_COL_SHR_E(0), buf_shr_e);
ocelot_wm_write(ocelot, REF_COL_SHR_E(0), ref_shr_e);
ocelot_wm_write(ocelot, REF_COL_SHR_I(0), ref_shr_i);
ocelot_wm_write(ocelot, BUF_COL_SHR_I(1), 0);
ocelot_wm_write(ocelot, BUF_COL_SHR_E(1), 0);
ocelot_wm_write(ocelot, REF_COL_SHR_E(1), 0);
ocelot_wm_write(ocelot, REF_COL_SHR_I(1), 0);
}
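/* Worked example (values assumed): with a 245760-byte ingress buffer
 * pool and 61440 bytes of total ingress reservations, BUF_COL_SHR_I(0)
 * is programmed to (245760 - 61440) / 60 = 3072 cells, while the DP=1
 * watermarks stay at 0, i.e. permanently exceeded.
 */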
/* Ensure that all reservations can be enforced */
static int ocelot_watermark_validate(struct ocelot *ocelot,
struct netlink_ext_ack *extack)
{
u32 buf_rsrv_i, buf_rsrv_e;
u32 ref_rsrv_i, ref_rsrv_e;
ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e);
ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e);
if (buf_rsrv_i > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING]) {
NL_SET_ERR_MSG_MOD(extack,
"Ingress frame reservations exceed pool size");
return -ERANGE;
}
if (buf_rsrv_e > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR]) {
NL_SET_ERR_MSG_MOD(extack,
"Egress frame reservations exceed pool size");
return -ERANGE;
}
if (ref_rsrv_i > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING]) {
NL_SET_ERR_MSG_MOD(extack,
"Ingress reference reservations exceed pool size");
return -ERANGE;
}
if (ref_rsrv_e > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR]) {
NL_SET_ERR_MSG_MOD(extack,
"Egress reference reservations exceed pool size");
return -ERANGE;
}
return 0;
}
/* The hardware works like this:
*
* Frame forwarding decision taken
* |
* v
* +--------------------+--------------------+--------------------+
* | | | |
* v v v v
* Ingress memory Egress memory Ingress frame Egress frame
* check check reference check reference check
* | | | |
* v v v v
* BUF_Q_RSRV_I ok BUF_Q_RSRV_E ok REF_Q_RSRV_I ok REF_Q_RSRV_E ok
*(src port, prio) -+ (dst port, prio) -+ (src port, prio) -+ (dst port, prio) -+
* | | | | | | | |
* |exceeded | |exceeded | |exceeded | |exceeded |
* v | v | v | v |
* BUF_P_RSRV_I ok| BUF_P_RSRV_E ok| REF_P_RSRV_I ok| REF_P_RSRV_E ok|
* (src port) ----+ (dst port) ----+ (src port) ----+ (dst port) -----+
* | | | | | | | |
* |exceeded | |exceeded | |exceeded | |exceeded |
* v | v | v | v |
* BUF_PRIO_SHR_I ok| BUF_PRIO_SHR_E ok| REF_PRIO_SHR_I ok| REF_PRIO_SHR_E ok|
* (prio) ------+ (prio) ------+ (prio) ------+ (prio) -------+
* | | | | | | | |
* |exceeded | |exceeded | |exceeded | |exceeded |
* v | v | v | v |
* BUF_COL_SHR_I ok| BUF_COL_SHR_E ok| REF_COL_SHR_I ok| REF_COL_SHR_E ok|
* (dp) -------+ (dp) -------+ (dp) -------+ (dp) --------+
* | | | | | | | |
* |exceeded | |exceeded | |exceeded | |exceeded |
* v v v v v v v v
* fail success fail success fail success fail success
* | | | | | | | |
* v v v v v v v v
* +-----+----+ +-----+----+ +-----+----+ +-----+-----+
* | | | |
* +-------> OR <-------+ +-------> OR <-------+
* | |
* v v
* +----------------> AND <-----------------+
* |
* v
* FIFO drop / accept
*
* We are modeling each of the 4 parallel lookups as a devlink-sb pool.
* At least one (ingress or egress) memory pool and one (ingress or egress)
* frame reference pool need to have resources for frame acceptance to succeed.
*
* The following watermarks are controlled explicitly through devlink-sb:
* BUF_Q_RSRV_I, BUF_Q_RSRV_E, REF_Q_RSRV_I, REF_Q_RSRV_E
* BUF_P_RSRV_I, BUF_P_RSRV_E, REF_P_RSRV_I, REF_P_RSRV_E
* The following watermarks are controlled implicitly through devlink-sb:
* BUF_COL_SHR_I, BUF_COL_SHR_E, REF_COL_SHR_I, REF_COL_SHR_E
* The following watermarks are unused and disabled:
* BUF_PRIO_SHR_I, BUF_PRIO_SHR_E, REF_PRIO_SHR_I, REF_PRIO_SHR_E
*
* This function overrides the hardware defaults with more sane ones (no
* reservations by default, let sharing use all resources) and disables the
* unused watermarks.
*/
static void ocelot_watermark_init(struct ocelot *ocelot)
{
int all_tcs = GENMASK(OCELOT_NUM_TC - 1, 0);
int port;
ocelot_write(ocelot, all_tcs, QSYS_RES_QOS_MODE);
for (port = 0; port <= ocelot->num_phys_ports; port++)
ocelot_disable_reservation_watermarks(ocelot, port);
ocelot_disable_tc_sharing_watermarks(ocelot);
ocelot_setup_sharing_watermarks(ocelot);
}
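/* The acceptance rule from the diagram, restated as a sketch (not
 * driver code; the hardware evaluates this internally):
 *
 *	accept = (buf_ok_ingress || buf_ok_egress) &&
 *		 (ref_ok_ingress || ref_ok_egress);
 */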
/* Pool size and type are fixed up at runtime. Keeping this structure to
* look up the cell size multipliers.
*/
static const struct devlink_sb_pool_info ocelot_sb_pool[] = {
[OCELOT_SB_BUF] = {
.cell_size = OCELOT_BUFFER_CELL_SZ,
.threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC,
},
[OCELOT_SB_REF] = {
.cell_size = 1,
.threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC,
},
};
/* Returns the pool size configured through ocelot_sb_pool_set */
int ocelot_sb_pool_get(struct ocelot *ocelot, unsigned int sb_index,
u16 pool_index,
struct devlink_sb_pool_info *pool_info)
{
if (sb_index >= OCELOT_SB_NUM)
return -ENODEV;
if (pool_index >= OCELOT_SB_POOL_NUM)
return -ENODEV;
*pool_info = ocelot_sb_pool[sb_index];
pool_info->size = ocelot->pool_size[sb_index][pool_index];
if (pool_index)
pool_info->pool_type = DEVLINK_SB_POOL_TYPE_EGRESS;
else
pool_info->pool_type = DEVLINK_SB_POOL_TYPE_INGRESS;
return 0;
}
EXPORT_SYMBOL(ocelot_sb_pool_get);
/* The pool size received here configures the total amount of resources used on
* ingress (or on egress, depending upon the pool index). The pool size, minus
* the values for the port and port-tc reservations, is written into the
* COL_SHR(dp=0) sharing watermark.
*/
int ocelot_sb_pool_set(struct ocelot *ocelot, unsigned int sb_index,
u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type,
struct netlink_ext_ack *extack)
{
u32 old_pool_size;
int err;
if (sb_index >= OCELOT_SB_NUM) {
NL_SET_ERR_MSG_MOD(extack,
"Invalid sb, use 0 for buffers and 1 for frame references");
return -ENODEV;
}
if (pool_index >= OCELOT_SB_POOL_NUM) {
NL_SET_ERR_MSG_MOD(extack,
"Invalid pool, use 0 for ingress and 1 for egress");
return -ENODEV;
}
if (threshold_type != DEVLINK_SB_THRESHOLD_TYPE_STATIC) {
NL_SET_ERR_MSG_MOD(extack,
"Only static threshold supported");
return -EOPNOTSUPP;
}
old_pool_size = ocelot->pool_size[sb_index][pool_index];
ocelot->pool_size[sb_index][pool_index] = size;
err = ocelot_watermark_validate(ocelot, extack);
if (err) {
ocelot->pool_size[sb_index][pool_index] = old_pool_size;
return err;
}
ocelot_setup_sharing_watermarks(ocelot);
return 0;
}
EXPORT_SYMBOL(ocelot_sb_pool_set);
/* This retrieves the configuration made with ocelot_sb_port_pool_set */
int ocelot_sb_port_pool_get(struct ocelot *ocelot, int port,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold)
{
int wm_index;
switch (sb_index) {
case OCELOT_SB_BUF:
if (pool_index == OCELOT_SB_POOL_ING)
wm_index = BUF_P_RSRV_I(port);
else
wm_index = BUF_P_RSRV_E(port);
break;
case OCELOT_SB_REF:
if (pool_index == OCELOT_SB_POOL_ING)
wm_index = REF_P_RSRV_I(port);
else
wm_index = REF_P_RSRV_E(port);
break;
default:
return -ENODEV;
}
*p_threshold = ocelot_wm_read(ocelot, wm_index);
*p_threshold *= ocelot_sb_pool[sb_index].cell_size;
return 0;
}
EXPORT_SYMBOL(ocelot_sb_port_pool_get);
/* This configures the P_RSRV per-port reserved resource watermark */
int ocelot_sb_port_pool_set(struct ocelot *ocelot, int port,
unsigned int sb_index, u16 pool_index,
u32 threshold, struct netlink_ext_ack *extack)
{
int wm_index, err;
u32 old_thr;
switch (sb_index) {
case OCELOT_SB_BUF:
if (pool_index == OCELOT_SB_POOL_ING)
wm_index = BUF_P_RSRV_I(port);
else
wm_index = BUF_P_RSRV_E(port);
break;
case OCELOT_SB_REF:
if (pool_index == OCELOT_SB_POOL_ING)
wm_index = REF_P_RSRV_I(port);
else
wm_index = REF_P_RSRV_E(port);
break;
default:
NL_SET_ERR_MSG_MOD(extack, "Invalid shared buffer");
return -ENODEV;
}
threshold /= ocelot_sb_pool[sb_index].cell_size;
old_thr = ocelot_wm_read(ocelot, wm_index);
ocelot_wm_write(ocelot, wm_index, threshold);
err = ocelot_watermark_validate(ocelot, extack);
if (err) {
ocelot_wm_write(ocelot, wm_index, old_thr);
return err;
}
ocelot_setup_sharing_watermarks(ocelot);
return 0;
}
EXPORT_SYMBOL(ocelot_sb_port_pool_set);
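/* Worked example (illustrative): requesting a 3000-byte ingress buffer
 * reservation writes 3000 / 60 = 50 cells into BUF_P_RSRV_I, and a later
 * ocelot_sb_port_pool_get() reports 50 * 60 = 3000 bytes, so the
 * byte<->cell conversion round-trips exactly for multiples of 60.
 */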
/* This retrieves the configuration done by ocelot_sb_tc_pool_bind_set */
int ocelot_sb_tc_pool_bind_get(struct ocelot *ocelot, int port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 *p_pool_index, u32 *p_threshold)
{
int wm_index;
switch (sb_index) {
case OCELOT_SB_BUF:
if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
wm_index = BUF_Q_RSRV_I(port, tc_index);
else
wm_index = BUF_Q_RSRV_E(port, tc_index);
break;
case OCELOT_SB_REF:
if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
wm_index = REF_Q_RSRV_I(port, tc_index);
else
wm_index = REF_Q_RSRV_E(port, tc_index);
break;
default:
return -ENODEV;
}
*p_threshold = ocelot_wm_read(ocelot, wm_index);
*p_threshold *= ocelot_sb_pool[sb_index].cell_size;
if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
*p_pool_index = 0;
else
*p_pool_index = 1;
return 0;
}
EXPORT_SYMBOL(ocelot_sb_tc_pool_bind_get);
/* This configures the Q_RSRV per-port-tc reserved resource watermark */
int ocelot_sb_tc_pool_bind_set(struct ocelot *ocelot, int port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 pool_index, u32 threshold,
struct netlink_ext_ack *extack)
{
int wm_index, err;
u32 old_thr;
/* Paranoid check? */
if (pool_index == OCELOT_SB_POOL_ING &&
pool_type != DEVLINK_SB_POOL_TYPE_INGRESS)
return -EINVAL;
if (pool_index == OCELOT_SB_POOL_EGR &&
pool_type != DEVLINK_SB_POOL_TYPE_EGRESS)
return -EINVAL;
switch (sb_index) {
case OCELOT_SB_BUF:
if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
wm_index = BUF_Q_RSRV_I(port, tc_index);
else
wm_index = BUF_Q_RSRV_E(port, tc_index);
break;
case OCELOT_SB_REF:
if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
wm_index = REF_Q_RSRV_I(port, tc_index);
else
wm_index = REF_Q_RSRV_E(port, tc_index);
break;
default:
NL_SET_ERR_MSG_MOD(extack, "Invalid shared buffer");
return -ENODEV;
}
threshold /= ocelot_sb_pool[sb_index].cell_size;
old_thr = ocelot_wm_read(ocelot, wm_index);
ocelot_wm_write(ocelot, wm_index, threshold);
err = ocelot_watermark_validate(ocelot, extack);
if (err) {
ocelot_wm_write(ocelot, wm_index, old_thr);
return err;
}
ocelot_setup_sharing_watermarks(ocelot);
return 0;
}
EXPORT_SYMBOL(ocelot_sb_tc_pool_bind_set);
/* The hardware does not support atomic snapshots, so we'll read out the
* occupancy registers individually and have this as just a stub.
*/
int ocelot_sb_occ_snapshot(struct ocelot *ocelot, unsigned int sb_index)
{
return 0;
}
EXPORT_SYMBOL(ocelot_sb_occ_snapshot);
/* The watermark occupancy registers are cleared upon read,
* so let's read them.
*/
int ocelot_sb_occ_max_clear(struct ocelot *ocelot, unsigned int sb_index)
{
u32 inuse, maxuse;
int port, prio;
switch (sb_index) {
case OCELOT_SB_BUF:
for (port = 0; port <= ocelot->num_phys_ports; port++) {
for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
ocelot_wm_status(ocelot, BUF_Q_RSRV_I(port, prio),
&inuse, &maxuse);
ocelot_wm_status(ocelot, BUF_Q_RSRV_E(port, prio),
&inuse, &maxuse);
}
ocelot_wm_status(ocelot, BUF_P_RSRV_I(port),
&inuse, &maxuse);
ocelot_wm_status(ocelot, BUF_P_RSRV_E(port),
&inuse, &maxuse);
}
break;
case OCELOT_SB_REF:
for (port = 0; port <= ocelot->num_phys_ports; port++) {
for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
ocelot_wm_status(ocelot, REF_Q_RSRV_I(port, prio),
&inuse, &maxuse);
ocelot_wm_status(ocelot, REF_Q_RSRV_E(port, prio),
&inuse, &maxuse);
}
ocelot_wm_status(ocelot, REF_P_RSRV_I(port),
&inuse, &maxuse);
ocelot_wm_status(ocelot, REF_P_RSRV_E(port),
&inuse, &maxuse);
}
break;
default:
return -ENODEV;
}
return 0;
}
EXPORT_SYMBOL(ocelot_sb_occ_max_clear);
/* This retrieves the watermark occupancy for per-port P_RSRV watermarks */
int ocelot_sb_occ_port_pool_get(struct ocelot *ocelot, int port,
unsigned int sb_index, u16 pool_index,
u32 *p_cur, u32 *p_max)
{
int wm_index;
switch (sb_index) {
case OCELOT_SB_BUF:
if (pool_index == OCELOT_SB_POOL_ING)
wm_index = BUF_P_RSRV_I(port);
else
wm_index = BUF_P_RSRV_E(port);
break;
case OCELOT_SB_REF:
if (pool_index == OCELOT_SB_POOL_ING)
wm_index = REF_P_RSRV_I(port);
else
wm_index = REF_P_RSRV_E(port);
break;
default:
return -ENODEV;
}
ocelot_wm_status(ocelot, wm_index, p_cur, p_max);
*p_cur *= ocelot_sb_pool[sb_index].cell_size;
*p_max *= ocelot_sb_pool[sb_index].cell_size;
return 0;
}
EXPORT_SYMBOL(ocelot_sb_occ_port_pool_get);
/* This retrieves the watermark occupancy for per-port-tc Q_RSRV watermarks */
int ocelot_sb_occ_tc_port_bind_get(struct ocelot *ocelot, int port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max)
{
int wm_index;
switch (sb_index) {
case OCELOT_SB_BUF:
if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
wm_index = BUF_Q_RSRV_I(port, tc_index);
else
wm_index = BUF_Q_RSRV_E(port, tc_index);
break;
case OCELOT_SB_REF:
if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
wm_index = REF_Q_RSRV_I(port, tc_index);
else
wm_index = REF_Q_RSRV_E(port, tc_index);
break;
default:
return -ENODEV;
}
ocelot_wm_status(ocelot, wm_index, p_cur, p_max);
*p_cur *= ocelot_sb_pool[sb_index].cell_size;
*p_max *= ocelot_sb_pool[sb_index].cell_size;
return 0;
}
EXPORT_SYMBOL(ocelot_sb_occ_tc_port_bind_get);
int ocelot_devlink_sb_register(struct ocelot *ocelot)
{
int err;
err = devlink_sb_register(ocelot->devlink, OCELOT_SB_BUF,
ocelot->packet_buffer_size, 1, 1,
OCELOT_NUM_TC, OCELOT_NUM_TC);
if (err)
return err;
err = devlink_sb_register(ocelot->devlink, OCELOT_SB_REF,
ocelot->num_frame_refs, 1, 1,
OCELOT_NUM_TC, OCELOT_NUM_TC);
if (err) {
devlink_sb_unregister(ocelot->devlink, OCELOT_SB_BUF);
return err;
}
ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] = ocelot->packet_buffer_size;
ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] = ocelot->packet_buffer_size;
ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] = ocelot->num_frame_refs;
ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] = ocelot->num_frame_refs;
ocelot_watermark_init(ocelot);
return 0;
}
EXPORT_SYMBOL(ocelot_devlink_sb_register);
void ocelot_devlink_sb_unregister(struct ocelot *ocelot)
{
devlink_sb_unregister(ocelot->devlink, OCELOT_SB_BUF);
devlink_sb_unregister(ocelot->devlink, OCELOT_SB_REF);
}
EXPORT_SYMBOL(ocelot_devlink_sb_unregister);
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Microsemi Ocelot Switch driver
*
* This contains glue logic between the switchdev driver operations and the
* mscc_ocelot_switch_lib.
*
* Copyright (c) 2017, 2019 Microsemi Corporation
* Copyright 2020-2021 NXP Semiconductors
*/
#include <linux/if_bridge.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
static struct ocelot *devlink_port_to_ocelot(struct devlink_port *dlp)
{
return devlink_priv(dlp->devlink);
}
static int devlink_port_to_port(struct devlink_port *dlp)
{
struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
return dlp - ocelot->devlink_ports;
}
static int ocelot_devlink_sb_pool_get(struct devlink *dl,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info)
{
struct ocelot *ocelot = devlink_priv(dl);
return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
}
static int ocelot_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = devlink_priv(dl);
return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
threshold_type, extack);
}
static int ocelot_devlink_sb_port_pool_get(struct devlink_port *dlp,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold)
{
struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
int port = devlink_port_to_port(dlp);
return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
p_threshold);
}
static int ocelot_devlink_sb_port_pool_set(struct devlink_port *dlp,
unsigned int sb_index, u16 pool_index,
u32 threshold,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
int port = devlink_port_to_port(dlp);
return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
threshold, extack);
}
static int
ocelot_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 *p_pool_index, u32 *p_threshold)
{
struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
int port = devlink_port_to_port(dlp);
return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
pool_type, p_pool_index,
p_threshold);
}
static int
ocelot_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 pool_index, u32 threshold,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
int port = devlink_port_to_port(dlp);
return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
pool_type, pool_index, threshold,
extack);
}
static int ocelot_devlink_sb_occ_snapshot(struct devlink *dl,
unsigned int sb_index)
{
struct ocelot *ocelot = devlink_priv(dl);
return ocelot_sb_occ_snapshot(ocelot, sb_index);
}
static int ocelot_devlink_sb_occ_max_clear(struct devlink *dl,
unsigned int sb_index)
{
struct ocelot *ocelot = devlink_priv(dl);
return ocelot_sb_occ_max_clear(ocelot, sb_index);
}
static int ocelot_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
unsigned int sb_index,
u16 pool_index, u32 *p_cur,
u32 *p_max)
{
struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
int port = devlink_port_to_port(dlp);
return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
p_cur, p_max);
}
static int
ocelot_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max)
{
struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
int port = devlink_port_to_port(dlp);
return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index,
tc_index, pool_type,
p_cur, p_max);
}
const struct devlink_ops ocelot_devlink_ops = {
.sb_pool_get = ocelot_devlink_sb_pool_get,
.sb_pool_set = ocelot_devlink_sb_pool_set,
.sb_port_pool_get = ocelot_devlink_sb_port_pool_get,
.sb_port_pool_set = ocelot_devlink_sb_port_pool_set,
.sb_tc_pool_bind_get = ocelot_devlink_sb_tc_pool_bind_get,
.sb_tc_pool_bind_set = ocelot_devlink_sb_tc_pool_bind_set,
.sb_occ_snapshot = ocelot_devlink_sb_occ_snapshot,
.sb_occ_max_clear = ocelot_devlink_sb_occ_max_clear,
.sb_occ_port_pool_get = ocelot_devlink_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = ocelot_devlink_sb_occ_tc_port_bind_get,
};
int ocelot_port_devlink_init(struct ocelot *ocelot, int port,
enum devlink_port_flavour flavour)
{
struct devlink_port *dlp = &ocelot->devlink_ports[port];
int id_len = sizeof(ocelot->base_mac);
struct devlink *dl = ocelot->devlink;
struct devlink_port_attrs attrs = {};
memcpy(attrs.switch_id.id, &ocelot->base_mac, id_len);
attrs.switch_id.id_len = id_len;
attrs.phys.port_number = port;
attrs.flavour = flavour;
devlink_port_attrs_set(dlp, &attrs);
return devlink_port_register(dl, dlp, port);
}
void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port)
{
struct devlink_port *dlp = &ocelot->devlink_ports[port];
devlink_port_unregister(dlp);
}
static struct devlink_port *ocelot_get_devlink_port(struct net_device *dev)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->chip_port;
return &ocelot->devlink_ports[port];
}
int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
struct flow_cls_offload *f,
bool ingress)
......@@ -525,20 +702,6 @@ static void ocelot_set_rx_mode(struct net_device *dev)
__dev_mc_sync(dev, ocelot_mc_sync, ocelot_mc_unsync);
}
static int ocelot_port_get_phys_port_name(struct net_device *dev,
char *buf, size_t len)
{
struct ocelot_port_private *priv = netdev_priv(dev);
int port = priv->chip_port;
int ret;
ret = snprintf(buf, len, "p%d", port);
if (ret >= len)
return -EINVAL;
return 0;
}
static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
{
struct ocelot_port_private *priv = netdev_priv(dev);
......@@ -689,18 +852,6 @@ static int ocelot_set_features(struct net_device *dev,
return 0;
}
static int ocelot_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
ppid->id_len = sizeof(ocelot->base_mac);
memcpy(&ppid->id, &ocelot->base_mac, ppid->id_len);
return 0;
}
static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct ocelot_port_private *priv = netdev_priv(dev);
......@@ -727,7 +878,6 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_stop = ocelot_port_stop,
.ndo_start_xmit = ocelot_port_xmit,
.ndo_set_rx_mode = ocelot_set_rx_mode,
.ndo_get_phys_port_name = ocelot_port_get_phys_port_name,
.ndo_set_mac_address = ocelot_port_set_mac_address,
.ndo_get_stats64 = ocelot_get_stats64,
.ndo_fdb_add = ocelot_port_fdb_add,
......@@ -736,9 +886,9 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid,
.ndo_set_features = ocelot_set_features,
.ndo_get_port_parent_id = ocelot_get_port_parent_id,
.ndo_setup_tc = ocelot_setup_tc,
.ndo_do_ioctl = ocelot_ioctl,
.ndo_get_devlink_port = ocelot_get_devlink_port,
};
struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port)
......
......@@ -517,7 +517,6 @@ static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
ocelot->map = ocelot_regmap;
ocelot->stats_layout = ocelot_stats_layout;
ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout);
ocelot->shared_queue_sz = 224 * 1024;
ocelot->num_mact_rows = 1024;
ocelot->ops = ops;
......@@ -764,9 +763,25 @@ static u16 ocelot_wm_enc(u16 value)
return value;
}
static u16 ocelot_wm_dec(u16 wm)
{
if (wm & BIT(8))
return (wm & GENMASK(7, 0)) * 16;
return wm;
}
static void ocelot_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
{
*inuse = (val & GENMASK(23, 12)) >> 12;
*maxuse = val & GENMASK(11, 0);
}
static const struct ocelot_ops ocelot_ops = {
.reset = ocelot_reset,
.wm_enc = ocelot_wm_enc,
.wm_dec = ocelot_wm_dec,
.wm_stat = ocelot_wm_stat,
.port_to_netdev = ocelot_port_to_netdev,
.netdev_to_port = ocelot_netdev_to_port,
};
......@@ -1036,6 +1051,14 @@ static struct ptp_clock_info ocelot_ptp_clock_info = {
.enable = ocelot_ptp_enable,
};
static void mscc_ocelot_teardown_devlink_ports(struct ocelot *ocelot)
{
int port;
for (port = 0; port < ocelot->num_phys_ports; port++)
ocelot_port_devlink_teardown(ocelot, port);
}
static void mscc_ocelot_release_ports(struct ocelot *ocelot)
{
int port;
......@@ -1063,28 +1086,44 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
{
struct ocelot *ocelot = platform_get_drvdata(pdev);
struct device_node *portnp;
int err;
bool *registered_ports;
int port, err;
u32 reg;
ocelot->ports = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
sizeof(struct ocelot_port *), GFP_KERNEL);
if (!ocelot->ports)
return -ENOMEM;
ocelot->devlink_ports = devm_kcalloc(ocelot->dev,
ocelot->num_phys_ports,
sizeof(*ocelot->devlink_ports),
GFP_KERNEL);
if (!ocelot->devlink_ports)
return -ENOMEM;
registered_ports = kcalloc(ocelot->num_phys_ports, sizeof(bool),
GFP_KERNEL);
if (!registered_ports)
return -ENOMEM;
for_each_available_child_of_node(ports, portnp) {
struct ocelot_port_private *priv;
struct ocelot_port *ocelot_port;
struct device_node *phy_node;
struct devlink_port *dlp;
phy_interface_t phy_mode;
struct phy_device *phy;
struct regmap *target;
struct resource *res;
struct phy *serdes;
char res_name[8];
u32 port;
if (of_property_read_u32(portnp, "reg", &port))
if (of_property_read_u32(portnp, "reg", &reg))
continue;
port = reg;
snprintf(res_name, sizeof(res_name), "port%d", port);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
......@@ -1102,15 +1141,26 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
if (!phy)
continue;
err = ocelot_port_devlink_init(ocelot, port,
DEVLINK_PORT_FLAVOUR_PHYSICAL);
if (err) {
of_node_put(portnp);
goto out_teardown;
}
err = ocelot_probe_port(ocelot, port, target, phy);
if (err) {
of_node_put(portnp);
return err;
goto out_teardown;
}
registered_ports[port] = true;
ocelot_port = ocelot->ports[port];
priv = container_of(ocelot_port, struct ocelot_port_private,
port);
dlp = &ocelot->devlink_ports[port];
devlink_port_type_eth_set(dlp, priv->dev);
of_get_phy_mode(portnp, &phy_mode);
......@@ -1135,7 +1185,8 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
"invalid phy mode for port%d, (Q)SGMII only\n",
port);
of_node_put(portnp);
return -EINVAL;
err = -EINVAL;
goto out_teardown;
}
serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
......@@ -1149,13 +1200,46 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
port);
of_node_put(portnp);
return err;
goto out_teardown;
}
priv->serdes = serdes;
}
/* Initialize unused devlink ports at the end */
for (port = 0; port < ocelot->num_phys_ports; port++) {
if (registered_ports[port])
continue;
err = ocelot_port_devlink_init(ocelot, port,
DEVLINK_PORT_FLAVOUR_UNUSED);
if (err) {
/* Tear down only the unused devlink ports initialized so far;
* registered ports are handled by the out_teardown path, and
* stopping at index zero avoids an out-of-bounds access.
*/
while (port--) {
if (registered_ports[port])
continue;
ocelot_port_devlink_teardown(ocelot, port);
}
goto out_teardown;
}
}
kfree(registered_ports);
return 0;
out_teardown:
/* Unregister the network interfaces */
mscc_ocelot_release_ports(ocelot);
/* Tear down devlink ports for the registered network interfaces */
for (port = 0; port < ocelot->num_phys_ports; port++) {
if (!registered_ports[port])
continue;
ocelot_port_devlink_teardown(ocelot, port);
}
kfree(registered_ports);
return err;
}
static int mscc_ocelot_probe(struct platform_device *pdev)
......@@ -1163,6 +1247,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
int err, irq_xtr, irq_ptp_rdy;
struct device_node *ports;
struct devlink *devlink;
struct ocelot *ocelot;
struct regmap *hsio;
unsigned int i;
......@@ -1186,10 +1271,12 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
if (!np && !pdev->dev.platform_data)
return -ENODEV;
ocelot = devm_kzalloc(&pdev->dev, sizeof(*ocelot), GFP_KERNEL);
if (!ocelot)
devlink = devlink_alloc(&ocelot_devlink_ops, sizeof(*ocelot));
if (!devlink)
return -ENOMEM;
ocelot = devlink_priv(devlink);
ocelot->devlink = priv_to_devlink(ocelot);
platform_set_drvdata(pdev, ocelot);
ocelot->dev = &pdev->dev;
......@@ -1206,7 +1293,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->targets[io_target[i].id] = NULL;
continue;
}
return PTR_ERR(target);
err = PTR_ERR(target);
goto out_free_devlink;
}
ocelot->targets[io_target[i].id] = target;
......@@ -1215,24 +1303,25 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio");
if (IS_ERR(hsio)) {
dev_err(&pdev->dev, "missing hsio syscon\n");
return PTR_ERR(hsio);
err = PTR_ERR(hsio);
goto out_free_devlink;
}
ocelot->targets[HSIO] = hsio;
err = ocelot_chip_init(ocelot, &ocelot_ops);
if (err)
return err;
goto out_free_devlink;
irq_xtr = platform_get_irq_byname(pdev, "xtr");
if (irq_xtr < 0)
return -ENODEV;
goto out_free_devlink;
err = devm_request_threaded_irq(&pdev->dev, irq_xtr, NULL,
ocelot_xtr_irq_handler, IRQF_ONESHOT,
"frame extraction", ocelot);
if (err)
return err;
goto out_free_devlink;
irq_ptp_rdy = platform_get_irq_byname(pdev, "ptp_rdy");
if (irq_ptp_rdy > 0 && ocelot->targets[PTP]) {
......@@ -1241,7 +1330,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
IRQF_ONESHOT, "ptp ready",
ocelot);
if (err)
return err;
goto out_free_devlink;
/* Both the PTP interrupt and the PTP bank are available */
ocelot->ptp = 1;
......@@ -1250,7 +1339,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ports = of_get_child_by_name(np, "ethernet-ports");
if (!ports) {
dev_err(ocelot->dev, "no ethernet-ports child node found\n");
return -ENODEV;
err = -ENODEV;
goto out_free_devlink;
}
ocelot->num_phys_ports = of_get_child_count(ports);
......@@ -1265,10 +1355,18 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
if (err)
goto out_put_ports;
err = mscc_ocelot_init_ports(pdev, ports);
err = devlink_register(devlink, ocelot->dev);
if (err)
goto out_ocelot_deinit;
err = mscc_ocelot_init_ports(pdev, ports);
if (err)
goto out_ocelot_devlink_unregister;
err = ocelot_devlink_sb_register(ocelot);
if (err)
goto out_ocelot_release_ports;
if (ocelot->ptp) {
err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info);
if (err) {
......@@ -1288,10 +1386,17 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
return 0;
out_ocelot_release_ports:
mscc_ocelot_release_ports(ocelot);
mscc_ocelot_teardown_devlink_ports(ocelot);
out_ocelot_devlink_unregister:
devlink_unregister(devlink);
out_ocelot_deinit:
ocelot_deinit(ocelot);
out_put_ports:
of_node_put(ports);
out_free_devlink:
devlink_free(devlink);
return err;
}
......@@ -1300,11 +1405,15 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
struct ocelot *ocelot = platform_get_drvdata(pdev);
ocelot_deinit_timestamp(ocelot);
ocelot_devlink_sb_unregister(ocelot);
mscc_ocelot_release_ports(ocelot);
mscc_ocelot_teardown_devlink_ports(ocelot);
devlink_unregister(ocelot->devlink);
ocelot_deinit(ocelot);
unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
unregister_switchdev_notifier(&ocelot_switchdev_nb);
unregister_netdevice_notifier(&ocelot_netdevice_nb);
devlink_free(ocelot->devlink);
return 0;
}
......
......@@ -699,6 +699,40 @@ struct dsa_switch_ops {
int (*devlink_info_get)(struct dsa_switch *ds,
struct devlink_info_req *req,
struct netlink_ext_ack *extack);
int (*devlink_sb_pool_get)(struct dsa_switch *ds,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info);
int (*devlink_sb_pool_set)(struct dsa_switch *ds, unsigned int sb_index,
u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type,
struct netlink_ext_ack *extack);
int (*devlink_sb_port_pool_get)(struct dsa_switch *ds, int port,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold);
int (*devlink_sb_port_pool_set)(struct dsa_switch *ds, int port,
unsigned int sb_index, u16 pool_index,
u32 threshold,
struct netlink_ext_ack *extack);
int (*devlink_sb_tc_pool_bind_get)(struct dsa_switch *ds, int port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 *p_pool_index, u32 *p_threshold);
int (*devlink_sb_tc_pool_bind_set)(struct dsa_switch *ds, int port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 pool_index, u32 threshold,
struct netlink_ext_ack *extack);
int (*devlink_sb_occ_snapshot)(struct dsa_switch *ds,
unsigned int sb_index);
int (*devlink_sb_occ_max_clear)(struct dsa_switch *ds,
unsigned int sb_index);
int (*devlink_sb_occ_port_pool_get)(struct dsa_switch *ds, int port,
unsigned int sb_index, u16 pool_index,
u32 *p_cur, u32 *p_max);
int (*devlink_sb_occ_tc_port_bind_get)(struct dsa_switch *ds, int port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max);
/*
* MTU change functionality. Switches can also adjust their MRU through
......
......@@ -98,6 +98,7 @@
#define IFH_REW_OP_TWO_STEP_PTP 0x3
#define IFH_REW_OP_ORIGIN_PTP 0x5
#define OCELOT_NUM_TC 8
#define OCELOT_TAG_LEN 16
#define OCELOT_SHORT_PREFIX_LEN 4
#define OCELOT_LONG_PREFIX_LEN 16
......@@ -563,6 +564,8 @@ struct ocelot_ops {
int (*netdev_to_port)(struct net_device *dev);
int (*reset)(struct ocelot *ocelot);
u16 (*wm_enc)(u16 value);
u16 (*wm_dec)(u16 value);
void (*wm_stat)(u32 val, u32 *inuse, u32 *maxuse);
};
struct ocelot_vcap_block {
......@@ -576,6 +579,18 @@ struct ocelot_vlan {
u16 vid;
};
enum ocelot_sb {
OCELOT_SB_BUF,
OCELOT_SB_REF,
OCELOT_SB_NUM,
};
enum ocelot_sb_pool {
OCELOT_SB_POOL_ING,
OCELOT_SB_POOL_EGR,
OCELOT_SB_POOL_NUM,
};
struct ocelot_port {
struct ocelot *ocelot;
......@@ -599,6 +614,8 @@ struct ocelot_port {
struct ocelot {
struct device *dev;
struct devlink *devlink;
struct devlink_port *devlink_ports;
const struct ocelot_ops *ops;
struct regmap *targets[TARGET_MAX];
......@@ -607,7 +624,9 @@ struct ocelot {
const struct ocelot_stat_layout *stats_layout;
unsigned int num_stats;
int shared_queue_sz;
u32 pool_size[OCELOT_SB_NUM][OCELOT_SB_POOL_NUM];
int packet_buffer_size;
int num_frame_refs;
int num_mact_rows;
struct net_device *hw_bridge_dev;
......@@ -777,4 +796,38 @@ int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
const struct switchdev_obj_port_mdb *mdb);
int ocelot_devlink_sb_register(struct ocelot *ocelot);
void ocelot_devlink_sb_unregister(struct ocelot *ocelot);
int ocelot_sb_pool_get(struct ocelot *ocelot, unsigned int sb_index,
u16 pool_index,
struct devlink_sb_pool_info *pool_info);
int ocelot_sb_pool_set(struct ocelot *ocelot, unsigned int sb_index,
u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type,
struct netlink_ext_ack *extack);
int ocelot_sb_port_pool_get(struct ocelot *ocelot, int port,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold);
int ocelot_sb_port_pool_set(struct ocelot *ocelot, int port,
unsigned int sb_index, u16 pool_index,
u32 threshold, struct netlink_ext_ack *extack);
int ocelot_sb_tc_pool_bind_get(struct ocelot *ocelot, int port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 *p_pool_index, u32 *p_threshold);
int ocelot_sb_tc_pool_bind_set(struct ocelot *ocelot, int port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 pool_index, u32 threshold,
struct netlink_ext_ack *extack);
int ocelot_sb_occ_snapshot(struct ocelot *ocelot, unsigned int sb_index);
int ocelot_sb_occ_max_clear(struct ocelot *ocelot, unsigned int sb_index);
int ocelot_sb_occ_port_pool_get(struct ocelot *ocelot, int port,
unsigned int sb_index, u16 pool_index,
u32 *p_cur, u32 *p_max);
int ocelot_sb_occ_tc_port_bind_get(struct ocelot *ocelot, int port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max);
#endif
......@@ -71,11 +71,8 @@
#define QSYS_RES_STAT_GSZ 0x8
#define QSYS_RES_STAT_INUSE(x) (((x) << 12) & GENMASK(23, 12))
#define QSYS_RES_STAT_INUSE_M GENMASK(23, 12)
#define QSYS_RES_STAT_INUSE_X(x) (((x) & GENMASK(23, 12)) >> 12)
#define QSYS_RES_STAT_MAXUSE(x) ((x) & GENMASK(11, 0))
#define QSYS_RES_STAT_MAXUSE_M GENMASK(11, 0)
#define QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(x) ((x) & GENMASK(15, 0))
#define QSYS_MMGT_EQ_CTRL_FP_FREE_CNT_M GENMASK(15, 0)
#define QSYS_EVENTS_CORE_EV_FDC(x) (((x) << 2) & GENMASK(4, 2))
#define QSYS_EVENTS_CORE_EV_FDC_M GENMASK(4, 2)
......
......@@ -463,8 +463,165 @@ static int dsa_devlink_info_get(struct devlink *dl,
return -EOPNOTSUPP;
}
static int dsa_devlink_sb_pool_get(struct devlink *dl,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
if (!ds->ops->devlink_sb_pool_get)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
pool_info);
}
static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
if (!ds->ops->devlink_sb_pool_set)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
threshold_type, extack);
}
static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold)
{
struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
int port = dsa_devlink_port_to_port(dlp);
if (!ds->ops->devlink_sb_port_pool_get)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
pool_index, p_threshold);
}
static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
unsigned int sb_index, u16 pool_index,
u32 threshold,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
int port = dsa_devlink_port_to_port(dlp);
if (!ds->ops->devlink_sb_port_pool_set)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
pool_index, threshold, extack);
}
static int
dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 *p_pool_index, u32 *p_threshold)
{
struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
int port = dsa_devlink_port_to_port(dlp);
if (!ds->ops->devlink_sb_tc_pool_bind_get)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
tc_index, pool_type,
p_pool_index, p_threshold);
}
static int
dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 pool_index, u32 threshold,
struct netlink_ext_ack *extack)
{
struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
int port = dsa_devlink_port_to_port(dlp);
if (!ds->ops->devlink_sb_tc_pool_bind_set)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
tc_index, pool_type,
pool_index, threshold,
extack);
}
static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
unsigned int sb_index)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
if (!ds->ops->devlink_sb_occ_snapshot)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
}
static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
unsigned int sb_index)
{
struct dsa_switch *ds = dsa_devlink_to_ds(dl);
if (!ds->ops->devlink_sb_occ_max_clear)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
}
static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
unsigned int sb_index,
u16 pool_index, u32 *p_cur,
u32 *p_max)
{
struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
int port = dsa_devlink_port_to_port(dlp);
if (!ds->ops->devlink_sb_occ_port_pool_get)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
pool_index, p_cur, p_max);
}
static int
dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max)
{
struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
int port = dsa_devlink_port_to_port(dlp);
if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
return -EOPNOTSUPP;
return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
sb_index, tc_index,
pool_type, p_cur,
p_max);
}
static const struct devlink_ops dsa_devlink_ops = {
.info_get = dsa_devlink_info_get,
.info_get = dsa_devlink_info_get,
.sb_pool_get = dsa_devlink_sb_pool_get,
.sb_pool_set = dsa_devlink_sb_pool_set,
.sb_port_pool_get = dsa_devlink_sb_port_pool_get,
.sb_port_pool_set = dsa_devlink_sb_port_pool_set,
.sb_tc_pool_bind_get = dsa_devlink_sb_tc_pool_bind_get,
.sb_tc_pool_bind_set = dsa_devlink_sb_tc_pool_bind_set,
.sb_occ_snapshot = dsa_devlink_sb_occ_snapshot,
.sb_occ_max_clear = dsa_devlink_sb_occ_max_clear,
.sb_occ_port_pool_get = dsa_devlink_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = dsa_devlink_sb_occ_tc_port_bind_get,
};
static int dsa_switch_setup(struct dsa_switch *ds)
......