Commit cb689269 authored by David S. Miller

Merge branch 'mlxsw-devlink-shared-buffers'

Jiri Pirko says:

====================
devlink + mlxsw: add support for config and control of shared buffers

ASICs implement a shared buffer for packet forwarding purposes and allow
flexible partitioning of that buffer between different flows and ports,
enabling non-blocking progress of different flows as well as separation
of lossy traffic from lossless traffic when Per-Priority Flow Control
(PFC) is used. The shared buffer optimizes buffer utilization for better
absorption of packet bursts.

This patchset implements an API based on the model SAI uses. That model
is aligned with multiple ASIC vendors, so this API should be vendor-neutral.

The userspace counterpart patchset for the iproute2 devlink tool can be found here:
https://github.com/jpirko/iproute2_mlxsw/tree/devlink_sb

A couple of usage examples:

switch$ devlink sb help
Usage: devlink sb show [ DEV [ sb SB_INDEX ] ]
       devlink sb pool show [ DEV [ sb SB_INDEX ] pool POOL_INDEX ]
       devlink sb pool set DEV [ sb SB_INDEX ] pool POOL_INDEX
                           size POOL_SIZE thtype { static | dynamic }
       devlink sb port pool show [ DEV/PORT_INDEX [ sb SB_INDEX ]
                                   pool POOL_INDEX ]
       devlink sb port pool set DEV/PORT_INDEX [ sb SB_INDEX ]
                                pool POOL_INDEX th THRESHOLD
       devlink sb tc bind show [ DEV/PORT_INDEX [ sb SB_INDEX ] tc TC_INDEX ]
       devlink sb tc bind set DEV/PORT_INDEX [ sb SB_INDEX ] tc TC_INDEX
                              type { ingress | egress } pool POOL_INDEX
                              th THRESHOLD
       devlink sb occupancy show { DEV | DEV/PORT_INDEX } [ sb SB_INDEX ]
       devlink sb occupancy snapshot DEV [ sb SB_INDEX ]
       devlink sb occupancy clearmax DEV [ sb SB_INDEX ]

switch$ devlink sb show
pci/0000:03:00.0: sb 0 size 16777216 ing_pools 4 eg_pools 4 ing_tcs 8 eg_tcs 8

switch$ devlink sb pool show
pci/0000:03:00.0: sb 0 pool 0 type ingress size 12400032 thtype dynamic
pci/0000:03:00.0: sb 0 pool 1 type ingress size 0 thtype dynamic
pci/0000:03:00.0: sb 0 pool 2 type ingress size 0 thtype dynamic
pci/0000:03:00.0: sb 0 pool 3 type ingress size 200064 thtype dynamic
pci/0000:03:00.0: sb 0 pool 4 type egress size 13220064 thtype dynamic
pci/0000:03:00.0: sb 0 pool 5 type egress size 0 thtype dynamic
pci/0000:03:00.0: sb 0 pool 6 type egress size 0 thtype dynamic
pci/0000:03:00.0: sb 0 pool 7 type egress size 0 thtype dynamic

switch$ devlink sb port pool show sw0p7 pool 0
sw0p7: sb 0 pool 0 threshold 16

switch$ sudo devlink sb port pool set sw0p7 pool 0 th 15

switch$ devlink sb port pool show sw0p7 pool 0
sw0p7: sb 0 pool 0 threshold 15

switch$ devlink sb tc bind show sw0p7 tc 0 type ingress
sw0p7: sb 0 tc 0 type ingress pool 0 threshold 10

switch$ sudo devlink sb tc bind set sw0p7 tc 0 type ingress pool 0 th 9

switch$ devlink sb tc bind show sw0p7 tc 0 type ingress
sw0p7: sb 0 tc 0 type ingress pool 0 threshold 9

switch$ sudo devlink sb occupancy snapshot pci/0000:03:00.0

switch$ devlink sb occupancy show sw0p7
sw0p7:
  pool: 0:      82944/3217344 1:          0/0       2:          0/0       3:          0/0
        4:          0/384     5:          0/0       6:          0/0       7:          0/0
  itc:  0(0):   96768/3217344 1(0):       0/0       2(0):       0/0       3(0):       0/0
        4(0):       0/0       5(0):       0/0       6(0):       0/0       7(0):       0/0
  etc:  0(4):       0/384     1(4):       0/0       2(4):       0/0       3(4):       0/0
        4(4):       0/0       5(4):       0/0       6(4):       0/0       7(4):       0/0

switch$ sudo devlink sb occupancy clearmax pci/0000:03:00.0
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f38ba953 2d0ed39f
@@ -44,7 +44,7 @@
 #include <linux/seq_file.h>
 #include <linux/u64_stats_sync.h>
 #include <linux/netdevice.h>
-#include <linux/wait.h>
+#include <linux/completion.h>
 #include <linux/skbuff.h>
 #include <linux/etherdevice.h>
 #include <linux/types.h>
@@ -55,6 +55,7 @@
 #include <linux/mutex.h>
 #include <linux/rcupdate.h>
 #include <linux/slab.h>
+#include <linux/workqueue.h>
 #include <asm/byteorder.h>
 #include <net/devlink.h>
@@ -73,6 +74,8 @@ static const char mlxsw_core_driver_name[] = "mlxsw_core";
 static struct dentry *mlxsw_core_dbg_root;
+static struct workqueue_struct *mlxsw_wq;
+
 struct mlxsw_core_pcpu_stats {
 u64 trap_rx_packets[MLXSW_TRAP_ID_MAX];
 u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX];
@@ -93,11 +96,9 @@ struct mlxsw_core {
 struct list_head rx_listener_list;
 struct list_head event_listener_list;
 struct {
-struct sk_buff *resp_skb;
-u64 tid;
-wait_queue_head_t wait;
-bool trans_active;
-struct mutex lock; /* One EMAD transaction at a time. */
+atomic64_t tid;
+struct list_head trans_list;
+spinlock_t trans_list_lock; /* protects trans_list writes */
 bool use_emad;
 } emad;
 struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
@@ -290,7 +291,7 @@ static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
 static void mlxsw_emad_pack_op_tlv(char *op_tlv,
 const struct mlxsw_reg_info *reg,
 enum mlxsw_core_reg_access_type type,
-struct mlxsw_core *mlxsw_core)
+u64 tid)
 {
 mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
 mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
@@ -306,7 +307,7 @@ static void mlxsw_emad_pack_op_tlv(char *op_tlv,
 MLXSW_EMAD_OP_TLV_METHOD_WRITE);
 mlxsw_emad_op_tlv_class_set(op_tlv,
 MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
-mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
+mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
 }

 static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
@@ -328,7 +329,7 @@ static void mlxsw_emad_construct(struct sk_buff *skb,
 const struct mlxsw_reg_info *reg,
 char *payload,
 enum mlxsw_core_reg_access_type type,
-struct mlxsw_core *mlxsw_core)
+u64 tid)
 {
 char *buf;
@@ -339,7 +340,7 @@ static void mlxsw_emad_construct(struct sk_buff *skb,
 mlxsw_emad_pack_reg_tlv(buf, reg, payload);
 buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
-mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);
+mlxsw_emad_pack_op_tlv(buf, reg, type, tid);
 mlxsw_emad_construct_eth_hdr(skb);
 }
@@ -376,58 +377,16 @@ static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
 return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
 }

-#define MLXSW_EMAD_TIMEOUT_MS 200
-
-static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
-struct sk_buff *skb,
-const struct mlxsw_tx_info *tx_info)
-{
-int err;
-int ret;
-
-mlxsw_core->emad.trans_active = true;
-
-err = mlxsw_core_skb_transmit(mlxsw_core, skb, tx_info);
-if (err) {
-dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
-mlxsw_core->emad.tid);
-dev_kfree_skb(skb);
-goto trans_inactive_out;
-}
-
-ret = wait_event_timeout(mlxsw_core->emad.wait,
-!(mlxsw_core->emad.trans_active),
-msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
-if (!ret) {
-dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
-mlxsw_core->emad.tid);
-err = -EIO;
-goto trans_inactive_out;
-}
-
-return 0;
-
-trans_inactive_out:
-mlxsw_core->emad.trans_active = false;
-return err;
-}
-
-static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
-char *op_tlv)
+static int mlxsw_emad_process_status(char *op_tlv,
+enum mlxsw_emad_op_tlv_status *p_status)
 {
-enum mlxsw_emad_op_tlv_status status;
-u64 tid;
-
-status = mlxsw_emad_op_tlv_status_get(op_tlv);
-tid = mlxsw_emad_op_tlv_tid_get(op_tlv);
+*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);

-switch (status) {
+switch (*p_status) {
 case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
 return 0;
 case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
 case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
-dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
-tid, status, mlxsw_emad_op_tlv_status_str(status));
 return -EAGAIN;
 case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
 case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
@@ -438,70 +397,150 @@ static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
 case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
 case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
 default:
-dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
-tid, status, mlxsw_emad_op_tlv_status_str(status));
 return -EIO;
 }
 }

-static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
-struct sk_buff *skb)
-{
-return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
-}
+static int
+mlxsw_emad_process_status_skb(struct sk_buff *skb,
+enum mlxsw_emad_op_tlv_status *p_status)
+{
+return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
+}
struct mlxsw_reg_trans {
struct list_head list;
struct list_head bulk_list;
struct mlxsw_core *core;
struct sk_buff *tx_skb;
struct mlxsw_tx_info tx_info;
struct delayed_work timeout_dw;
unsigned int retries;
u64 tid;
struct completion completion;
atomic_t active;
mlxsw_reg_trans_cb_t *cb;
unsigned long cb_priv;
const struct mlxsw_reg_info *reg;
enum mlxsw_core_reg_access_type type;
int err;
enum mlxsw_emad_op_tlv_status emad_status;
struct rcu_head rcu;
};
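The struct above carries everything one in-flight register access needs: a copy of the request skb for retransmission, the delayed work that fires on timeout, and the completion the issuing thread blocks on. A condensed lifecycle sketch follows (illustrative only; the helper name is hypothetical and several steps are elided, but the field usage matches the functions below):

static int example_trans_lifecycle(struct mlxsw_core *mlxsw_core,
                                   struct mlxsw_reg_trans *trans)
{
        /* tid pairs incoming responses with this transaction */
        init_completion(&trans->completion);
        INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
        /* ...link trans into mlxsw_core->emad.trans_list and transmit
         * tx_skb; mlxsw_emad_trans_finish() completes the completion on
         * response, transmit failure or retry exhaustion...
         */
        wait_for_completion(&trans->completion);
        return trans->err;
}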
+#define MLXSW_EMAD_TIMEOUT_MS 200
+
+static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
+{
+unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
+
+mlxsw_core_schedule_dw(&trans->timeout_dw, timeout);
+}

 static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
-struct sk_buff *skb,
-const struct mlxsw_tx_info *tx_info)
+struct mlxsw_reg_trans *trans)
 {
-struct sk_buff *trans_skb;
-int n_retry;
+struct sk_buff *skb;
 int err;

-n_retry = 0;
-retry:
-/* We copy the EMAD to a new skb, since we might need
- * to retransmit it in case of failure.
- */
-trans_skb = skb_copy(skb, GFP_KERNEL);
-if (!trans_skb) {
-err = -ENOMEM;
-goto out;
-}
+skb = skb_copy(trans->tx_skb, GFP_KERNEL);
+if (!skb)
+return -ENOMEM;

-err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
-if (!err) {
-struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;
-
-err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
-if (err)
-dev_kfree_skb(resp_skb);
-if (!err || err != -EAGAIN)
-goto out;
-}
-if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
-goto retry;
-
-out:
-dev_kfree_skb(skb);
-mlxsw_core->emad.tid++;
-return err;
+atomic_set(&trans->active, 1);
+err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
+if (err) {
+dev_kfree_skb(skb);
+return err;
+}
+mlxsw_emad_trans_timeout_schedule(trans);
+return 0;
+}
+
+static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
+{
+struct mlxsw_core *mlxsw_core = trans->core;
+
+dev_kfree_skb(trans->tx_skb);
+spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
+list_del_rcu(&trans->list);
+spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
+trans->err = err;
+complete(&trans->completion);
+}
+
+static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
+struct mlxsw_reg_trans *trans)
+{
+int err;
+
+if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
+trans->retries++;
+err = mlxsw_emad_transmit(trans->core, trans);
+if (err == 0)
+return;
+} else {
+err = -EIO;
+}
+mlxsw_emad_trans_finish(trans, err);
+}
+
+static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
+{
+struct mlxsw_reg_trans *trans = container_of(work,
+struct mlxsw_reg_trans,
+timeout_dw.work);
+
+if (!atomic_dec_and_test(&trans->active))
+return;
+mlxsw_emad_transmit_retry(trans->core, trans);
+}
+
+static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
+struct mlxsw_reg_trans *trans,
+struct sk_buff *skb)
+{
+int err;
+
+if (!atomic_dec_and_test(&trans->active))
+return;
+
+err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
+if (err == -EAGAIN) {
+mlxsw_emad_transmit_retry(mlxsw_core, trans);
+} else {
+if (err == 0) {
+char *op_tlv = mlxsw_emad_op_tlv(skb);
+
+if (trans->cb)
+trans->cb(mlxsw_core,
+mlxsw_emad_reg_payload(op_tlv),
+trans->reg->len, trans->cb_priv);
+}
+mlxsw_emad_trans_finish(trans, err);
+}
+}

+/* called with rcu read lock held */
 static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
 void *priv)
 {
 struct mlxsw_core *mlxsw_core = priv;
+struct mlxsw_reg_trans *trans;

-if (mlxsw_emad_is_resp(skb) &&
-mlxsw_core->emad.trans_active &&
-mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
-mlxsw_core->emad.resp_skb = skb;
-mlxsw_core->emad.trans_active = false;
-wake_up(&mlxsw_core->emad.wait);
-} else {
-dev_kfree_skb(skb);
+if (!mlxsw_emad_is_resp(skb))
+goto free_skb;
+
+list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
+if (mlxsw_emad_get_tid(skb) == trans->tid) {
+mlxsw_emad_process_response(mlxsw_core, trans, skb);
+break;
+}
 }
+
+free_skb:
+dev_kfree_skb(skb);
 }
 static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
@@ -528,18 +567,19 @@ static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)

 static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
 {
+u64 tid;
 int err;

 /* Set the upper 32 bits of the transaction ID field to a random
  * number. This allows us to discard EMADs addressed to other
  * devices.
  */
-get_random_bytes(&mlxsw_core->emad.tid, 4);
-mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;
+get_random_bytes(&tid, 4);
+tid <<= 32;
+atomic64_set(&mlxsw_core->emad.tid, tid);

-init_waitqueue_head(&mlxsw_core->emad.wait);
-mlxsw_core->emad.trans_active = false;
-mutex_init(&mlxsw_core->emad.lock);
+INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
+spin_lock_init(&mlxsw_core->emad.trans_list_lock);

 err = mlxsw_core_rx_listener_register(mlxsw_core,
 &mlxsw_emad_rx_listener,
@@ -597,6 +637,59 @@ static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
 return skb;
 }
static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg,
char *payload,
enum mlxsw_core_reg_access_type type,
struct mlxsw_reg_trans *trans,
struct list_head *bulk_list,
mlxsw_reg_trans_cb_t *cb,
unsigned long cb_priv, u64 tid)
{
struct sk_buff *skb;
int err;
dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
trans->tid, reg->id, mlxsw_reg_id_str(reg->id),
mlxsw_core_reg_access_type_str(type));
skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
if (!skb)
return -ENOMEM;
list_add_tail(&trans->bulk_list, bulk_list);
trans->core = mlxsw_core;
trans->tx_skb = skb;
trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
trans->tx_info.is_emad = true;
INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
trans->tid = tid;
init_completion(&trans->completion);
trans->cb = cb;
trans->cb_priv = cb_priv;
trans->reg = reg;
trans->type = type;
mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
err = mlxsw_emad_transmit(mlxsw_core, trans);
if (err)
goto err_out;
return 0;
err_out:
spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
list_del_rcu(&trans->list);
spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
list_del(&trans->bulk_list);
dev_kfree_skb(trans->tx_skb);
return err;
}
 /*****************
  * Core functions
  *****************/
@@ -686,24 +779,6 @@ static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
 .llseek = seq_lseek
 };

-static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
-const char *buf, size_t size)
-{
-__be32 *m = (__be32 *) buf;
-int i;
-int count = size / sizeof(__be32);
-
-for (i = count - 1; i >= 0; i--)
-if (m[i])
-break;
-i++;
-count = i ? i : 1;
-for (i = 0; i < count; i += 4)
-dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
-i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
-be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
-}
-
 int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
 {
 spin_lock(&mlxsw_core_driver_list_lock);
@@ -816,9 +891,168 @@ static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
 return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
 }
static int
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
if (!mlxsw_driver->sb_pool_get)
return -EOPNOTSUPP;
return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
pool_index, pool_info);
}
static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
unsigned int sb_index, u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
if (!mlxsw_driver->sb_pool_set)
return -EOPNOTSUPP;
return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
pool_index, size, threshold_type);
}
static void *__dl_port(struct devlink_port *devlink_port)
{
return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}
static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
if (!mlxsw_driver->sb_port_pool_get)
return -EOPNOTSUPP;
return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
pool_index, p_threshold);
}
static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
unsigned int sb_index, u16 pool_index,
u32 threshold)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
if (!mlxsw_driver->sb_port_pool_set)
return -EOPNOTSUPP;
return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
pool_index, threshold);
}
static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 *p_pool_index, u32 *p_threshold)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
if (!mlxsw_driver->sb_tc_pool_bind_get)
return -EOPNOTSUPP;
return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
tc_index, pool_type,
p_pool_index, p_threshold);
}
static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 pool_index, u32 threshold)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
if (!mlxsw_driver->sb_tc_pool_bind_set)
return -EOPNOTSUPP;
return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
tc_index, pool_type,
pool_index, threshold);
}
static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
unsigned int sb_index)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
if (!mlxsw_driver->sb_occ_snapshot)
return -EOPNOTSUPP;
return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
}
static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
unsigned int sb_index)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
if (!mlxsw_driver->sb_occ_max_clear)
return -EOPNOTSUPP;
return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
}
static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
unsigned int sb_index, u16 pool_index,
u32 *p_cur, u32 *p_max)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
if (!mlxsw_driver->sb_occ_port_pool_get)
return -EOPNOTSUPP;
return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
pool_index, p_cur, p_max);
}
static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
if (!mlxsw_driver->sb_occ_tc_port_bind_get)
return -EOPNOTSUPP;
return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
sb_index, tc_index,
pool_type, p_cur, p_max);
}
 static const struct devlink_ops mlxsw_devlink_ops = {
 .port_split = mlxsw_devlink_port_split,
 .port_unsplit = mlxsw_devlink_port_unsplit,
.sb_pool_get = mlxsw_devlink_sb_pool_get,
.sb_pool_set = mlxsw_devlink_sb_pool_set,
.sb_port_pool_get = mlxsw_devlink_sb_port_pool_get,
.sb_port_pool_set = mlxsw_devlink_sb_port_pool_set,
.sb_tc_pool_bind_get = mlxsw_devlink_sb_tc_pool_bind_get,
.sb_tc_pool_bind_set = mlxsw_devlink_sb_tc_pool_bind_set,
.sb_occ_snapshot = mlxsw_devlink_sb_occ_snapshot,
.sb_occ_max_clear = mlxsw_devlink_sb_occ_max_clear,
.sb_occ_port_pool_get = mlxsw_devlink_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
 };

 int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
@@ -1102,56 +1336,112 @@ void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
 }
 EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
return atomic64_inc_return(&mlxsw_core->emad.tid);
}
 static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
 const struct mlxsw_reg_info *reg,
 char *payload,
-enum mlxsw_core_reg_access_type type)
+enum mlxsw_core_reg_access_type type,
+struct list_head *bulk_list,
+mlxsw_reg_trans_cb_t *cb,
+unsigned long cb_priv)
 {
+u64 tid = mlxsw_core_tid_get(mlxsw_core);
+struct mlxsw_reg_trans *trans;
 int err;
-char *op_tlv;
-struct sk_buff *skb;
-struct mlxsw_tx_info tx_info = {
-.local_port = MLXSW_PORT_CPU_PORT,
-.is_emad = true,
-};

-skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
-if (!skb)
+trans = kzalloc(sizeof(*trans), GFP_KERNEL);
+if (!trans)
 return -ENOMEM;

-mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
-mlxsw_core->driver->txhdr_construct(skb, &tx_info);
-
-dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
-mlxsw_core->emad.tid);
-mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);
-
-err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
-if (!err) {
-op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
-memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
-reg->len);
-
-dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
-mlxsw_core->emad.tid - 1);
-mlxsw_core_buf_dump_dbg(mlxsw_core,
-mlxsw_core->emad.resp_skb->data,
-mlxsw_core->emad.resp_skb->len);
-
-dev_kfree_skb(mlxsw_core->emad.resp_skb);
-}
+err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
+bulk_list, cb, cb_priv, tid);
+if (err) {
+kfree(trans);
+return err;
+}
+return 0;
+}
+
+int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
+const struct mlxsw_reg_info *reg, char *payload,
+struct list_head *bulk_list,
+mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
+{
+return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
+MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
+bulk_list, cb, cb_priv);
+}
+EXPORT_SYMBOL(mlxsw_reg_trans_query);
+
+int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
+const struct mlxsw_reg_info *reg, char *payload,
+struct list_head *bulk_list,
+mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
+{
+return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
+MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
+bulk_list, cb, cb_priv);
+}
+EXPORT_SYMBOL(mlxsw_reg_trans_write);
+
+static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
+{
+struct mlxsw_core *mlxsw_core = trans->core;
+int err;
+
+wait_for_completion(&trans->completion);
+cancel_delayed_work_sync(&trans->timeout_dw);
+err = trans->err;

+if (trans->retries)
+dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
+trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
+if (err)
+dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
+trans->tid, trans->reg->id,
+mlxsw_reg_id_str(trans->reg->id),
+mlxsw_core_reg_access_type_str(trans->type),
+trans->emad_status,
+mlxsw_emad_op_tlv_status_str(trans->emad_status));
+
+list_del(&trans->bulk_list);
+kfree_rcu(trans, rcu);
 return err;
 }
int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
{
struct mlxsw_reg_trans *trans;
struct mlxsw_reg_trans *tmp;
int sum_err = 0;
int err;
list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
err = mlxsw_reg_trans_wait(trans);
if (err && sum_err == 0)
sum_err = err; /* first error to be returned */
}
return sum_err;
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
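The bulk list turns several asynchronous register accesses into a single synchronous wait. A hypothetical caller (not part of this patchset) issuing two queries and reaping both; with a NULL callback the response payloads are discarded, so real callers pass a callback to consume them:

static int example_bulk_query(struct mlxsw_core *mlxsw_core,
                              const struct mlxsw_reg_info *reg,
                              char *pl1, char *pl2)
{
        LIST_HEAD(bulk_list);   /* collects the in-flight transactions */
        int err;
        int err2;

        err = mlxsw_reg_trans_query(mlxsw_core, reg, pl1, &bulk_list,
                                    NULL, 0);
        if (err)
                goto out;
        err = mlxsw_reg_trans_query(mlxsw_core, reg, pl2, &bulk_list,
                                    NULL, 0);
out:
        /* Always reap: waits for every transaction already on the list
         * and returns the first error encountered among them.
         */
        err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
        return err ? err : err2;
}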
 static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
 const struct mlxsw_reg_info *reg,
 char *payload,
 enum mlxsw_core_reg_access_type type)
 {
+enum mlxsw_emad_op_tlv_status status;
 int err, n_retry;
 char *in_mbox, *out_mbox, *tmp;

+dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
+reg->id, mlxsw_reg_id_str(reg->id),
+mlxsw_core_reg_access_type_str(type));
+
 in_mbox = mlxsw_cmd_mbox_alloc();
 if (!in_mbox)
 return -ENOMEM;
@@ -1162,7 +1452,8 @@ static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
 goto free_in_mbox;
 }

-mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
+mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
+mlxsw_core_tid_get(mlxsw_core));
 tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
 mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
@@ -1170,60 +1461,61 @@ static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
 retry:
 err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
 if (!err) {
-err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
-if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
-goto retry;
+err = mlxsw_emad_process_status(out_mbox, &status);
+if (err) {
+if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
+goto retry;
+dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
+status, mlxsw_emad_op_tlv_status_str(status));
+}
 }

 if (!err)
 memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
 reg->len);

-mlxsw_core->emad.tid++;
 mlxsw_cmd_mbox_free(out_mbox);
 free_in_mbox:
 mlxsw_cmd_mbox_free(in_mbox);
+if (err)
+dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
+reg->id, mlxsw_reg_id_str(reg->id),
+mlxsw_core_reg_access_type_str(type));
 return err;
 }
static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
char *payload, size_t payload_len,
unsigned long cb_priv)
{
char *orig_payload = (char *) cb_priv;
memcpy(orig_payload, payload, payload_len);
}
 static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
 const struct mlxsw_reg_info *reg,
 char *payload,
 enum mlxsw_core_reg_access_type type)
 {
-u64 cur_tid;
+LIST_HEAD(bulk_list);
 int err;

-if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
-dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
-reg->id, mlxsw_reg_id_str(reg->id),
-mlxsw_core_reg_access_type_str(type));
-return -EINTR;
-}
-
-cur_tid = mlxsw_core->emad.tid;
-dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
-cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
-mlxsw_core_reg_access_type_str(type));
-
 /* During initialization EMAD interface is not available to us,
  * so we default to command interface. We switch to EMAD interface
  * after setting the appropriate traps.
  */
 if (!mlxsw_core->emad.use_emad)
-err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
-payload, type);
-else
-err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
 payload, type);
+
+err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+payload, type, &bulk_list,
+mlxsw_core_reg_access_cb,
+(unsigned long) payload);
 if (err)
-dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
-cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
-mlxsw_core_reg_access_type_str(type));
-
-mutex_unlock(&mlxsw_core->emad.lock);
-return err;
+return err;
+return mlxsw_reg_trans_bulk_wait(&bulk_list);
 }
 int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
@@ -1374,6 +1666,24 @@ void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port)
 }
 EXPORT_SYMBOL(mlxsw_core_port_fini);
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
const char *buf, size_t size)
{
__be32 *m = (__be32 *) buf;
int i;
int count = size / sizeof(__be32);
for (i = count - 1; i >= 0; i--)
if (m[i])
break;
i++;
count = i ? i : 1;
for (i = 0; i < count; i += 4)
dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}
 int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
 u32 in_mod, bool out_mbox_direct,
 char *in_mbox, size_t in_mbox_size,
@@ -1416,17 +1726,35 @@ int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
 }
 EXPORT_SYMBOL(mlxsw_cmd_exec);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
return queue_delayed_work(mlxsw_wq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);
 static int __init mlxsw_core_module_init(void)
 {
-mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
-if (!mlxsw_core_dbg_root)
+int err;
+
+mlxsw_wq = create_workqueue(mlxsw_core_driver_name);
+if (!mlxsw_wq)
 return -ENOMEM;
+mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
+if (!mlxsw_core_dbg_root) {
+err = -ENOMEM;
+goto err_debugfs_create_dir;
+}
 return 0;
+
+err_debugfs_create_dir:
+destroy_workqueue(mlxsw_wq);
+return err;
 }

 static void __exit mlxsw_core_module_exit(void)
 {
 debugfs_remove_recursive(mlxsw_core_dbg_root);
+destroy_workqueue(mlxsw_wq);
 }
 module_init(mlxsw_core_module_init);
...
@@ -43,6 +43,7 @@
 #include <linux/gfp.h>
 #include <linux/types.h>
 #include <linux/skbuff.h>
+#include <linux/workqueue.h>
 #include <net/devlink.h>

 #include "trap.h"
@@ -108,6 +109,19 @@ void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
 const struct mlxsw_event_listener *el,
 void *priv);
typedef void mlxsw_reg_trans_cb_t(struct mlxsw_core *mlxsw_core, char *payload,
size_t payload_len, unsigned long cb_priv);
int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg, char *payload,
struct list_head *bulk_list,
mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv);
int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg, char *payload,
struct list_head *bulk_list,
mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv);
int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list);
 int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
 const struct mlxsw_reg_info *reg, char *payload);
 int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
@@ -137,11 +151,22 @@ struct mlxsw_core_port {
 struct devlink_port devlink_port;
 };
static inline void *
mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
{
/* mlxsw_core_port is ensured to always be the first field in driver
* port structure.
*/
return mlxsw_core_port;
}
 int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
 struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
 struct net_device *dev, bool split, u32 split_group);
 void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
 #define MLXSW_CONFIG_PROFILE_SWID_COUNT 8

 struct mlxsw_swid_config {
@@ -200,6 +225,37 @@ struct mlxsw_driver {
 int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port,
 unsigned int count);
 int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port);
int (*sb_pool_get)(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info);
int (*sb_pool_set)(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type);
int (*sb_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold);
int (*sb_port_pool_set)(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 pool_index,
u32 threshold);
int (*sb_tc_pool_bind_get)(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 *p_pool_index, u32 *p_threshold);
int (*sb_tc_pool_bind_set)(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 pool_index, u32 threshold);
int (*sb_occ_snapshot)(struct mlxsw_core *mlxsw_core,
unsigned int sb_index);
int (*sb_occ_max_clear)(struct mlxsw_core *mlxsw_core,
unsigned int sb_index);
int (*sb_occ_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 pool_index,
u32 *p_cur, u32 *p_max);
int (*sb_occ_tc_port_bind_get)(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max);
 void (*txhdr_construct)(struct sk_buff *skb,
 const struct mlxsw_tx_info *tx_info);
 u8 txhdr_len;
...
@@ -3566,6 +3566,10 @@ MLXSW_ITEM32(reg, sbcm, dir, 0x00, 0, 2);
 */
 MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24);
/* shared max_buff limits for dynamic threshold for SBCM, SBPM */
#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN 1
#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX 14
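For dynamic thresholds, max_buff does not hold a size in cells but selects a scaling ("alpha") step, which is why only this narrow range of encodings is valid. A hypothetical range check (not part of this patchset) that a driver could apply before programming SBCM/SBPM in dynamic mode:

static inline bool mlxsw_reg_sbxx_dyn_max_buff_valid(u32 max_buff)
{
        /* only MIN..MAX inclusive are accepted by the device */
        return max_buff >= MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN &&
               max_buff <= MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX;
}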
 /* reg_sbcm_max_buff
  * When the pool associated to the port-pg/tclass is configured to
  * static, Maximum buffer size for the limiter configured in cells.
@@ -3632,6 +3636,27 @@ MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4);
 */
 MLXSW_ITEM32(reg, sbpm, dir, 0x00, 0, 2);
/* reg_sbpm_buff_occupancy
* Current buffer occupancy in cells.
* Access: RO
*/
MLXSW_ITEM32(reg, sbpm, buff_occupancy, 0x10, 0, 24);
/* reg_sbpm_clr
* Clear Max Buffer Occupancy
* When this bit is set, max_buff_occupancy field is cleared (and a
* new max value is tracked from the time the clear was performed).
* Access: OP
*/
MLXSW_ITEM32(reg, sbpm, clr, 0x14, 31, 1);
/* reg_sbpm_max_buff_occupancy
* Maximum value of buffer occupancy in cells monitored. Cleared by
* writing to the clr field.
* Access: RO
*/
MLXSW_ITEM32(reg, sbpm, max_buff_occupancy, 0x14, 0, 24);
 /* reg_sbpm_min_buff
  * Minimum buffer size for the limiter, in cells.
  * Access: RW
@@ -3652,17 +3677,25 @@ MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24);
 MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24);
 static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool,
-enum mlxsw_reg_sbxx_dir dir,
+enum mlxsw_reg_sbxx_dir dir, bool clr,
 u32 min_buff, u32 max_buff)
 {
 MLXSW_REG_ZERO(sbpm, payload);
 mlxsw_reg_sbpm_local_port_set(payload, local_port);
 mlxsw_reg_sbpm_pool_set(payload, pool);
 mlxsw_reg_sbpm_dir_set(payload, dir);
+mlxsw_reg_sbpm_clr_set(payload, clr);
 mlxsw_reg_sbpm_min_buff_set(payload, min_buff);
 mlxsw_reg_sbpm_max_buff_set(payload, max_buff);
 }
static inline void mlxsw_reg_sbpm_unpack(char *payload, u32 *p_buff_occupancy,
u32 *p_max_buff_occupancy)
{
*p_buff_occupancy = mlxsw_reg_sbpm_buff_occupancy_get(payload);
*p_max_buff_occupancy = mlxsw_reg_sbpm_max_buff_occupancy_get(payload);
}
 /* SBMM - Shared Buffer Multicast Management Register
  * --------------------------------------------------
  * The SBMM register configures and retrieves the shared buffer allocation
@@ -3718,6 +3751,104 @@ static inline void mlxsw_reg_sbmm_pack(char *payload, u8 prio, u32 min_buff,
 mlxsw_reg_sbmm_pool_set(payload, pool);
 }
/* SBSR - Shared Buffer Status Register
* ------------------------------------
* The SBSR register retrieves the shared buffer occupancy according to
* Port-Pool. Note that this register enables reading a large amount of data.
* It is the user's responsibility to limit the amount of data to ensure the
* response can match the maximum transfer unit. In case the response exceeds
* the maximum transfer unit, it will be truncated with no special notice.
*/
#define MLXSW_REG_SBSR_ID 0xB005
#define MLXSW_REG_SBSR_BASE_LEN 0x5C /* base length, without records */
#define MLXSW_REG_SBSR_REC_LEN 0x8 /* record length */
#define MLXSW_REG_SBSR_REC_MAX_COUNT 120
#define MLXSW_REG_SBSR_LEN (MLXSW_REG_SBSR_BASE_LEN + \
MLXSW_REG_SBSR_REC_LEN * \
MLXSW_REG_SBSR_REC_MAX_COUNT)
static const struct mlxsw_reg_info mlxsw_reg_sbsr = {
.id = MLXSW_REG_SBSR_ID,
.len = MLXSW_REG_SBSR_LEN,
};
/* reg_sbsr_clr
* Clear Max Buffer Occupancy. When this bit is set, the max_buff_occupancy
* field is cleared (and a new max value is tracked from the time the clear
* was performed).
* Access: OP
*/
MLXSW_ITEM32(reg, sbsr, clr, 0x00, 31, 1);
/* reg_sbsr_ingress_port_mask
* Bit vector for all ingress network ports.
* Indicates which of the ports (for which the relevant bit is set)
* are affected by the set operation. Configuration of any other port
* does not change.
* Access: Index
*/
MLXSW_ITEM_BIT_ARRAY(reg, sbsr, ingress_port_mask, 0x10, 0x20, 1);
/* reg_sbsr_pg_buff_mask
* Bit vector for all switch priority groups.
* Indicates which of the priorities (for which the relevant bit is set)
* are affected by the set operation. Configuration of any other priority
* does not change.
* Range is 0..cap_max_pg_buffers - 1
* Access: Index
*/
MLXSW_ITEM_BIT_ARRAY(reg, sbsr, pg_buff_mask, 0x30, 0x4, 1);
/* reg_sbsr_egress_port_mask
* Bit vector for all egress network ports.
* Indicates which of the ports (for which the relevant bit is set)
* are affected by the set operation. Configuration of any other port
* does not change.
* Access: Index
*/
MLXSW_ITEM_BIT_ARRAY(reg, sbsr, egress_port_mask, 0x34, 0x20, 1);
/* reg_sbsr_tclass_mask
* Bit vector for all traffic classes.
* Indicates which of the traffic classes (for which the relevant bit is
* set) are affected by the set operation. Configuration of any other
* traffic class does not change.
* Range is 0..cap_max_tclass - 1
* Access: Index
*/
MLXSW_ITEM_BIT_ARRAY(reg, sbsr, tclass_mask, 0x54, 0x8, 1);
static inline void mlxsw_reg_sbsr_pack(char *payload, bool clr)
{
MLXSW_REG_ZERO(sbsr, payload);
mlxsw_reg_sbsr_clr_set(payload, clr);
}
/* reg_sbsr_rec_buff_occupancy
* Current buffer occupancy in cells.
* Access: RO
*/
MLXSW_ITEM32_INDEXED(reg, sbsr, rec_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN,
0, 24, MLXSW_REG_SBSR_REC_LEN, 0x00, false);
/* reg_sbsr_rec_max_buff_occupancy
* Maximum value of buffer occupancy in cells monitored. Cleared by
* writing to the clr field.
* Access: RO
*/
MLXSW_ITEM32_INDEXED(reg, sbsr, rec_max_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN,
0, 24, MLXSW_REG_SBSR_REC_LEN, 0x04, false);
static inline void mlxsw_reg_sbsr_rec_unpack(char *payload, int rec_index,
u32 *p_buff_occupancy,
u32 *p_max_buff_occupancy)
{
*p_buff_occupancy =
mlxsw_reg_sbsr_rec_buff_occupancy_get(payload, rec_index);
*p_max_buff_occupancy =
mlxsw_reg_sbsr_rec_max_buff_occupancy_get(payload, rec_index);
}
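A response thus carries up to MLXSW_REG_SBSR_REC_MAX_COUNT fixed-size records, one per port/buffer combination selected by the masks above. An illustrative consumer (hypothetical, not part of this patchset) walking the records:

static void example_sbsr_walk(char *sbsr_pl, int rec_count)
{
        u32 cur, max;
        int i;

        if (rec_count > MLXSW_REG_SBSR_REC_MAX_COUNT)
                rec_count = MLXSW_REG_SBSR_REC_MAX_COUNT;
        for (i = 0; i < rec_count; i++) {
                mlxsw_reg_sbsr_rec_unpack(sbsr_pl, i, &cur, &max);
                /* cur and max: current occupancy and watermark, in cells */
        }
}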
 static inline const char *mlxsw_reg_id_str(u16 reg_id)
 {
 switch (reg_id) {
@@ -3813,6 +3944,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
 return "SBPM";
 case MLXSW_REG_SBMM_ID:
 return "SBMM";
+case MLXSW_REG_SBSR_ID:
+return "SBSR";
 default:
 return "*UNKNOWN*";
 }
...
@@ -2434,6 +2434,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
 err_switchdev_init:
 err_lag_init:
+mlxsw_sp_buffers_fini(mlxsw_sp);
 err_buffers_init:
 err_flood_init:
 mlxsw_sp_traps_fini(mlxsw_sp);
@@ -2448,6 +2449,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
 {
 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

+mlxsw_sp_buffers_fini(mlxsw_sp);
 mlxsw_sp_switchdev_fini(mlxsw_sp);
 mlxsw_sp_traps_fini(mlxsw_sp);
 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
@@ -2491,16 +2493,26 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
 };
 static struct mlxsw_driver mlxsw_sp_driver = {
 .kind = MLXSW_DEVICE_KIND_SPECTRUM,
 .owner = THIS_MODULE,
 .priv_size = sizeof(struct mlxsw_sp),
 .init = mlxsw_sp_init,
 .fini = mlxsw_sp_fini,
 .port_split = mlxsw_sp_port_split,
 .port_unsplit = mlxsw_sp_port_unsplit,
+.sb_pool_get = mlxsw_sp_sb_pool_get,
+.sb_pool_set = mlxsw_sp_sb_pool_set,
+.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
+.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
+.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
+.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
+.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
+.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
+.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
+.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
 .txhdr_construct = mlxsw_sp_txhdr_construct,
 .txhdr_len = MLXSW_TXHDR_LEN,
 .profile = &mlxsw_sp_config_profile,
 };
 static int
...
@@ -65,6 +65,7 @@
 #define MLXSW_SP_BYTES_PER_CELL 96

 #define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL)
#define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL)
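As a quick sanity check of the two macros (illustrative arithmetic only): a 1518-byte frame occupies DIV_ROUND_UP(1518, 96) = 16 cells, and converting back yields 16 * 96 = 1536 bytes, so a bytes-to-cells-to-bytes round trip rounds up to the next 96-byte cell boundary:

u32 cells = MLXSW_SP_BYTES_TO_CELLS(1518);  /* 16 */
u32 bytes = MLXSW_SP_CELLS_TO_BYTES(cells); /* 1536 */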
 /* Maximum delay buffer needed in case of PAUSE frames, in cells.
  * Assumes 100m cable and maximum MTU.
@@ -117,6 +118,40 @@ static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
 return fid >= MLXSW_SP_VFID_BASE;
 }
struct mlxsw_sp_sb_pr {
enum mlxsw_reg_sbpr_mode mode;
u32 size;
};
struct mlxsw_cp_sb_occ {
u32 cur;
u32 max;
};
struct mlxsw_sp_sb_cm {
u32 min_buff;
u32 max_buff;
u8 pool;
struct mlxsw_cp_sb_occ occ;
};
struct mlxsw_sp_sb_pm {
u32 min_buff;
u32 max_buff;
struct mlxsw_cp_sb_occ occ;
};
#define MLXSW_SP_SB_POOL_COUNT 4
#define MLXSW_SP_SB_TC_COUNT 8
struct mlxsw_sp_sb {
struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
struct {
struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
} ports[MLXSW_PORT_MAX_PORTS];
};
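The cache above is indexed by direction first (via enum mlxsw_reg_sbxx_dir, hence the [2] dimensions: one row for ingress, one for egress) and by pool or PG-buffer/TC second, with per-port state under ports[]. An illustrative lookup (hypothetical helper; the real accessors appear in spectrum_buffers.c below):

static struct mlxsw_sp_sb_cm *
example_sb_cm_lookup(struct mlxsw_sp *mlxsw_sp, u8 local_port, u8 tc)
{
        /* egress TC state cached for this port */
        return &mlxsw_sp->sb.ports[local_port].cms[MLXSW_REG_SBXX_DIR_EGRESS][tc];
}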
 struct mlxsw_sp {
 struct {
 struct list_head list;
@@ -147,6 +182,7 @@ struct mlxsw_sp {
 struct mlxsw_sp_upper master_bridge;
 struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
 u8 port_to_module[MLXSW_PORT_MAX_PORTS];
+struct mlxsw_sp_sb sb;
 };
 static inline struct mlxsw_sp_upper *
@@ -277,7 +313,39 @@ enum mlxsw_sp_flood_table {
 };

 int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
 int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info);
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type);
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold);
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 pool_index,
u32 threshold);
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 *p_pool_index, u32 *p_threshold);
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 pool_index, u32 threshold);
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
unsigned int sb_index);
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
unsigned int sb_index);
int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 pool_index,
u32 *p_cur, u32 *p_max);
int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max);
 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
...
@@ -36,33 +36,142 @@
 #include <linux/types.h>
 #include <linux/dcbnl.h>
 #include <linux/if_ether.h>
+#include <linux/list.h>

 #include "spectrum.h"
 #include "core.h"
 #include "port.h"
 #include "reg.h"
-struct mlxsw_sp_pb {
-u8 index;
-u16 size;
-};
-
-#define MLXSW_SP_PB(_index, _size) \
-{ \
-.index = _index, \
-.size = _size, \
-}
+static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
+u8 pool,
+enum mlxsw_reg_sbxx_dir dir)
+{
+return &mlxsw_sp->sb.prs[dir][pool];
+}
+
+static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
+u8 local_port, u8 pg_buff,
+enum mlxsw_reg_sbxx_dir dir)
+{
+return &mlxsw_sp->sb.ports[local_port].cms[dir][pg_buff];
+}
static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
u8 local_port, u8 pool,
enum mlxsw_reg_sbxx_dir dir)
{
return &mlxsw_sp->sb.ports[local_port].pms[dir][pool];
}
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
enum mlxsw_reg_sbxx_dir dir,
enum mlxsw_reg_sbpr_mode mode, u32 size)
{
char sbpr_pl[MLXSW_REG_SBPR_LEN];
struct mlxsw_sp_sb_pr *pr;
int err;
mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
if (err)
return err;
pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
pr->mode = mode;
pr->size = size;
return 0;
}
static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u8 pg_buff, enum mlxsw_reg_sbxx_dir dir,
u32 min_buff, u32 max_buff, u8 pool)
{
char sbcm_pl[MLXSW_REG_SBCM_LEN];
int err;
mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir,
min_buff, max_buff, pool);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
if (err)
return err;
if (pg_buff < MLXSW_SP_SB_TC_COUNT) {
struct mlxsw_sp_sb_cm *cm;
cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir);
cm->min_buff = min_buff;
cm->max_buff = max_buff;
cm->pool = pool;
}
return 0;
}
static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u8 pool, enum mlxsw_reg_sbxx_dir dir,
u32 min_buff, u32 max_buff)
{
char sbpm_pl[MLXSW_REG_SBPM_LEN];
struct mlxsw_sp_sb_pm *pm;
int err;
mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false,
min_buff, max_buff);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
if (err)
return err;
pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
pm->min_buff = min_buff;
pm->max_buff = max_buff;
return 0;
}
static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u8 pool, enum mlxsw_reg_sbxx_dir dir,
struct list_head *bulk_list)
{
char sbpm_pl[MLXSW_REG_SBPM_LEN];
mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0);
return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
bulk_list, NULL, 0);
}
static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
char *sbpm_pl, size_t sbpm_pl_len,
unsigned long cb_priv)
{
struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;
mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}
static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u8 pool, enum mlxsw_reg_sbxx_dir dir,
struct list_head *bulk_list)
{
char sbpm_pl[MLXSW_REG_SBPM_LEN];
struct mlxsw_sp_sb_pm *pm;

pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0);
return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
bulk_list,
mlxsw_sp_sb_pm_occ_query_cb,
(unsigned long) pm);
}

static const u16 mlxsw_sp_pbs[] = {
2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN),
0,
0,
0,
0,
0,
0,
0,
0, /* Unused */
2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU),
};

#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)

@@ -75,10 +184,9 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
0xffff, 0xffff / 2);
for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
if (i == 8)
continue;
mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, mlxsw_sp_pbs[i]);
}
mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
@@ -108,181 +216,174 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
}
#define MLXSW_SP_SB_PR_INGRESS_SIZE \
(15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS))
#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
#define MLXSW_SP_SB_PR_EGRESS_SIZE \
(14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS))

#define MLXSW_SP_SB_PR(_mode, _size) \
{ \
.mode = _mode, \
.size = _size, \
}

static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = {
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_SIZE)),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_MNG_SIZE)),
};

#define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress)

static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = {
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_EGRESS_SIZE)),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
};

#define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress)

static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_reg_sbxx_dir dir,
const struct mlxsw_sp_sb_pr *prs,
size_t prs_len)
{
int i;
int err;

for (i = 0; i < prs_len; i++) {
const struct mlxsw_sp_sb_pr *pr;

pr = &prs[i];
err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir,
pr->mode, pr->size);
if (err)
return err;
}
return 0;
}

static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp)
{
int err;

err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
mlxsw_sp_sb_prs_ingress,
MLXSW_SP_SB_PRS_INGRESS_LEN);
if (err)
return err;
return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
mlxsw_sp_sb_prs_egress,
MLXSW_SP_SB_PRS_EGRESS_LEN);
}

#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool) \
{ \
.min_buff = _min_buff, \
.max_buff = _max_buff, \
.pool = _pool, \
}

static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 8, 0),
MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(20000), 1, 3),
};

#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)
static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
MLXSW_SP_SB_CM(0, 0, 0),
MLXSW_SP_SB_CM(0, 0, 0),
MLXSW_SP_SB_CM(0, 0, 0),
MLXSW_SP_SB_CM(0, 0, 0),
MLXSW_SP_SB_CM(0, 0, 0),
MLXSW_SP_SB_CM(0, 0, 0),
MLXSW_SP_SB_CM(0, 0, 0),
MLXSW_SP_SB_CM(0, 0, 0),
MLXSW_SP_SB_CM(1, 0xff, 0),
};
#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)
#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0)
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
};

#define MLXSW_SP_CPU_PORT_SB_MCS_LEN \
ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)

static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
enum mlxsw_reg_sbxx_dir dir,
const struct mlxsw_sp_sb_cm *cms,
size_t cms_len)
{
int i;
int err;

for (i = 0; i < cms_len; i++) {
const struct mlxsw_sp_sb_cm *cm;

if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
continue; /* PG number 8 does not exist, skip it */
cm = &cms[i];
err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir,
cm->min_buff, cm->max_buff,
cm->pool);
if (err)
return err;
}
@@ -291,105 +392,120 @@ static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,

static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
int err;

err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
mlxsw_sp_port->local_port,
MLXSW_REG_SBXX_DIR_INGRESS,
mlxsw_sp_sb_cms_ingress,
MLXSW_SP_SB_CMS_INGRESS_LEN);
if (err)
return err;
return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
mlxsw_sp_port->local_port,
MLXSW_REG_SBXX_DIR_EGRESS,
mlxsw_sp_sb_cms_egress,
MLXSW_SP_SB_CMS_EGRESS_LEN);
}

static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
mlxsw_sp_cpu_port_sb_cms,
MLXSW_SP_CPU_PORT_SB_MCS_LEN);
}
#define MLXSW_SP_SB_PM(_min_buff, _max_buff) \
{ \
.min_buff = _min_buff, \
.max_buff = _max_buff, \
}

static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = {
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};

#define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress)

static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = {
MLXSW_SP_SB_PM(0, 7),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};

#define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress)

static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
enum mlxsw_reg_sbxx_dir dir,
const struct mlxsw_sp_sb_pm *pms,
size_t pms_len)
{
int i;
int err;

for (i = 0; i < pms_len; i++) {
const struct mlxsw_sp_sb_pm *pm;

pm = &pms[i];
err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir,
pm->min_buff, pm->max_buff);
if (err)
return err;
}
return 0;
}
static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
int err;
err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
mlxsw_sp_port->local_port,
MLXSW_REG_SBXX_DIR_INGRESS,
mlxsw_sp_sb_pms_ingress,
MLXSW_SP_SB_PMS_INGRESS_LEN);
if (err)
return err;
return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
mlxsw_sp_port->local_port,
MLXSW_REG_SBXX_DIR_EGRESS,
mlxsw_sp_sb_pms_egress,
MLXSW_SP_SB_PMS_EGRESS_LEN);
}
struct mlxsw_sp_sb_mm {
u32 min_buff;
u32 max_buff;
u8 pool;
};

#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool) \
{ \
.min_buff = _min_buff, \
.max_buff = _max_buff, \
.pool = _pool, \
}

static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
};

#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)

@@ -404,7 +520,7 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_sb_mm *mc;

mc = &mlxsw_sp_sb_mms[i];
mlxsw_reg_sbmm_pack(sbmm_pl, i, mc->min_buff,
mc->max_buff, mc->pool);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
if (err)
@@ -413,19 +529,32 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
return 0;
}

#define MLXSW_SP_SB_SIZE (16 * 1024 * 1024)

int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
int err;

err = mlxsw_sp_sb_prs_init(mlxsw_sp);
if (err)
return err;
err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
if (err)
return err;
err = mlxsw_sp_sb_mms_init(mlxsw_sp);
if (err)
return err;
return devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
MLXSW_SP_SB_SIZE,
MLXSW_SP_SB_POOL_COUNT,
MLXSW_SP_SB_POOL_COUNT,
MLXSW_SP_SB_TC_COUNT,
MLXSW_SP_SB_TC_COUNT);
}

void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
}
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -442,3 +571,394 @@ int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
return err;
}
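
/* devlink exposes a single flat pool index space; the helpers below map it
 * onto the ASIC view, with indices 0..MLXSW_SP_SB_POOL_COUNT-1 addressing
 * ingress pools and the next MLXSW_SP_SB_POOL_COUNT indices addressing
 * egress pools.
 */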
static u8 pool_get(u16 pool_index)
{
return pool_index % MLXSW_SP_SB_POOL_COUNT;
}
static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir)
{
u16 pool_index;
pool_index = pool;
if (dir == MLXSW_REG_SBXX_DIR_EGRESS)
pool_index += MLXSW_SP_SB_POOL_COUNT;
return pool_index;
}
static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index)
{
return pool_index < MLXSW_SP_SB_POOL_COUNT ?
MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS;
}
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
u8 pool = pool_get(pool_index);
enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
pool_info->pool_type = dir;
pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size);
pool_info->threshold_type = pr->mode;
return 0;
}
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
u8 pool = pool_get(pool_index);
enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
enum mlxsw_reg_sbpr_mode mode = threshold_type;
u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size);
return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
}
#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
enum mlxsw_reg_sbxx_dir dir, u32 max_buff)
{
struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
return MLXSW_SP_CELLS_TO_BYTES(max_buff);
}
static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
enum mlxsw_reg_sbxx_dir dir, u32 threshold,
u32 *p_max_buff)
{
struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
int val;
val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
return -EINVAL;
*p_max_buff = val;
} else {
*p_max_buff = MLXSW_SP_BYTES_TO_CELLS(threshold);
}
return 0;
}
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold)
{
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pool = pool_get(pool_index);
enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
pool, dir);
*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir,
pm->max_buff);
return 0;
}
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 pool_index,
u32 threshold)
{
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pool = pool_get(pool_index);
enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
u32 max_buff;
int err;
err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
threshold, &max_buff);
if (err)
return err;
return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir,
0, max_buff);
}
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 *p_pool_index, u32 *p_threshold)
{
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
enum mlxsw_reg_sbxx_dir dir = pool_type;
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
pg_buff, dir);
*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
cm->max_buff);
*p_pool_index = pool_index_get(cm->pool, pool_type);
return 0;
}
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 pool_index, u32 threshold)
{
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
enum mlxsw_reg_sbxx_dir dir = pool_type;
u8 pool = pool_index;
u32 max_buff;
int err;
err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
threshold, &max_buff);
if (err)
return err;
if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS) {
if (pool < MLXSW_SP_SB_POOL_COUNT)
return -EINVAL;
pool -= MLXSW_SP_SB_POOL_COUNT;
} else if (pool >= MLXSW_SP_SB_POOL_COUNT) {
return -EINVAL;
}
return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
0, max_buff, pool);
}
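
/* Each port in an SBSR query contributes MLXSW_SP_SB_TC_COUNT ingress and
 * MLXSW_SP_SB_TC_COUNT egress occupancy records, which bounds how many
 * ports fit into one batched query below.
 */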
#define MASKED_COUNT_MAX \
(MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2))
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
u8 masked_count;
u8 local_port_1;
};
static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
char *sbsr_pl, size_t sbsr_pl_len,
unsigned long cb_priv)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
u8 masked_count;
u8 local_port;
int rec_index = 0;
struct mlxsw_sp_sb_cm *cm;
int i;
memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));
masked_count = 0;
for (local_port = cb_ctx.local_port_1;
local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
MLXSW_REG_SBXX_DIR_INGRESS);
mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
&cm->occ.cur, &cm->occ.max);
}
if (++masked_count == cb_ctx.masked_count)
break;
}
masked_count = 0;
for (local_port = cb_ctx.local_port_1;
local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
MLXSW_REG_SBXX_DIR_EGRESS);
mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
&cm->occ.cur, &cm->occ.max);
}
if (++masked_count == cb_ctx.masked_count)
break;
}
}
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
unsigned int sb_index)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
unsigned long cb_priv;
LIST_HEAD(bulk_list);
char *sbsr_pl;
u8 masked_count;
u8 local_port_1;
u8 local_port = 0;
int i;
int err;
int err2;
sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
if (!sbsr_pl)
return -ENOMEM;
next_batch:
local_port++;
local_port_1 = local_port;
masked_count = 0;
mlxsw_reg_sbsr_pack(sbsr_pl, false);
for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
}
for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
MLXSW_REG_SBXX_DIR_INGRESS,
&bulk_list);
if (err)
goto out;
err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
MLXSW_REG_SBXX_DIR_EGRESS,
&bulk_list);
if (err)
goto out;
}
if (++masked_count == MASKED_COUNT_MAX)
goto do_query;
}
do_query:
cb_ctx.masked_count = masked_count;
cb_ctx.local_port_1 = local_port_1;
memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
&bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
cb_priv);
if (err)
goto out;
if (local_port < MLXSW_PORT_MAX_PORTS)
goto next_batch;
out:
err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
if (!err)
err = err2;
kfree(sbsr_pl);
return err;
}
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
unsigned int sb_index)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
LIST_HEAD(bulk_list);
char *sbsr_pl;
unsigned int masked_count;
u8 local_port = 0;
int i;
int err;
int err2;
sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
if (!sbsr_pl)
return -ENOMEM;
next_batch:
local_port++;
masked_count = 0;
mlxsw_reg_sbsr_pack(sbsr_pl, true);
for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
}
for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
if (!mlxsw_sp->ports[local_port])
continue;
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
MLXSW_REG_SBXX_DIR_INGRESS,
&bulk_list);
if (err)
goto out;
err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
MLXSW_REG_SBXX_DIR_EGRESS,
&bulk_list);
if (err)
goto out;
}
if (++masked_count == MASKED_COUNT_MAX)
goto do_query;
}
do_query:
err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
&bulk_list, NULL, 0);
if (err)
goto out;
if (local_port < MLXSW_PORT_MAX_PORTS)
goto next_batch;
out:
err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
if (!err)
err = err2;
kfree(sbsr_pl);
return err;
}
int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 pool_index,
u32 *p_cur, u32 *p_max)
{
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pool = pool_get(pool_index);
enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
pool, dir);
*p_cur = MLXSW_SP_CELLS_TO_BYTES(pm->occ.cur);
*p_max = MLXSW_SP_CELLS_TO_BYTES(pm->occ.max);
return 0;
}
int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max)
{
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
enum mlxsw_reg_sbxx_dir dir = pool_type;
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
pg_buff, dir);
*p_cur = MLXSW_SP_CELLS_TO_BYTES(cm->occ.cur);
*p_max = MLXSW_SP_CELLS_TO_BYTES(cm->occ.max);
return 0;
}
@@ -1430,8 +1430,8 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,

static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
...
@@ -24,6 +24,7 @@ struct devlink_ops;

struct devlink {
struct list_head list;
struct list_head port_list;
struct list_head sb_list;
const struct devlink_ops *ops;
struct device *dev;
possible_net_t _net;
@@ -42,6 +43,12 @@ struct devlink_port {
u32 split_group;
};
struct devlink_sb_pool_info {
enum devlink_sb_pool_type pool_type;
u32 size;
enum devlink_sb_threshold_type threshold_type;
};
struct devlink_ops {
size_t priv_size;
int (*port_type_set)(struct devlink_port *devlink_port,
@@ -49,6 +56,40 @@ struct devlink_ops {
int (*port_split)(struct devlink *devlink, unsigned int port_index,
unsigned int count);
int (*port_unsplit)(struct devlink *devlink, unsigned int port_index);
int (*sb_pool_get)(struct devlink *devlink, unsigned int sb_index,
u16 pool_index,
struct devlink_sb_pool_info *pool_info);
int (*sb_pool_set)(struct devlink *devlink, unsigned int sb_index,
u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type);
int (*sb_port_pool_get)(struct devlink_port *devlink_port,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold);
int (*sb_port_pool_set)(struct devlink_port *devlink_port,
unsigned int sb_index, u16 pool_index,
u32 threshold);
int (*sb_tc_pool_bind_get)(struct devlink_port *devlink_port,
unsigned int sb_index,
u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 *p_pool_index, u32 *p_threshold);
int (*sb_tc_pool_bind_set)(struct devlink_port *devlink_port,
unsigned int sb_index,
u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 pool_index, u32 threshold);
int (*sb_occ_snapshot)(struct devlink *devlink,
unsigned int sb_index);
int (*sb_occ_max_clear)(struct devlink *devlink,
unsigned int sb_index);
int (*sb_occ_port_pool_get)(struct devlink_port *devlink_port,
unsigned int sb_index, u16 pool_index,
u32 *p_cur, u32 *p_max);
int (*sb_occ_tc_port_bind_get)(struct devlink_port *devlink_port,
unsigned int sb_index,
u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max);
};

static inline void *devlink_priv(struct devlink *devlink)
@@ -82,6 +123,11 @@ void devlink_port_type_ib_set(struct devlink_port *devlink_port,
void devlink_port_type_clear(struct devlink_port *devlink_port);
void devlink_port_split_set(struct devlink_port *devlink_port,
u32 split_group);
int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
u32 size, u16 ingress_pools_count,
u16 egress_pools_count, u16 ingress_tc_count,
u16 egress_tc_count);
void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index);
#else
@@ -135,6 +181,19 @@ static inline void devlink_port_split_set(struct devlink_port *devlink_port,
{
}
static inline int devlink_sb_register(struct devlink *devlink,
unsigned int sb_index, u32 size,
u16 ingress_pools_count,
u16 egress_pools_count,
u16 ingress_tc_count, u16 egress_tc_count)
{
return 0;
}
static inline void devlink_sb_unregister(struct devlink *devlink,
unsigned int sb_index)
{
}
#endif

#endif /* _NET_DEVLINK_H_ */
@@ -33,6 +33,30 @@ enum devlink_command {
DEVLINK_CMD_PORT_SPLIT,
DEVLINK_CMD_PORT_UNSPLIT,
DEVLINK_CMD_SB_GET, /* can dump */
DEVLINK_CMD_SB_SET,
DEVLINK_CMD_SB_NEW,
DEVLINK_CMD_SB_DEL,
DEVLINK_CMD_SB_POOL_GET, /* can dump */
DEVLINK_CMD_SB_POOL_SET,
DEVLINK_CMD_SB_POOL_NEW,
DEVLINK_CMD_SB_POOL_DEL,
DEVLINK_CMD_SB_PORT_POOL_GET, /* can dump */
DEVLINK_CMD_SB_PORT_POOL_SET,
DEVLINK_CMD_SB_PORT_POOL_NEW,
DEVLINK_CMD_SB_PORT_POOL_DEL,
DEVLINK_CMD_SB_TC_POOL_BIND_GET, /* can dump */
DEVLINK_CMD_SB_TC_POOL_BIND_SET,
DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
DEVLINK_CMD_SB_TC_POOL_BIND_DEL,
/* Shared buffer occupancy monitoring commands */
DEVLINK_CMD_SB_OCC_SNAPSHOT,
DEVLINK_CMD_SB_OCC_MAX_CLEAR,
/* add new commands above here */
__DEVLINK_CMD_MAX,
@@ -46,6 +70,31 @@ enum devlink_port_type {
DEVLINK_PORT_TYPE_IB,
};
enum devlink_sb_pool_type {
DEVLINK_SB_POOL_TYPE_INGRESS,
DEVLINK_SB_POOL_TYPE_EGRESS,
};
/* static threshold - limiting the maximum number of bytes.
* dynamic threshold - limiting the maximum number of bytes
* based on the currently available free space in the shared buffer pool.
* In this mode, the maximum quota is calculated based
* on the following formula:
* max_quota = alpha / (1 + alpha) * Free_Buffer
* where Free_Buffer is the amount of non-occupied buffer associated with
* the relevant pool.
* The value range which can be passed is 0-20 and serves
* for computation of alpha by the following formula:
* alpha = 2 ^ (passed_value - 10)
*/
enum devlink_sb_threshold_type {
DEVLINK_SB_THRESHOLD_TYPE_STATIC,
DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC,
};
#define DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX 20
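
For a concrete sense of the formula above, here is a small standalone
userspace sketch (illustrative only, not part of this patch; the helper name
sb_dyn_max_quota is made up for the example) that evaluates the
dynamic-threshold quota:

#include <math.h>
#include <stdio.h>

/* Quota granted for a devlink dynamic threshold value (0-20), given the
 * amount of currently free buffer in the pool.
 */
static double sb_dyn_max_quota(unsigned int passed_value, double free_buffer)
{
	double alpha = pow(2.0, (double)passed_value - 10.0);

	return alpha / (1.0 + alpha) * free_buffer;
}

int main(void)
{
	/* With 1000000 free cells: value 16 (alpha = 64) allows ~984615
	 * cells, value 10 (alpha = 1) allows exactly half.
	 */
	printf("%.0f %.0f\n", sb_dyn_max_quota(16, 1e6),
	       sb_dyn_max_quota(10, 1e6));
	return 0;
}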
enum devlink_attr {
/* don't change the order or add anything between, this is ABI! */
DEVLINK_ATTR_UNSPEC,
@@ -62,6 +111,20 @@ enum devlink_attr {
DEVLINK_ATTR_PORT_IBDEV_NAME, /* string */
DEVLINK_ATTR_PORT_SPLIT_COUNT, /* u32 */
DEVLINK_ATTR_PORT_SPLIT_GROUP, /* u32 */
DEVLINK_ATTR_SB_INDEX, /* u32 */
DEVLINK_ATTR_SB_SIZE, /* u32 */
DEVLINK_ATTR_SB_INGRESS_POOL_COUNT, /* u16 */
DEVLINK_ATTR_SB_EGRESS_POOL_COUNT, /* u16 */
DEVLINK_ATTR_SB_INGRESS_TC_COUNT, /* u16 */
DEVLINK_ATTR_SB_EGRESS_TC_COUNT, /* u16 */
DEVLINK_ATTR_SB_POOL_INDEX, /* u16 */
DEVLINK_ATTR_SB_POOL_TYPE, /* u8 */
DEVLINK_ATTR_SB_POOL_SIZE, /* u32 */
DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE, /* u8 */
DEVLINK_ATTR_SB_THRESHOLD, /* u32 */
DEVLINK_ATTR_SB_TC_INDEX, /* u16 */
DEVLINK_ATTR_SB_OCC_CUR, /* u32 */
DEVLINK_ATTR_SB_OCC_MAX, /* u32 */
/* add new attributes above here, update the policy in devlink.c */
...
@@ -119,8 +119,171 @@ static struct devlink_port *devlink_port_get_from_info(struct devlink *devlink,
return devlink_port_get_from_attrs(devlink, info->attrs);
}
struct devlink_sb {
struct list_head list;
unsigned int index;
u32 size;
u16 ingress_pools_count;
u16 egress_pools_count;
u16 ingress_tc_count;
u16 egress_tc_count;
};
static u16 devlink_sb_pool_count(struct devlink_sb *devlink_sb)
{
return devlink_sb->ingress_pools_count + devlink_sb->egress_pools_count;
}
static struct devlink_sb *devlink_sb_get_by_index(struct devlink *devlink,
unsigned int sb_index)
{
struct devlink_sb *devlink_sb;
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
if (devlink_sb->index == sb_index)
return devlink_sb;
}
return NULL;
}
static bool devlink_sb_index_exists(struct devlink *devlink,
unsigned int sb_index)
{
return devlink_sb_get_by_index(devlink, sb_index);
}
static struct devlink_sb *devlink_sb_get_from_attrs(struct devlink *devlink,
struct nlattr **attrs)
{
if (attrs[DEVLINK_ATTR_SB_INDEX]) {
u32 sb_index = nla_get_u32(attrs[DEVLINK_ATTR_SB_INDEX]);
struct devlink_sb *devlink_sb;
devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
if (!devlink_sb)
return ERR_PTR(-ENODEV);
return devlink_sb;
}
return ERR_PTR(-EINVAL);
}
static struct devlink_sb *devlink_sb_get_from_info(struct devlink *devlink,
struct genl_info *info)
{
return devlink_sb_get_from_attrs(devlink, info->attrs);
}
static int devlink_sb_pool_index_get_from_attrs(struct devlink_sb *devlink_sb,
struct nlattr **attrs,
u16 *p_pool_index)
{
u16 val;
if (!attrs[DEVLINK_ATTR_SB_POOL_INDEX])
return -EINVAL;
val = nla_get_u16(attrs[DEVLINK_ATTR_SB_POOL_INDEX]);
if (val >= devlink_sb_pool_count(devlink_sb))
return -EINVAL;
*p_pool_index = val;
return 0;
}
static int devlink_sb_pool_index_get_from_info(struct devlink_sb *devlink_sb,
struct genl_info *info,
u16 *p_pool_index)
{
return devlink_sb_pool_index_get_from_attrs(devlink_sb, info->attrs,
p_pool_index);
}
static int
devlink_sb_pool_type_get_from_attrs(struct nlattr **attrs,
enum devlink_sb_pool_type *p_pool_type)
{
u8 val;
if (!attrs[DEVLINK_ATTR_SB_POOL_TYPE])
return -EINVAL;
val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_TYPE]);
if (val != DEVLINK_SB_POOL_TYPE_INGRESS &&
val != DEVLINK_SB_POOL_TYPE_EGRESS)
return -EINVAL;
*p_pool_type = val;
return 0;
}
static int
devlink_sb_pool_type_get_from_info(struct genl_info *info,
enum devlink_sb_pool_type *p_pool_type)
{
return devlink_sb_pool_type_get_from_attrs(info->attrs, p_pool_type);
}
static int
devlink_sb_th_type_get_from_attrs(struct nlattr **attrs,
enum devlink_sb_threshold_type *p_th_type)
{
u8 val;
if (!attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE])
return -EINVAL;
val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE]);
if (val != DEVLINK_SB_THRESHOLD_TYPE_STATIC &&
val != DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC)
return -EINVAL;
*p_th_type = val;
return 0;
}
static int
devlink_sb_th_type_get_from_info(struct genl_info *info,
enum devlink_sb_threshold_type *p_th_type)
{
return devlink_sb_th_type_get_from_attrs(info->attrs, p_th_type);
}
static int
devlink_sb_tc_index_get_from_attrs(struct devlink_sb *devlink_sb,
struct nlattr **attrs,
enum devlink_sb_pool_type pool_type,
u16 *p_tc_index)
{
u16 val;
if (!attrs[DEVLINK_ATTR_SB_TC_INDEX])
return -EINVAL;
val = nla_get_u16(attrs[DEVLINK_ATTR_SB_TC_INDEX]);
if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS &&
val >= devlink_sb->ingress_tc_count)
return -EINVAL;
if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS &&
val >= devlink_sb->egress_tc_count)
return -EINVAL;
*p_tc_index = val;
return 0;
}
static int
devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb,
struct genl_info *info,
enum devlink_sb_pool_type pool_type,
u16 *p_tc_index)
{
return devlink_sb_tc_index_get_from_attrs(devlink_sb, info->attrs,
pool_type, p_tc_index);
}
#define DEVLINK_NL_FLAG_NEED_DEVLINK BIT(0)
#define DEVLINK_NL_FLAG_NEED_PORT BIT(1)
#define DEVLINK_NL_FLAG_NEED_SB BIT(2)
#define DEVLINK_NL_FLAG_LOCK_PORTS BIT(3)
/* ports are not needed, but we still have to ensure that they don't
 * change in the middle of a command
 */
static int devlink_nl_pre_doit(const struct genl_ops *ops,
struct sk_buff *skb, struct genl_info *info)
@@ -147,13 +310,29 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops,
}
info->user_ptr[0] = devlink_port;
}
if (ops->internal_flags & DEVLINK_NL_FLAG_LOCK_PORTS)
mutex_lock(&devlink_port_mutex);
if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_SB) {
struct devlink_sb *devlink_sb;
devlink_sb = devlink_sb_get_from_info(devlink, info);
if (IS_ERR(devlink_sb)) {
if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT)
mutex_unlock(&devlink_port_mutex);
mutex_unlock(&devlink_mutex);
return PTR_ERR(devlink_sb);
}
info->user_ptr[1] = devlink_sb;
}
return 0;
}

static void devlink_nl_post_doit(const struct genl_ops *ops,
struct sk_buff *skb, struct genl_info *info)
{
if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT ||
ops->internal_flags & DEVLINK_NL_FLAG_LOCK_PORTS)
mutex_unlock(&devlink_port_mutex);
mutex_unlock(&devlink_mutex);
}
@@ -499,126 +678,928 @@ static int devlink_nl_cmd_port_unsplit_doit(struct sk_buff *skb,
return devlink_port_unsplit(devlink, port_index);
}
static int devlink_nl_sb_fill(struct sk_buff *msg, struct devlink *devlink,
struct devlink_sb *devlink_sb,
enum devlink_command cmd, u32 portid,
u32 seq, int flags)
{
void *hdr;

hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
if (!hdr)
return -EMSGSIZE;

if (devlink_nl_put_handle(msg, devlink))
goto nla_put_failure;
if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
goto nla_put_failure;
if (nla_put_u32(msg, DEVLINK_ATTR_SB_SIZE, devlink_sb->size))
goto nla_put_failure;
if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_POOL_COUNT,
devlink_sb->ingress_pools_count))
goto nla_put_failure;
if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_POOL_COUNT,
devlink_sb->egress_pools_count))
goto nla_put_failure;
if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_TC_COUNT,
devlink_sb->ingress_tc_count))
goto nla_put_failure;
if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_TC_COUNT,
devlink_sb->egress_tc_count))
goto nla_put_failure;

genlmsg_end(msg, hdr);
return 0;

nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}

static int devlink_nl_cmd_sb_get_doit(struct sk_buff *skb,
struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_sb *devlink_sb = info->user_ptr[1];
struct sk_buff *msg;
int err;

msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;

err = devlink_nl_sb_fill(msg, devlink, devlink_sb,
DEVLINK_CMD_SB_NEW,
info->snd_portid, info->snd_seq, 0);
if (err) {
nlmsg_free(msg);
return err;
}

return genlmsg_reply(msg, info);
}

static int devlink_nl_cmd_sb_get_dumpit(struct sk_buff *msg,
struct netlink_callback *cb)
{
struct devlink *devlink;
struct devlink_sb *devlink_sb;
int start = cb->args[0];
int idx = 0;
int err;

mutex_lock(&devlink_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
continue;
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
if (idx < start) {
idx++;
continue;
}
err = devlink_nl_sb_fill(msg, devlink, devlink_sb,
DEVLINK_CMD_SB_NEW,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI);
if (err)
goto out;
idx++;
}
}
out:
mutex_unlock(&devlink_mutex);

cb->args[0] = idx;
return msg->len;
}

static int devlink_nl_sb_pool_fill(struct sk_buff *msg, struct devlink *devlink,
struct devlink_sb *devlink_sb,
u16 pool_index, enum devlink_command cmd,
u32 portid, u32 seq, int flags)
{
struct devlink_sb_pool_info pool_info;
void *hdr;
int err;

err = devlink->ops->sb_pool_get(devlink, devlink_sb->index,
pool_index, &pool_info);
if (err)
return err;

hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
if (!hdr)
return -EMSGSIZE;

if (devlink_nl_put_handle(msg, devlink))
goto nla_put_failure;
if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
goto nla_put_failure;
if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
goto nla_put_failure;
if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_info.pool_type))
goto nla_put_failure;
if (nla_put_u32(msg, DEVLINK_ATTR_SB_POOL_SIZE, pool_info.size))
goto nla_put_failure;
if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE,
pool_info.threshold_type))
goto nla_put_failure;

genlmsg_end(msg, hdr);
return 0;

nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}

static int devlink_nl_cmd_sb_pool_get_doit(struct sk_buff *skb,
struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_sb *devlink_sb = info->user_ptr[1];
struct sk_buff *msg;
u16 pool_index;
int err;

err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
&pool_index);
if (err)
return err;

if (!devlink->ops || !devlink->ops->sb_pool_get)
return -EOPNOTSUPP;

msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;

err = devlink_nl_sb_pool_fill(msg, devlink, devlink_sb, pool_index,
DEVLINK_CMD_SB_POOL_NEW,
info->snd_portid, info->snd_seq, 0);
if (err) {
nlmsg_free(msg);
return err;
}

return genlmsg_reply(msg, info);
}
static int __sb_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
struct devlink *devlink,
struct devlink_sb *devlink_sb,
u32 portid, u32 seq)
{
u16 pool_count = devlink_sb_pool_count(devlink_sb);
u16 pool_index;
int err;
for (pool_index = 0; pool_index < pool_count; pool_index++) {
if (*p_idx < start) {
(*p_idx)++;
continue;
}
err = devlink_nl_sb_pool_fill(msg, devlink,
devlink_sb,
pool_index,
DEVLINK_CMD_SB_POOL_NEW,
portid, seq, NLM_F_MULTI);
if (err)
return err;
(*p_idx)++;
}
return 0;
}
static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
struct netlink_callback *cb)
{
struct devlink *devlink;
struct devlink_sb *devlink_sb;
int start = cb->args[0];
int idx = 0;
int err;
mutex_lock(&devlink_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
!devlink->ops || !devlink->ops->sb_pool_get)
continue;
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
err = __sb_pool_get_dumpit(msg, start, &idx, devlink,
devlink_sb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq);
if (err && err != -EOPNOTSUPP)
goto out;
}
}
out:
mutex_unlock(&devlink_mutex);
cb->args[0] = idx;
return msg->len;
}
static int devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index,
u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type)
{
const struct devlink_ops *ops = devlink->ops;
if (ops && ops->sb_pool_set)
return ops->sb_pool_set(devlink, sb_index, pool_index,
size, threshold_type);
return -EOPNOTSUPP;
}
static int devlink_nl_cmd_sb_pool_set_doit(struct sk_buff *skb,
struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_sb *devlink_sb = info->user_ptr[1];
enum devlink_sb_threshold_type threshold_type;
u16 pool_index;
u32 size;
int err;
err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
&pool_index);
if (err)
return err;
err = devlink_sb_th_type_get_from_info(info, &threshold_type);
if (err)
return err;
if (!info->attrs[DEVLINK_ATTR_SB_POOL_SIZE])
return -EINVAL;
size = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_POOL_SIZE]);
return devlink_sb_pool_set(devlink, devlink_sb->index,
pool_index, size, threshold_type);
}
static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg,
struct devlink *devlink,
struct devlink_port *devlink_port,
struct devlink_sb *devlink_sb,
u16 pool_index,
enum devlink_command cmd,
u32 portid, u32 seq, int flags)
{
const struct devlink_ops *ops = devlink->ops;
u32 threshold;
void *hdr;
int err;
err = ops->sb_port_pool_get(devlink_port, devlink_sb->index,
pool_index, &threshold);
if (err)
return err;
hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
if (!hdr)
return -EMSGSIZE;
if (devlink_nl_put_handle(msg, devlink))
goto nla_put_failure;
if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
goto nla_put_failure;
if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
goto nla_put_failure;
if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
goto nla_put_failure;
if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold))
goto nla_put_failure;
if (ops->sb_occ_port_pool_get) {
u32 cur;
u32 max;
err = ops->sb_occ_port_pool_get(devlink_port, devlink_sb->index,
pool_index, &cur, &max);
if (err && err != -EOPNOTSUPP)
return err;
if (!err) {
if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
goto nla_put_failure;
if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max))
goto nla_put_failure;
}
}
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
static int devlink_nl_cmd_sb_port_pool_get_doit(struct sk_buff *skb,
struct genl_info *info)
{
struct devlink_port *devlink_port = info->user_ptr[0];
struct devlink *devlink = devlink_port->devlink;
struct devlink_sb *devlink_sb = info->user_ptr[1];
struct sk_buff *msg;
u16 pool_index;
int err;
err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
&pool_index);
if (err)
return err;
if (!devlink->ops || !devlink->ops->sb_port_pool_get)
return -EOPNOTSUPP;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
err = devlink_nl_sb_port_pool_fill(msg, devlink, devlink_port,
devlink_sb, pool_index,
DEVLINK_CMD_SB_PORT_POOL_NEW,
info->snd_portid, info->snd_seq, 0);
if (err) {
nlmsg_free(msg);
return err;
}
return genlmsg_reply(msg, info);
}
static int __sb_port_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
struct devlink *devlink,
struct devlink_sb *devlink_sb,
u32 portid, u32 seq)
{
struct devlink_port *devlink_port;
u16 pool_count = devlink_sb_pool_count(devlink_sb);
u16 pool_index;
int err;
list_for_each_entry(devlink_port, &devlink->port_list, list) {
for (pool_index = 0; pool_index < pool_count; pool_index++) {
if (*p_idx < start) {
(*p_idx)++;
continue;
}
err = devlink_nl_sb_port_pool_fill(msg, devlink,
devlink_port,
devlink_sb,
pool_index,
DEVLINK_CMD_SB_PORT_POOL_NEW,
portid, seq,
NLM_F_MULTI);
if (err)
return err;
(*p_idx)++;
}
}
return 0;
}
static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
struct netlink_callback *cb)
{
struct devlink *devlink;
struct devlink_sb *devlink_sb;
int start = cb->args[0];
int idx = 0;
int err;
mutex_lock(&devlink_mutex);
mutex_lock(&devlink_port_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
!devlink->ops || !devlink->ops->sb_port_pool_get)
continue;
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
err = __sb_port_pool_get_dumpit(msg, start, &idx,
devlink, devlink_sb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq);
if (err && err != -EOPNOTSUPP)
goto out;
}
}
out:
mutex_unlock(&devlink_port_mutex);
mutex_unlock(&devlink_mutex);
cb->args[0] = idx;
return msg->len;
}
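/* Thin dispatcher for the per-port pool threshold, mirroring
 * devlink_sb_pool_set() above.
 */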
static int devlink_sb_port_pool_set(struct devlink_port *devlink_port,
unsigned int sb_index, u16 pool_index,
u32 threshold)
{
const struct devlink_ops *ops = devlink_port->devlink->ops;
if (ops && ops->sb_port_pool_set)
return ops->sb_port_pool_set(devlink_port, sb_index,
pool_index, threshold);
return -EOPNOTSUPP;
}
static int devlink_nl_cmd_sb_port_pool_set_doit(struct sk_buff *skb,
struct genl_info *info)
{
struct devlink_port *devlink_port = info->user_ptr[0];
struct devlink_sb *devlink_sb = info->user_ptr[1];
u16 pool_index;
u32 threshold;
int err;
err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
&pool_index);
if (err)
return err;
if (!info->attrs[DEVLINK_ATTR_SB_THRESHOLD])
return -EINVAL;
threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]);
return devlink_sb_port_pool_set(devlink_port, devlink_sb->index,
pool_index, threshold);
}
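/* Fill one TC-to-pool binding for a single port: the bound pool index
 * and threshold, plus optional occupancy values.
 */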
static int
devlink_nl_sb_tc_pool_bind_fill(struct sk_buff *msg, struct devlink *devlink,
struct devlink_port *devlink_port,
struct devlink_sb *devlink_sb, u16 tc_index,
enum devlink_sb_pool_type pool_type,
enum devlink_command cmd,
u32 portid, u32 seq, int flags)
{
const struct devlink_ops *ops = devlink->ops;
u16 pool_index;
u32 threshold;
void *hdr;
int err;
err = ops->sb_tc_pool_bind_get(devlink_port, devlink_sb->index,
tc_index, pool_type,
&pool_index, &threshold);
if (err)
return err;
hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
if (!hdr)
return -EMSGSIZE;
if (devlink_nl_put_handle(msg, devlink))
goto nla_put_failure;
if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
goto nla_put_failure;
if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
goto nla_put_failure;
if (nla_put_u16(msg, DEVLINK_ATTR_SB_TC_INDEX, tc_index))
goto nla_put_failure;
if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_type))
goto nla_put_failure;
if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
goto nla_put_failure;
if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold))
goto nla_put_failure;
if (ops->sb_occ_tc_port_bind_get) {
u32 cur;
u32 max;
err = ops->sb_occ_tc_port_bind_get(devlink_port,
devlink_sb->index,
tc_index, pool_type,
&cur, &max);
if (err && err != -EOPNOTSUPP)
return err;
if (!err) {
if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
goto nla_put_failure;
if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max))
goto nla_put_failure;
}
}
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
}
static int devlink_nl_cmd_sb_tc_pool_bind_get_doit(struct sk_buff *skb,
struct genl_info *info)
{
struct devlink_port *devlink_port = info->user_ptr[0];
struct devlink *devlink = devlink_port->devlink;
struct devlink_sb *devlink_sb = info->user_ptr[1];
struct sk_buff *msg;
enum devlink_sb_pool_type pool_type;
u16 tc_index;
int err;
err = devlink_sb_pool_type_get_from_info(info, &pool_type);
if (err)
return err;
err = devlink_sb_tc_index_get_from_info(devlink_sb, info,
pool_type, &tc_index);
if (err)
return err;
if (!devlink->ops || !devlink->ops->sb_tc_pool_bind_get)
return -EOPNOTSUPP;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink, devlink_port,
devlink_sb, tc_index, pool_type,
DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
info->snd_portid,
info->snd_seq, 0);
if (err) {
nlmsg_free(msg);
return err;
}
return genlmsg_reply(msg, info);
}
static int __sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
int start, int *p_idx,
struct devlink *devlink,
struct devlink_sb *devlink_sb,
u32 portid, u32 seq)
{
struct devlink_port *devlink_port;
u16 tc_index;
int err;
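	/* For every port, dump the ingress TC bindings first, then the
	 * egress ones, keeping a single flat index for the dump cursor.
	 */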
list_for_each_entry(devlink_port, &devlink->port_list, list) {
for (tc_index = 0;
tc_index < devlink_sb->ingress_tc_count; tc_index++) {
if (*p_idx < start) {
(*p_idx)++;
continue;
}
err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink,
devlink_port,
devlink_sb,
tc_index,
DEVLINK_SB_POOL_TYPE_INGRESS,
DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
portid, seq,
NLM_F_MULTI);
if (err)
return err;
(*p_idx)++;
}
for (tc_index = 0;
tc_index < devlink_sb->egress_tc_count; tc_index++) {
if (*p_idx < start) {
(*p_idx)++;
continue;
}
err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink,
devlink_port,
devlink_sb,
tc_index,
DEVLINK_SB_POOL_TYPE_EGRESS,
DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
portid, seq,
NLM_F_MULTI);
if (err)
return err;
(*p_idx)++;
}
}
return 0;
}
static int
devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
struct netlink_callback *cb)
{
struct devlink *devlink;
struct devlink_sb *devlink_sb;
int start = cb->args[0];
int idx = 0;
int err;
mutex_lock(&devlink_mutex);
mutex_lock(&devlink_port_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
!devlink->ops || !devlink->ops->sb_tc_pool_bind_get)
continue;
list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
err = __sb_tc_pool_bind_get_dumpit(msg, start, &idx,
devlink,
devlink_sb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq);
if (err && err != -EOPNOTSUPP)
goto out;
}
}
out:
mutex_unlock(&devlink_port_mutex);
mutex_unlock(&devlink_mutex);
cb->args[0] = idx;
return msg->len;
}
static int devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 pool_index, u32 threshold)
{
const struct devlink_ops *ops = devlink_port->devlink->ops;
if (ops && ops->sb_tc_pool_bind_set)
return ops->sb_tc_pool_bind_set(devlink_port, sb_index,
tc_index, pool_type,
pool_index, threshold);
return -EOPNOTSUPP;
}
static int devlink_nl_cmd_sb_tc_pool_bind_set_doit(struct sk_buff *skb,
struct genl_info *info)
{
struct devlink_port *devlink_port = info->user_ptr[0];
struct devlink_sb *devlink_sb = info->user_ptr[1];
enum devlink_sb_pool_type pool_type;
u16 tc_index;
u16 pool_index;
u32 threshold;
int err;
err = devlink_sb_pool_type_get_from_info(info, &pool_type);
if (err)
return err;
err = devlink_sb_tc_index_get_from_info(devlink_sb, info,
pool_type, &tc_index);
if (err)
return err;
err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
&pool_index);
if (err)
return err;
if (!info->attrs[DEVLINK_ATTR_SB_THRESHOLD])
return -EINVAL;
threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]);
return devlink_sb_tc_pool_bind_set(devlink_port, devlink_sb->index,
tc_index, pool_type,
pool_index, threshold);
}
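/* Occupancy control: .sb_occ_snapshot asks the driver to record the
 * current and maximal occupancy values for later retrieval, and
 * .sb_occ_max_clear resets the recorded maximums.
 */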
static int devlink_nl_cmd_sb_occ_snapshot_doit(struct sk_buff *skb,
struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_sb *devlink_sb = info->user_ptr[1];
const struct devlink_ops *ops = devlink->ops;
if (ops && ops->sb_occ_snapshot)
return ops->sb_occ_snapshot(devlink, devlink_sb->index);
return -EOPNOTSUPP;
}
static int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb,
struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_sb *devlink_sb = info->user_ptr[1];
const struct devlink_ops *ops = devlink->ops;
if (ops && ops->sb_occ_max_clear)
return ops->sb_occ_max_clear(devlink, devlink_sb->index);
return -EOPNOTSUPP;
}
static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32 },
[DEVLINK_ATTR_PORT_TYPE] = { .type = NLA_U16 },
[DEVLINK_ATTR_PORT_SPLIT_COUNT] = { .type = NLA_U32 },
[DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32 },
[DEVLINK_ATTR_SB_POOL_INDEX] = { .type = NLA_U16 },
[DEVLINK_ATTR_SB_POOL_TYPE] = { .type = NLA_U8 },
[DEVLINK_ATTR_SB_POOL_SIZE] = { .type = NLA_U32 },
[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE] = { .type = NLA_U8 },
[DEVLINK_ATTR_SB_THRESHOLD] = { .type = NLA_U32 },
[DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16 },
};
static const struct genl_ops devlink_nl_ops[] = {
{
.cmd = DEVLINK_CMD_GET,
.doit = devlink_nl_cmd_get_doit,
.dumpit = devlink_nl_cmd_get_dumpit,
.policy = devlink_nl_policy,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_PORT_GET,
.doit = devlink_nl_cmd_port_get_doit,
.dumpit = devlink_nl_cmd_port_get_dumpit,
.policy = devlink_nl_policy,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_PORT_SET,
.doit = devlink_nl_cmd_port_set_doit,
.policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
},
{
.cmd = DEVLINK_CMD_PORT_SPLIT,
.doit = devlink_nl_cmd_port_split_doit,
.policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
.cmd = DEVLINK_CMD_PORT_UNSPLIT,
.doit = devlink_nl_cmd_port_unsplit_doit,
.policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
},
{
.cmd = DEVLINK_CMD_SB_GET,
.doit = devlink_nl_cmd_sb_get_doit,
.dumpit = devlink_nl_cmd_sb_get_dumpit,
.policy = devlink_nl_policy,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NEED_SB,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_SB_POOL_GET,
.doit = devlink_nl_cmd_sb_pool_get_doit,
.dumpit = devlink_nl_cmd_sb_pool_get_dumpit,
.policy = devlink_nl_policy,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NEED_SB,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_SB_POOL_SET,
.doit = devlink_nl_cmd_sb_pool_set_doit,
.policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NEED_SB,
},
{
.cmd = DEVLINK_CMD_SB_PORT_POOL_GET,
.doit = devlink_nl_cmd_sb_port_pool_get_doit,
.dumpit = devlink_nl_cmd_sb_port_pool_get_dumpit,
.policy = devlink_nl_policy,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
DEVLINK_NL_FLAG_NEED_SB,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_SB_PORT_POOL_SET,
.doit = devlink_nl_cmd_sb_port_pool_set_doit,
.policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
DEVLINK_NL_FLAG_NEED_SB,
},
{
.cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET,
.doit = devlink_nl_cmd_sb_tc_pool_bind_get_doit,
.dumpit = devlink_nl_cmd_sb_tc_pool_bind_get_dumpit,
.policy = devlink_nl_policy,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
DEVLINK_NL_FLAG_NEED_SB,
/* can be retrieved by unprivileged users */
},
{
.cmd = DEVLINK_CMD_SB_TC_POOL_BIND_SET,
.doit = devlink_nl_cmd_sb_tc_pool_bind_set_doit,
.policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
DEVLINK_NL_FLAG_NEED_SB,
},
{
.cmd = DEVLINK_CMD_SB_OCC_SNAPSHOT,
.doit = devlink_nl_cmd_sb_occ_snapshot_doit,
.policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NEED_SB |
DEVLINK_NL_FLAG_LOCK_PORTS,
},
{
.cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR,
.doit = devlink_nl_cmd_sb_occ_max_clear_doit,
.policy = devlink_nl_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
DEVLINK_NL_FLAG_NEED_SB |
DEVLINK_NL_FLAG_LOCK_PORTS,
},
};
/**
* devlink_alloc - Allocate new devlink instance resources
*
 * @ops: ops callbacks the driver implements for this instance
* @priv_size: size of user private data
*
* Allocate new devlink instance resources, including devlink index
* and name.
*/
struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
{
struct devlink *devlink;
devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
if (!devlink)
return NULL;
devlink->ops = ops;
devlink_net_set(devlink, &init_net);
INIT_LIST_HEAD(&devlink->port_list);
INIT_LIST_HEAD(&devlink->sb_list);
return devlink;
}
EXPORT_SYMBOL_GPL(devlink_alloc);
/**
* devlink_register - Register devlink instance
*
 * @devlink: devlink
 * @dev: parent device
*/
int devlink_register(struct devlink *devlink, struct device *dev)
{
mutex_lock(&devlink_mutex);
devlink->dev = dev;
list_add_tail(&devlink->list, &devlink_list);
devlink_notify(devlink, DEVLINK_CMD_NEW);
mutex_unlock(&devlink_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(devlink_register);
/**
* devlink_unregister - Unregister devlink instance
*
* @devlink: devlink
*/
void devlink_unregister(struct devlink *devlink)
{
mutex_lock(&devlink_mutex);
devlink_notify(devlink, DEVLINK_CMD_DEL);
list_del(&devlink->list);
mutex_unlock(&devlink_mutex);
}
EXPORT_SYMBOL_GPL(devlink_unregister);
/**
* devlink_free - Free devlink instance resources
*
* @devlink: devlink
*/
void devlink_free(struct devlink *devlink)
{
kfree(devlink);
}
EXPORT_SYMBOL_GPL(devlink_free);
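/* Note: devlink_free() only releases memory; a registered instance must
 * be torn down with devlink_unregister() first.
 */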
/**
* devlink_port_register - Register devlink port
*
* @devlink: devlink
* @devlink_port: devlink port
 * @port_index: driver-specific port index
*
* Register devlink port with provided port index. User can use
 * any indexing, even hw-related one. devlink_port structure
 * is convenient to be embedded inside user driver private structure.
 * Note that the caller should take care of zeroing the devlink_port
...@@ -721,6 +1702,51 @@ void devlink_port_split_set(struct devlink_port *devlink_port,
}
EXPORT_SYMBOL_GPL(devlink_port_split_set);
int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
u32 size, u16 ingress_pools_count,
u16 egress_pools_count, u16 ingress_tc_count,
u16 egress_tc_count)
{
struct devlink_sb *devlink_sb;
int err = 0;
mutex_lock(&devlink_mutex);
if (devlink_sb_index_exists(devlink, sb_index)) {
err = -EEXIST;
goto unlock;
}
devlink_sb = kzalloc(sizeof(*devlink_sb), GFP_KERNEL);
if (!devlink_sb) {
err = -ENOMEM;
goto unlock;
}
devlink_sb->index = sb_index;
devlink_sb->size = size;
devlink_sb->ingress_pools_count = ingress_pools_count;
devlink_sb->egress_pools_count = egress_pools_count;
devlink_sb->ingress_tc_count = ingress_tc_count;
devlink_sb->egress_tc_count = egress_tc_count;
list_add_tail(&devlink_sb->list, &devlink->sb_list);
unlock:
mutex_unlock(&devlink_mutex);
return err;
}
EXPORT_SYMBOL_GPL(devlink_sb_register);
void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index)
{
struct devlink_sb *devlink_sb;
mutex_lock(&devlink_mutex);
devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
WARN_ON(!devlink_sb);
list_del(&devlink_sb->list);
mutex_unlock(&devlink_mutex);
kfree(devlink_sb);
}
EXPORT_SYMBOL_GPL(devlink_sb_unregister);
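/*
 * Illustrative sketch (not part of this patch): how a hypothetical
 * driver's probe/remove path could tie the API above together. The
 * my_* identifiers and the buffer geometry are assumptions made up
 * for this example.
 */
struct my_switch {
	struct devlink_port ports[8];
};

static const struct devlink_ops my_devlink_ops = {
	/* .sb_pool_get, .sb_pool_set, ... would be wired up here. */
};

static struct devlink *my_probe(struct device *dev)
{
	struct devlink *devlink;
	int err;

	devlink = devlink_alloc(&my_devlink_ops, sizeof(struct my_switch));
	if (!devlink)
		return ERR_PTR(-ENOMEM);
	err = devlink_register(devlink, dev);
	if (err)
		goto err_register;
	/* One 16 MB shared buffer with 4+4 pools and 8+8 TCs. */
	err = devlink_sb_register(devlink, 0, 16 * 1024 * 1024, 4, 4, 8, 8);
	if (err)
		goto err_sb_register;
	return devlink;

err_sb_register:
	devlink_unregister(devlink);
err_register:
	devlink_free(devlink);
	return ERR_PTR(err);
}

static void my_remove(struct devlink *devlink)
{
	devlink_sb_unregister(devlink, 0);
	devlink_unregister(devlink);
	devlink_free(devlink);
}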
static int __init devlink_module_init(void)
{
	return genl_register_family_with_ops_groups(&devlink_nl_family,
...