Commit 5a6e55c4 authored by David S. Miller's avatar David S. Miller

Merge branch 'mlx4'

Or Gerlitz says:

====================
Mellanox driver updates

This patch set from Jack Morgenstein does the following:

1. Fix MAC/VLAN SRIOV implementation, and add wrapper functions for VLAN allocation
   and de-allocation (patches 1-6).

2. Implements resource quotas when running under SRIOV (patches 7-10).
   Patch 7 is a small bug fix, and patches 8-10 implement the quotas.

Quotas are implemented per resource type for VFs and the PF, to prevent
any entity from simply grabbing all the resources for itself and leaving
the other entities unable to obtain such resources.

The series is against net-next commit ba486502 "ipv6: remove the unnecessary statement in find_match()"

changes from V0:
 - dropped the 1st patch which needs to go to -stable and hence through net,
   not net-next
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 3b4c5cbf 146f3ef4
...@@ -177,18 +177,18 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, ...@@ -177,18 +177,18 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->max_mr_size = ~0ull; props->max_mr_size = ~0ull;
props->page_size_cap = dev->dev->caps.page_size_cap; props->page_size_cap = dev->dev->caps.page_size_cap;
props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps; props->max_qp = dev->dev->quotas.qp;
props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE; props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
props->max_sge = min(dev->dev->caps.max_sq_sg, props->max_sge = min(dev->dev->caps.max_sq_sg,
dev->dev->caps.max_rq_sg); dev->dev->caps.max_rq_sg);
props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs; props->max_cq = dev->dev->quotas.cq;
props->max_cqe = dev->dev->caps.max_cqes; props->max_cqe = dev->dev->caps.max_cqes;
props->max_mr = dev->dev->caps.num_mpts - dev->dev->caps.reserved_mrws; props->max_mr = dev->dev->quotas.mpt;
props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds; props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma; props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma; props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
props->max_srq = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs; props->max_srq = dev->dev->quotas.srq;
props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1; props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
props->max_srq_sge = dev->dev->caps.max_srq_sge; props->max_srq_sge = dev->dev->caps.max_srq_sge;
props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES; props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
......
...@@ -1687,7 +1687,7 @@ static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave ...@@ -1687,7 +1687,7 @@ static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if (NO_INDX != vp_oper->vlan_idx) { if (NO_INDX != vp_oper->vlan_idx) {
__mlx4_unregister_vlan(&priv->dev, __mlx4_unregister_vlan(&priv->dev,
port, vp_oper->vlan_idx); port, vp_oper->state.default_vlan);
vp_oper->vlan_idx = NO_INDX; vp_oper->vlan_idx = NO_INDX;
} }
if (NO_INDX != vp_oper->mac_idx) { if (NO_INDX != vp_oper->mac_idx) {
...@@ -1718,6 +1718,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, ...@@ -1718,6 +1718,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
if (cmd == MLX4_COMM_CMD_RESET) { if (cmd == MLX4_COMM_CMD_RESET) {
mlx4_warn(dev, "Received reset from slave:%d\n", slave); mlx4_warn(dev, "Received reset from slave:%d\n", slave);
slave_state[slave].active = false; slave_state[slave].active = false;
slave_state[slave].old_vlan_api = false;
mlx4_master_deactivate_admin_state(priv, slave); mlx4_master_deactivate_admin_state(priv, slave);
for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) { for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
slave_state[slave].event_eq[i].eqn = -1; slave_state[slave].event_eq[i].eqn = -1;
......
...@@ -417,7 +417,6 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, ...@@ -417,7 +417,6 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_dev *mdev = priv->mdev;
int err; int err;
int idx;
en_dbg(HW, priv, "Killing VID:%d\n", vid); en_dbg(HW, priv, "Killing VID:%d\n", vid);
...@@ -425,10 +424,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, ...@@ -425,10 +424,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
/* Remove VID from port VLAN filter */ /* Remove VID from port VLAN filter */
mutex_lock(&mdev->state_lock); mutex_lock(&mdev->state_lock);
if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx)) mlx4_unregister_vlan(mdev->dev, priv->port, vid);
mlx4_unregister_vlan(mdev->dev, priv->port, idx);
else
en_dbg(HW, priv, "could not find vid %d in cache\n", vid);
if (mdev->device_up && priv->port_up) { if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
......
...@@ -177,6 +177,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, ...@@ -177,6 +177,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd) struct mlx4_cmd_info *cmd)
{ {
struct mlx4_priv *priv = mlx4_priv(dev);
u8 field; u8 field;
u32 size; u32 size;
int err = 0; int err = 0;
...@@ -185,18 +186,26 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, ...@@ -185,18 +186,26 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4 #define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
#define QUERY_FUNC_CAP_FMR_OFFSET 0x8 #define QUERY_FUNC_CAP_FMR_OFFSET 0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP 0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP 0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18 #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP 0x18
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x20 #define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP 0x20
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x24 #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP 0x24
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x28 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c #define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30 #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x58
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x60
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68
#define QUERY_FUNC_CAP_FMR_FLAG 0x80 #define QUERY_FUNC_CAP_FMR_FLAG 0x80
#define QUERY_FUNC_CAP_FLAG_RDMA 0x40 #define QUERY_FUNC_CAP_FLAG_RDMA 0x40
#define QUERY_FUNC_CAP_FLAG_ETH 0x80 #define QUERY_FUNC_CAP_FLAG_ETH 0x80
#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10
/* when opcode modifier = 1 */ /* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
...@@ -237,8 +246,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, ...@@ -237,8 +246,9 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY); MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY);
} else if (vhcr->op_modifier == 0) { } else if (vhcr->op_modifier == 0) {
/* enable rdma and ethernet interfaces */ /* enable rdma and ethernet interfaces, and new quota locations */
field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA); field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
QUERY_FUNC_CAP_FLAG_QUOTAS);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
field = dev->caps.num_ports; field = dev->caps.num_ports;
...@@ -250,14 +260,20 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, ...@@ -250,14 +260,20 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
field = 0; /* protected FMR support not available as yet */ field = 0; /* protected FMR support not available as yet */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET); MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);
size = dev->caps.num_qps; size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
size = dev->caps.num_qps;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
size = dev->caps.num_srqs; size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
size = dev->caps.num_srqs;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
size = dev->caps.num_cqs; size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
size = dev->caps.num_cqs;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
size = dev->caps.num_eqs; size = dev->caps.num_eqs;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
...@@ -265,14 +281,19 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, ...@@ -265,14 +281,19 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
size = dev->caps.reserved_eqs; size = dev->caps.reserved_eqs;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
size = dev->caps.num_mpts; size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
size = dev->caps.num_mpts;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
size = dev->caps.num_mtts; size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
size = dev->caps.num_mtts;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
size = dev->caps.num_mgms + dev->caps.num_amgms; size = dev->caps.num_mgms + dev->caps.num_amgms;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
} else } else
err = -EINVAL; err = -EINVAL;
...@@ -287,7 +308,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, ...@@ -287,7 +308,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
u32 *outbox; u32 *outbox;
u8 field, op_modifier; u8 field, op_modifier;
u32 size; u32 size;
int err = 0; int err = 0, quotas = 0;
op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */ op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
...@@ -311,6 +332,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, ...@@ -311,6 +332,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
goto out; goto out;
} }
func_cap->flags = field; func_cap->flags = field;
quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);
MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
func_cap->num_ports = field; func_cap->num_ports = field;
...@@ -318,29 +340,50 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, ...@@ -318,29 +340,50 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET); MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
func_cap->pf_context_behaviour = size; func_cap->pf_context_behaviour = size;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); if (quotas) {
func_cap->qp_quota = size & 0xFFFFFF; MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
func_cap->qp_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
func_cap->srq_quota = size & 0xFFFFFF; func_cap->srq_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
func_cap->cq_quota = size & 0xFFFFFF; func_cap->cq_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
func_cap->mpt_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
func_cap->mtt_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
func_cap->mcg_quota = size & 0xFFFFFF;
} else {
MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
func_cap->qp_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
func_cap->srq_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
func_cap->cq_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
func_cap->mpt_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
func_cap->mtt_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
func_cap->mcg_quota = size & 0xFFFFFF;
}
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET); MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
func_cap->max_eq = size & 0xFFFFFF; func_cap->max_eq = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
func_cap->reserved_eq = size & 0xFFFFFF; func_cap->reserved_eq = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
func_cap->mpt_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
func_cap->mtt_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
func_cap->mcg_quota = size & 0xFFFFFF;
goto out; goto out;
} }
......
...@@ -562,13 +562,17 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) ...@@ -562,13 +562,17 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
} }
dev->caps.num_ports = func_cap.num_ports; dev->caps.num_ports = func_cap.num_ports;
dev->caps.num_qps = func_cap.qp_quota; dev->quotas.qp = func_cap.qp_quota;
dev->caps.num_srqs = func_cap.srq_quota; dev->quotas.srq = func_cap.srq_quota;
dev->caps.num_cqs = func_cap.cq_quota; dev->quotas.cq = func_cap.cq_quota;
dev->caps.num_eqs = func_cap.max_eq; dev->quotas.mpt = func_cap.mpt_quota;
dev->caps.reserved_eqs = func_cap.reserved_eq; dev->quotas.mtt = func_cap.mtt_quota;
dev->caps.num_mpts = func_cap.mpt_quota; dev->caps.num_qps = 1 << hca_param.log_num_qps;
dev->caps.num_mtts = func_cap.mtt_quota; dev->caps.num_srqs = 1 << hca_param.log_num_srqs;
dev->caps.num_cqs = 1 << hca_param.log_num_cqs;
dev->caps.num_mpts = 1 << hca_param.log_mpt_sz;
dev->caps.num_eqs = func_cap.max_eq;
dev->caps.reserved_eqs = func_cap.reserved_eq;
dev->caps.num_pds = MLX4_NUM_PDS; dev->caps.num_pds = MLX4_NUM_PDS;
dev->caps.num_mgms = 0; dev->caps.num_mgms = 0;
dev->caps.num_amgms = 0; dev->caps.num_amgms = 0;
...@@ -2102,9 +2106,15 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) ...@@ -2102,9 +2106,15 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
"aborting.\n"); "aborting.\n");
return err; return err;
} }
if (num_vfs > MLX4_MAX_NUM_VF) {
printk(KERN_ERR "There are more VF's (%d) than allowed(%d)\n", /* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS
num_vfs, MLX4_MAX_NUM_VF); * per port, we must limit the number of VFs to 63 (since their are
* 128 MACs)
*/
if (num_vfs >= MLX4_MAX_NUM_VF) {
dev_err(&pdev->dev,
"Requested more VF's (%d) than allowed (%d)\n",
num_vfs, MLX4_MAX_NUM_VF - 1);
return -EINVAL; return -EINVAL;
} }
...@@ -2322,6 +2332,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) ...@@ -2322,6 +2332,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
if (err) if (err)
goto err_steer; goto err_steer;
mlx4_init_quotas(dev);
for (port = 1; port <= dev->caps.num_ports; port++) { for (port = 1; port <= dev->caps.num_ports; port++) {
err = mlx4_init_port_info(dev, port); err = mlx4_init_port_info(dev, port);
if (err) if (err)
......
...@@ -455,6 +455,7 @@ struct mlx4_slave_state { ...@@ -455,6 +455,7 @@ struct mlx4_slave_state {
u8 last_cmd; u8 last_cmd;
u8 init_port_mask; u8 init_port_mask;
bool active; bool active;
bool old_vlan_api;
u8 function; u8 function;
dma_addr_t vhcr_dma; dma_addr_t vhcr_dma;
u16 mtu[MLX4_MAX_PORTS + 1]; u16 mtu[MLX4_MAX_PORTS + 1];
...@@ -503,12 +504,28 @@ struct slave_list { ...@@ -503,12 +504,28 @@ struct slave_list {
struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE]; struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
}; };
struct resource_allocator {
spinlock_t alloc_lock; /* protect quotas */
union {
int res_reserved;
int res_port_rsvd[MLX4_MAX_PORTS];
};
union {
int res_free;
int res_port_free[MLX4_MAX_PORTS];
};
int *quota;
int *allocated;
int *guaranteed;
};
struct mlx4_resource_tracker { struct mlx4_resource_tracker {
spinlock_t lock; spinlock_t lock;
/* tree for each resources */ /* tree for each resources */
struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE]; struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
/* num_of_slave's lists, one per slave */ /* num_of_slave's lists, one per slave */
struct slave_list *slave_list; struct slave_list *slave_list;
struct resource_allocator res_alloc[MLX4_NUM_OF_RESOURCE_TYPE];
}; };
#define SLAVE_EVENT_EQ_SIZE 128 #define SLAVE_EVENT_EQ_SIZE 128
...@@ -1111,7 +1128,7 @@ int mlx4_change_port_types(struct mlx4_dev *dev, ...@@ -1111,7 +1128,7 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index); void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz); int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz);
...@@ -1252,4 +1269,6 @@ static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev) ...@@ -1252,4 +1269,6 @@ static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev)
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work); void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);
void mlx4_init_quotas(struct mlx4_dev *dev);
#endif /* MLX4_H */ #endif /* MLX4_H */
...@@ -755,14 +755,14 @@ int mlx4_init_mr_table(struct mlx4_dev *dev) ...@@ -755,14 +755,14 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
struct mlx4_mr_table *mr_table = &priv->mr_table; struct mlx4_mr_table *mr_table = &priv->mr_table;
int err; int err;
if (!is_power_of_2(dev->caps.num_mpts))
return -EINVAL;
/* Nothing to do for slaves - all MR handling is forwarded /* Nothing to do for slaves - all MR handling is forwarded
* to the master */ * to the master */
if (mlx4_is_slave(dev)) if (mlx4_is_slave(dev))
return 0; return 0;
if (!is_power_of_2(dev->caps.num_mpts))
return -EINVAL;
err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
~0, dev->caps.reserved_mrws, 0); ~0, dev->caps.reserved_mrws, 0);
if (err) if (err)
......
...@@ -178,13 +178,24 @@ EXPORT_SYMBOL_GPL(__mlx4_register_mac); ...@@ -178,13 +178,24 @@ EXPORT_SYMBOL_GPL(__mlx4_register_mac);
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{ {
u64 out_param = 0; u64 out_param = 0;
int err; int err = -EINVAL;
if (mlx4_is_mfunc(dev)) { if (mlx4_is_mfunc(dev)) {
set_param_l(&out_param, port); if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, err = mlx4_cmd_imm(dev, mac, &out_param,
RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, ((u32) port) << 8 | (u32) RES_MAC,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
if (err && err == -EINVAL && mlx4_is_slave(dev)) {
/* retry using old REG_MAC format */
set_param_l(&out_param, port);
err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (!err)
dev->flags |= MLX4_FLAG_OLD_REG_MAC;
}
if (err) if (err)
return err; return err;
...@@ -231,10 +242,18 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) ...@@ -231,10 +242,18 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
u64 out_param = 0; u64 out_param = 0;
if (mlx4_is_mfunc(dev)) { if (mlx4_is_mfunc(dev)) {
set_param_l(&out_param, port); if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
(void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, (void) mlx4_cmd_imm(dev, mac, &out_param,
RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, ((u32) port) << 8 | (u32) RES_MAC,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
} else {
/* use old unregister mac format */
set_param_l(&out_param, port);
(void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
return; return;
} }
__mlx4_unregister_mac(dev, port, mac); __mlx4_unregister_mac(dev, port, mac);
...@@ -284,7 +303,7 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, ...@@ -284,7 +303,7 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE); memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port; in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox); mlx4_free_cmd_mailbox(dev, mailbox);
...@@ -370,9 +389,12 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) ...@@ -370,9 +389,12 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
u64 out_param = 0; u64 out_param = 0;
int err; int err;
if (vlan > 4095)
return -EINVAL;
if (mlx4_is_mfunc(dev)) { if (mlx4_is_mfunc(dev)) {
set_param_l(&out_param, port); err = mlx4_cmd_imm(dev, vlan, &out_param,
err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN, ((u32) port) << 8 | (u32) RES_VLAN,
RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (!err) if (!err)
...@@ -384,23 +406,26 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) ...@@ -384,23 +406,26 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
} }
EXPORT_SYMBOL_GPL(mlx4_register_vlan); EXPORT_SYMBOL_GPL(mlx4_register_vlan);
void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{ {
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
int index;
if (index < MLX4_VLAN_REGULAR) { mutex_lock(&table->mutex);
mlx4_warn(dev, "Trying to free special vlan index %d\n", index); if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
return; mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
goto out;
} }
mutex_lock(&table->mutex); if (index < MLX4_VLAN_REGULAR) {
if (!table->refs[index]) { mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
mlx4_warn(dev, "No vlan entry for index %d\n", index);
goto out; goto out;
} }
if (--table->refs[index]) { if (--table->refs[index]) {
mlx4_dbg(dev, "Have more references for index %d," mlx4_dbg(dev, "Have %d more references for index %d,"
"no need to modify vlan table\n", index); "no need to modify vlan table\n", table->refs[index],
index);
goto out; goto out;
} }
table->entries[index] = 0; table->entries[index] = 0;
...@@ -410,23 +435,19 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) ...@@ -410,23 +435,19 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
mutex_unlock(&table->mutex); mutex_unlock(&table->mutex);
} }
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{ {
u64 in_param = 0; u64 out_param = 0;
int err;
if (mlx4_is_mfunc(dev)) { if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, port); (void) mlx4_cmd_imm(dev, vlan, &out_param,
err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP, ((u32) port) << 8 | (u32) RES_VLAN,
MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, RES_OP_RESERVE_AND_MAP,
MLX4_CMD_WRAPPED); MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
if (!err) MLX4_CMD_WRAPPED);
mlx4_warn(dev, "Failed freeing vlan at index:%d\n",
index);
return; return;
} }
__mlx4_unregister_vlan(dev, port, index); __mlx4_unregister_vlan(dev, port, vlan);
} }
EXPORT_SYMBOL_GPL(mlx4_unregister_vlan); EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
......
...@@ -480,8 +480,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev) ...@@ -480,8 +480,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
*/ */
err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps, err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
(1 << 23) - 1, dev->phys_caps.base_sqpn + 8 + (1 << 23) - 1, mlx4_num_reserved_sqps(dev),
16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev),
reserved_from_top); reserved_from_top);
if (err) if (err)
return err; return err;
......
...@@ -55,6 +55,14 @@ struct mac_res { ...@@ -55,6 +55,14 @@ struct mac_res {
u8 port; u8 port;
}; };
struct vlan_res {
struct list_head list;
u16 vlan;
int ref_count;
int vlan_index;
u8 port;
};
struct res_common { struct res_common {
struct list_head list; struct list_head list;
struct rb_node node; struct rb_node node;
...@@ -266,6 +274,7 @@ static const char *ResourceType(enum mlx4_resource rt) ...@@ -266,6 +274,7 @@ static const char *ResourceType(enum mlx4_resource rt)
case RES_MPT: return "RES_MPT"; case RES_MPT: return "RES_MPT";
case RES_MTT: return "RES_MTT"; case RES_MTT: return "RES_MTT";
case RES_MAC: return "RES_MAC"; case RES_MAC: return "RES_MAC";
case RES_VLAN: return "RES_VLAN";
case RES_EQ: return "RES_EQ"; case RES_EQ: return "RES_EQ";
case RES_COUNTER: return "RES_COUNTER"; case RES_COUNTER: return "RES_COUNTER";
case RES_FS_RULE: return "RES_FS_RULE"; case RES_FS_RULE: return "RES_FS_RULE";
...@@ -274,10 +283,139 @@ static const char *ResourceType(enum mlx4_resource rt) ...@@ -274,10 +283,139 @@ static const char *ResourceType(enum mlx4_resource rt)
}; };
} }
static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
enum mlx4_resource res_type, int count,
int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct resource_allocator *res_alloc =
&priv->mfunc.master.res_tracker.res_alloc[res_type];
int err = -EINVAL;
int allocated, free, reserved, guaranteed, from_free;
if (slave > dev->num_vfs)
return -EINVAL;
spin_lock(&res_alloc->alloc_lock);
allocated = (port > 0) ?
res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
res_alloc->allocated[slave];
free = (port > 0) ? res_alloc->res_port_free[port - 1] :
res_alloc->res_free;
reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
res_alloc->res_reserved;
guaranteed = res_alloc->guaranteed[slave];
if (allocated + count > res_alloc->quota[slave])
goto out;
if (allocated + count <= guaranteed) {
err = 0;
} else {
/* portion may need to be obtained from free area */
if (guaranteed - allocated > 0)
from_free = count - (guaranteed - allocated);
else
from_free = count;
if (free - from_free > reserved)
err = 0;
}
if (!err) {
/* grant the request */
if (port > 0) {
res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
res_alloc->res_port_free[port - 1] -= count;
} else {
res_alloc->allocated[slave] += count;
res_alloc->res_free -= count;
}
}
out:
spin_unlock(&res_alloc->alloc_lock);
return err;
}
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
enum mlx4_resource res_type, int count,
int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct resource_allocator *res_alloc =
&priv->mfunc.master.res_tracker.res_alloc[res_type];
if (slave > dev->num_vfs)
return;
spin_lock(&res_alloc->alloc_lock);
if (port > 0) {
res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
res_alloc->res_port_free[port - 1] += count;
} else {
res_alloc->allocated[slave] -= count;
res_alloc->res_free += count;
}
spin_unlock(&res_alloc->alloc_lock);
return;
}
static inline void initialize_res_quotas(struct mlx4_dev *dev,
struct resource_allocator *res_alloc,
enum mlx4_resource res_type,
int vf, int num_instances)
{
res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
if (vf == mlx4_master_func_num(dev)) {
res_alloc->res_free = num_instances;
if (res_type == RES_MTT) {
/* reserved mtts will be taken out of the PF allocation */
res_alloc->res_free += dev->caps.reserved_mtts;
res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
res_alloc->quota[vf] += dev->caps.reserved_mtts;
}
}
}
void mlx4_init_quotas(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int pf;
/* quotas for VFs are initialized in mlx4_slave_cap */
if (mlx4_is_slave(dev))
return;
if (!mlx4_is_mfunc(dev)) {
dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
mlx4_num_reserved_sqps(dev);
dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
return;
}
pf = mlx4_master_func_num(dev);
dev->quotas.qp =
priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
dev->quotas.cq =
priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
dev->quotas.srq =
priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
dev->quotas.mtt =
priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
dev->quotas.mpt =
priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
int mlx4_init_resource_tracker(struct mlx4_dev *dev) int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{ {
struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_priv *priv = mlx4_priv(dev);
int i; int i, j;
int t; int t;
priv->mfunc.master.res_tracker.slave_list = priv->mfunc.master.res_tracker.slave_list =
...@@ -298,8 +436,105 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) ...@@ -298,8 +436,105 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT; priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
struct resource_allocator *res_alloc =
&priv->mfunc.master.res_tracker.res_alloc[i];
res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
if (i == RES_MAC || i == RES_VLAN)
res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
(dev->num_vfs + 1) * sizeof(int),
GFP_KERNEL);
else
res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
if (!res_alloc->quota || !res_alloc->guaranteed ||
!res_alloc->allocated)
goto no_mem_err;
spin_lock_init(&res_alloc->alloc_lock);
for (t = 0; t < dev->num_vfs + 1; t++) {
switch (i) {
case RES_QP:
initialize_res_quotas(dev, res_alloc, RES_QP,
t, dev->caps.num_qps -
dev->caps.reserved_qps -
mlx4_num_reserved_sqps(dev));
break;
case RES_CQ:
initialize_res_quotas(dev, res_alloc, RES_CQ,
t, dev->caps.num_cqs -
dev->caps.reserved_cqs);
break;
case RES_SRQ:
initialize_res_quotas(dev, res_alloc, RES_SRQ,
t, dev->caps.num_srqs -
dev->caps.reserved_srqs);
break;
case RES_MPT:
initialize_res_quotas(dev, res_alloc, RES_MPT,
t, dev->caps.num_mpts -
dev->caps.reserved_mrws);
break;
case RES_MTT:
initialize_res_quotas(dev, res_alloc, RES_MTT,
t, dev->caps.num_mtts -
dev->caps.reserved_mtts);
break;
case RES_MAC:
if (t == mlx4_master_func_num(dev)) {
res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
res_alloc->guaranteed[t] = 2;
for (j = 0; j < MLX4_MAX_PORTS; j++)
res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
} else {
res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
res_alloc->guaranteed[t] = 2;
}
break;
case RES_VLAN:
if (t == mlx4_master_func_num(dev)) {
res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
for (j = 0; j < MLX4_MAX_PORTS; j++)
res_alloc->res_port_free[j] =
res_alloc->quota[t];
} else {
res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
res_alloc->guaranteed[t] = 0;
}
break;
case RES_COUNTER:
res_alloc->quota[t] = dev->caps.max_counters;
res_alloc->guaranteed[t] = 0;
if (t == mlx4_master_func_num(dev))
res_alloc->res_free = res_alloc->quota[t];
break;
default:
break;
}
if (i == RES_MAC || i == RES_VLAN) {
for (j = 0; j < MLX4_MAX_PORTS; j++)
res_alloc->res_port_rsvd[j] +=
res_alloc->guaranteed[t];
} else {
res_alloc->res_reserved += res_alloc->guaranteed[t];
}
}
}
spin_lock_init(&priv->mfunc.master.res_tracker.lock); spin_lock_init(&priv->mfunc.master.res_tracker.lock);
return 0 ; return 0;
no_mem_err:
for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
}
return -ENOMEM;
} }
void mlx4_free_resource_tracker(struct mlx4_dev *dev, void mlx4_free_resource_tracker(struct mlx4_dev *dev,
...@@ -309,13 +544,28 @@ void mlx4_free_resource_tracker(struct mlx4_dev *dev, ...@@ -309,13 +544,28 @@ void mlx4_free_resource_tracker(struct mlx4_dev *dev,
int i; int i;
if (priv->mfunc.master.res_tracker.slave_list) { if (priv->mfunc.master.res_tracker.slave_list) {
if (type != RES_TR_FREE_STRUCTS_ONLY) if (type != RES_TR_FREE_STRUCTS_ONLY) {
for (i = 0 ; i < dev->num_slaves; i++) for (i = 0; i < dev->num_slaves; i++) {
if (type == RES_TR_FREE_ALL || if (type == RES_TR_FREE_ALL ||
dev->caps.function != i) dev->caps.function != i)
mlx4_delete_all_resources_for_slave(dev, i); mlx4_delete_all_resources_for_slave(dev, i);
}
/* free master's vlans */
i = dev->caps.function;
mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
rem_slave_vlans(dev, i);
mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
}
if (type != RES_TR_FREE_SLAVES_ONLY) { if (type != RES_TR_FREE_SLAVES_ONLY) {
for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
}
kfree(priv->mfunc.master.res_tracker.slave_list); kfree(priv->mfunc.master.res_tracker.slave_list);
priv->mfunc.master.res_tracker.slave_list = NULL; priv->mfunc.master.res_tracker.slave_list = NULL;
} }
...@@ -1229,12 +1479,19 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, ...@@ -1229,12 +1479,19 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
case RES_OP_RESERVE: case RES_OP_RESERVE:
count = get_param_l(&in_param); count = get_param_l(&in_param);
align = get_param_h(&in_param); align = get_param_h(&in_param);
err = __mlx4_qp_reserve_range(dev, count, align, &base); err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
if (err) if (err)
return err; return err;
err = __mlx4_qp_reserve_range(dev, count, align, &base);
if (err) {
mlx4_release_resource(dev, slave, RES_QP, count, 0);
return err;
}
err = add_res_range(dev, slave, base, count, RES_QP, 0); err = add_res_range(dev, slave, base, count, RES_QP, 0);
if (err) { if (err) {
mlx4_release_resource(dev, slave, RES_QP, count, 0);
__mlx4_qp_release_range(dev, base, count); __mlx4_qp_release_range(dev, base, count);
return err; return err;
} }
...@@ -1282,15 +1539,24 @@ static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, ...@@ -1282,15 +1539,24 @@ static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err; return err;
order = get_param_l(&in_param); order = get_param_l(&in_param);
err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
if (err)
return err;
base = __mlx4_alloc_mtt_range(dev, order); base = __mlx4_alloc_mtt_range(dev, order);
if (base == -1) if (base == -1) {
mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
return -ENOMEM; return -ENOMEM;
}
err = add_res_range(dev, slave, base, 1, RES_MTT, order); err = add_res_range(dev, slave, base, 1, RES_MTT, order);
if (err) if (err) {
mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
__mlx4_free_mtt_range(dev, base, order); __mlx4_free_mtt_range(dev, base, order);
else } else {
set_param_l(out_param, base); set_param_l(out_param, base);
}
return err; return err;
} }
...@@ -1305,13 +1571,20 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, ...@@ -1305,13 +1571,20 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
switch (op) { switch (op) {
case RES_OP_RESERVE: case RES_OP_RESERVE:
err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
if (err)
break;
index = __mlx4_mpt_reserve(dev); index = __mlx4_mpt_reserve(dev);
if (index == -1) if (index == -1) {
mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
break; break;
}
id = index & mpt_mask(dev); id = index & mpt_mask(dev);
err = add_res_range(dev, slave, id, 1, RES_MPT, index); err = add_res_range(dev, slave, id, 1, RES_MPT, index);
if (err) { if (err) {
mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
__mlx4_mpt_release(dev, index); __mlx4_mpt_release(dev, index);
break; break;
} }
...@@ -1345,12 +1618,19 @@ static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, ...@@ -1345,12 +1618,19 @@ static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
switch (op) { switch (op) {
case RES_OP_RESERVE_AND_MAP: case RES_OP_RESERVE_AND_MAP:
err = __mlx4_cq_alloc_icm(dev, &cqn); err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
if (err) if (err)
break; break;
err = __mlx4_cq_alloc_icm(dev, &cqn);
if (err) {
mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
break;
}
err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0); err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
if (err) { if (err) {
mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
__mlx4_cq_free_icm(dev, cqn); __mlx4_cq_free_icm(dev, cqn);
break; break;
} }
...@@ -1373,12 +1653,19 @@ static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, ...@@ -1373,12 +1653,19 @@ static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
switch (op) { switch (op) {
case RES_OP_RESERVE_AND_MAP: case RES_OP_RESERVE_AND_MAP:
err = __mlx4_srq_alloc_icm(dev, &srqn); err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
if (err) if (err)
break; break;
err = __mlx4_srq_alloc_icm(dev, &srqn);
if (err) {
mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
break;
}
err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0); err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
if (err) { if (err) {
mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
__mlx4_srq_free_icm(dev, srqn); __mlx4_srq_free_icm(dev, srqn);
break; break;
} }
...@@ -1399,9 +1686,13 @@ static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port) ...@@ -1399,9 +1686,13 @@ static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct mac_res *res; struct mac_res *res;
if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
return -EINVAL;
res = kzalloc(sizeof *res, GFP_KERNEL); res = kzalloc(sizeof *res, GFP_KERNEL);
if (!res) if (!res) {
mlx4_release_resource(dev, slave, RES_MAC, 1, port);
return -ENOMEM; return -ENOMEM;
}
res->mac = mac; res->mac = mac;
res->port = (u8) port; res->port = (u8) port;
list_add_tail(&res->list, list_add_tail(&res->list,
...@@ -1421,6 +1712,7 @@ static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac, ...@@ -1421,6 +1712,7 @@ static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
list_for_each_entry_safe(res, tmp, mac_list, list) { list_for_each_entry_safe(res, tmp, mac_list, list) {
if (res->mac == mac && res->port == (u8) port) { if (res->mac == mac && res->port == (u8) port) {
list_del(&res->list); list_del(&res->list);
mlx4_release_resource(dev, slave, RES_MAC, 1, port);
kfree(res); kfree(res);
break; break;
} }
...@@ -1438,12 +1730,13 @@ static void rem_slave_macs(struct mlx4_dev *dev, int slave) ...@@ -1438,12 +1730,13 @@ static void rem_slave_macs(struct mlx4_dev *dev, int slave)
list_for_each_entry_safe(res, tmp, mac_list, list) { list_for_each_entry_safe(res, tmp, mac_list, list) {
list_del(&res->list); list_del(&res->list);
__mlx4_unregister_mac(dev, res->port, res->mac); __mlx4_unregister_mac(dev, res->port, res->mac);
mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
kfree(res); kfree(res);
} }
} }
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param) u64 in_param, u64 *out_param, int in_port)
{ {
int err = -EINVAL; int err = -EINVAL;
int port; int port;
...@@ -1452,7 +1745,7 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, ...@@ -1452,7 +1745,7 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (op != RES_OP_RESERVE_AND_MAP) if (op != RES_OP_RESERVE_AND_MAP)
return err; return err;
port = get_param_l(out_param); port = !in_port ? get_param_l(out_param) : in_port;
mac = in_param; mac = in_param;
err = __mlx4_register_mac(dev, port, mac); err = __mlx4_register_mac(dev, port, mac);
...@@ -1469,12 +1762,114 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, ...@@ -1469,12 +1762,114 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err; return err;
} }
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
u64 in_param, u64 *out_param) int port, int vlan_index)
{ {
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *vlan_list =
&tracker->slave_list[slave].res_list[RES_VLAN];
struct vlan_res *res, *tmp;
list_for_each_entry_safe(res, tmp, vlan_list, list) {
if (res->vlan == vlan && res->port == (u8) port) {
/* vlan found. update ref count */
++res->ref_count;
return 0;
}
}
if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
return -EINVAL;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res) {
mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
return -ENOMEM;
}
res->vlan = vlan;
res->port = (u8) port;
res->vlan_index = vlan_index;
res->ref_count = 1;
list_add_tail(&res->list,
&tracker->slave_list[slave].res_list[RES_VLAN]);
return 0; return 0;
} }
/* Drop one reference on the slave's tracked (vlan, port) entry.
 * When the last reference goes away, the entry is unlinked, the quota
 * accounting is released, and the entry is freed.  At most one entry
 * can match, so the scan stops at the first hit.
 */
static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
				int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *entry, *next;

	list_for_each_entry_safe(entry, next, vlan_list, list) {
		if (entry->vlan != vlan || entry->port != (u8) port)
			continue;
		if (!--entry->ref_count) {
			/* last reference: return the quota and free */
			list_del(&entry->list);
			mlx4_release_resource(dev, slave, RES_VLAN,
					      1, port);
			kfree(entry);
		}
		break;
	}
}
/* Tear down every vlan the slave still holds: unregister each vlan as
 * many times as the slave referenced it, release the quota accounting,
 * and free the tracking entries.
 */
static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *entry, *next;
	int ref;

	list_for_each_entry_safe(entry, next, vlan_list, list) {
		list_del(&entry->list);
		/* dereference the vlan the num times the slave referenced it */
		for (ref = 0; ref < entry->ref_count; ref++)
			__mlx4_unregister_vlan(dev, entry->port, entry->vlan);
		mlx4_release_resource(dev, slave, RES_VLAN, 1, entry->port);
		kfree(entry);
	}
}
/* ALLOC_RES wrapper for RES_VLAN.
 *
 * The port comes from in_modifier (in_port) on new-style calls, or from
 * out_param on old-style calls.  Old-style callers (in_port == 0) are
 * served as a NOP for compatibility, and the slave is flagged as using
 * the old vlan API.  New-style calls register the vlan, record it in
 * the slave's tracked list, and return the vlan table index through
 * out_param; on tracking failure the registration is rolled back.
 */
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param, int in_port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int vlan_index;
	u16 vlan;
	int port;
	int err;

	port = in_port ? in_port : get_param_l(out_param);
	if (!port || op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
		slave_state[slave].old_vlan_api = true;
		return 0;
	}

	vlan = (u16) in_param;

	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
	if (err)
		return err;

	set_param_l(out_param, (u32) vlan_index);
	err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
	if (err)
		/* could not track it for the slave: undo the registration */
		__mlx4_unregister_vlan(dev, port, vlan);

	return err;
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param) u64 in_param, u64 *out_param)
{ {
...@@ -1484,15 +1879,23 @@ static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, ...@@ -1484,15 +1879,23 @@ static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (op != RES_OP_RESERVE) if (op != RES_OP_RESERVE)
return -EINVAL; return -EINVAL;
err = __mlx4_counter_alloc(dev, &index); err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
if (err) if (err)
return err; return err;
err = __mlx4_counter_alloc(dev, &index);
if (err) {
mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
return err;
}
err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0); err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
if (err) if (err) {
__mlx4_counter_free(dev, index); __mlx4_counter_free(dev, index);
else mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
} else {
set_param_l(out_param, index); set_param_l(out_param, index);
}
return err; return err;
} }
...@@ -1528,7 +1931,7 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, ...@@ -1528,7 +1931,7 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
int err; int err;
int alop = vhcr->op_modifier; int alop = vhcr->op_modifier;
switch (vhcr->in_modifier) { switch (vhcr->in_modifier & 0xFF) {
case RES_QP: case RES_QP:
err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop, err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param); vhcr->in_param, &vhcr->out_param);
...@@ -1556,12 +1959,14 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, ...@@ -1556,12 +1959,14 @@ int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
case RES_MAC: case RES_MAC:
err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop, err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param); vhcr->in_param, &vhcr->out_param,
(vhcr->in_modifier >> 8) & 0xFF);
break; break;
case RES_VLAN: case RES_VLAN:
err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop, err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param); vhcr->in_param, &vhcr->out_param,
(vhcr->in_modifier >> 8) & 0xFF);
break; break;
case RES_COUNTER: case RES_COUNTER:
...@@ -1597,6 +2002,7 @@ static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, ...@@ -1597,6 +2002,7 @@ static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
err = rem_res_range(dev, slave, base, count, RES_QP, 0); err = rem_res_range(dev, slave, base, count, RES_QP, 0);
if (err) if (err)
break; break;
mlx4_release_resource(dev, slave, RES_QP, count, 0);
__mlx4_qp_release_range(dev, base, count); __mlx4_qp_release_range(dev, base, count);
break; break;
case RES_OP_MAP_ICM: case RES_OP_MAP_ICM:
...@@ -1634,8 +2040,10 @@ static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, ...@@ -1634,8 +2040,10 @@ static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
base = get_param_l(&in_param); base = get_param_l(&in_param);
order = get_param_h(&in_param); order = get_param_h(&in_param);
err = rem_res_range(dev, slave, base, 1, RES_MTT, order); err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
if (!err) if (!err) {
mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
__mlx4_free_mtt_range(dev, base, order); __mlx4_free_mtt_range(dev, base, order);
}
return err; return err;
} }
...@@ -1660,6 +2068,7 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, ...@@ -1660,6 +2068,7 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
err = rem_res_range(dev, slave, id, 1, RES_MPT, 0); err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
if (err) if (err)
break; break;
mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
__mlx4_mpt_release(dev, index); __mlx4_mpt_release(dev, index);
break; break;
case RES_OP_MAP_ICM: case RES_OP_MAP_ICM:
...@@ -1694,6 +2103,7 @@ static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, ...@@ -1694,6 +2103,7 @@ static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (err) if (err)
break; break;
mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
__mlx4_cq_free_icm(dev, cqn); __mlx4_cq_free_icm(dev, cqn);
break; break;
...@@ -1718,6 +2128,7 @@ static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, ...@@ -1718,6 +2128,7 @@ static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
if (err) if (err)
break; break;
mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
__mlx4_srq_free_icm(dev, srqn); __mlx4_srq_free_icm(dev, srqn);
break; break;
...@@ -1730,14 +2141,14 @@ static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, ...@@ -1730,14 +2141,14 @@ static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
} }
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param) u64 in_param, u64 *out_param, int in_port)
{ {
int port; int port;
int err = 0; int err = 0;
switch (op) { switch (op) {
case RES_OP_RESERVE_AND_MAP: case RES_OP_RESERVE_AND_MAP:
port = get_param_l(out_param); port = !in_port ? get_param_l(out_param) : in_port;
mac_del_from_slave(dev, slave, in_param, port); mac_del_from_slave(dev, slave, in_param, port);
__mlx4_unregister_mac(dev, port, in_param); __mlx4_unregister_mac(dev, port, in_param);
break; break;
...@@ -1751,9 +2162,27 @@ static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, ...@@ -1751,9 +2162,27 @@ static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
} }
static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param) u64 in_param, u64 *out_param, int port)
{ {
return 0; struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
int err = 0;
switch (op) {
case RES_OP_RESERVE_AND_MAP:
if (slave_state[slave].old_vlan_api)
return 0;
if (!port)
return -EINVAL;
vlan_del_from_slave(dev, slave, in_param, port);
__mlx4_unregister_vlan(dev, port, in_param);
break;
default:
err = -EINVAL;
break;
}
return err;
} }
static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
...@@ -1771,6 +2200,7 @@ static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, ...@@ -1771,6 +2200,7 @@ static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
return err; return err;
__mlx4_counter_free(dev, index); __mlx4_counter_free(dev, index);
mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
return err; return err;
} }
...@@ -1803,7 +2233,7 @@ int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, ...@@ -1803,7 +2233,7 @@ int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
int err = -EINVAL; int err = -EINVAL;
int alop = vhcr->op_modifier; int alop = vhcr->op_modifier;
switch (vhcr->in_modifier) { switch (vhcr->in_modifier & 0xFF) {
case RES_QP: case RES_QP:
err = qp_free_res(dev, slave, vhcr->op_modifier, alop, err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param); vhcr->in_param);
...@@ -1831,12 +2261,14 @@ int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, ...@@ -1831,12 +2261,14 @@ int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
case RES_MAC: case RES_MAC:
err = mac_free_res(dev, slave, vhcr->op_modifier, alop, err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param); vhcr->in_param, &vhcr->out_param,
(vhcr->in_modifier >> 8) & 0xFF);
break; break;
case RES_VLAN: case RES_VLAN:
err = vlan_free_res(dev, slave, vhcr->op_modifier, alop, err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param); vhcr->in_param, &vhcr->out_param,
(vhcr->in_modifier >> 8) & 0xFF);
break; break;
case RES_COUNTER: case RES_COUNTER:
...@@ -3498,6 +3930,11 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave) ...@@ -3498,6 +3930,11 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_QP]); &tracker->res_tree[RES_QP]);
list_del(&qp->com.list); list_del(&qp->com.list);
spin_unlock_irq(mlx4_tlock(dev)); spin_unlock_irq(mlx4_tlock(dev));
if (!valid_reserved(dev, slave, qpn)) {
__mlx4_qp_release_range(dev, qpn, 1);
mlx4_release_resource(dev, slave,
RES_QP, 1, 0);
}
kfree(qp); kfree(qp);
state = 0; state = 0;
break; break;
...@@ -3569,6 +4006,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave) ...@@ -3569,6 +4006,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_SRQ]); &tracker->res_tree[RES_SRQ]);
list_del(&srq->com.list); list_del(&srq->com.list);
spin_unlock_irq(mlx4_tlock(dev)); spin_unlock_irq(mlx4_tlock(dev));
mlx4_release_resource(dev, slave,
RES_SRQ, 1, 0);
kfree(srq); kfree(srq);
state = 0; state = 0;
break; break;
...@@ -3635,6 +4074,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave) ...@@ -3635,6 +4074,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_CQ]); &tracker->res_tree[RES_CQ]);
list_del(&cq->com.list); list_del(&cq->com.list);
spin_unlock_irq(mlx4_tlock(dev)); spin_unlock_irq(mlx4_tlock(dev));
mlx4_release_resource(dev, slave,
RES_CQ, 1, 0);
kfree(cq); kfree(cq);
state = 0; state = 0;
break; break;
...@@ -3698,6 +4139,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave) ...@@ -3698,6 +4139,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_MPT]); &tracker->res_tree[RES_MPT]);
list_del(&mpt->com.list); list_del(&mpt->com.list);
spin_unlock_irq(mlx4_tlock(dev)); spin_unlock_irq(mlx4_tlock(dev));
mlx4_release_resource(dev, slave,
RES_MPT, 1, 0);
kfree(mpt); kfree(mpt);
state = 0; state = 0;
break; break;
...@@ -3767,6 +4210,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave) ...@@ -3767,6 +4210,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
&tracker->res_tree[RES_MTT]); &tracker->res_tree[RES_MTT]);
list_del(&mtt->com.list); list_del(&mtt->com.list);
spin_unlock_irq(mlx4_tlock(dev)); spin_unlock_irq(mlx4_tlock(dev));
mlx4_release_resource(dev, slave, RES_MTT,
1 << mtt->order, 0);
kfree(mtt); kfree(mtt);
state = 0; state = 0;
break; break;
...@@ -3925,6 +4370,7 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave) ...@@ -3925,6 +4370,7 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
list_del(&counter->com.list); list_del(&counter->com.list);
kfree(counter); kfree(counter);
__mlx4_counter_free(dev, index); __mlx4_counter_free(dev, index);
mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
} }
} }
spin_unlock_irq(mlx4_tlock(dev)); spin_unlock_irq(mlx4_tlock(dev));
...@@ -3964,7 +4410,7 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) ...@@ -3964,7 +4410,7 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_priv *priv = mlx4_priv(dev);
mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
/*VLAN*/ rem_slave_vlans(dev, slave);
rem_slave_macs(dev, slave); rem_slave_macs(dev, slave);
rem_slave_fs_rule(dev, slave); rem_slave_fs_rule(dev, slave);
rem_slave_qps(dev, slave); rem_slave_qps(dev, slave);
...@@ -4081,7 +4527,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) ...@@ -4081,7 +4527,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors && if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
NO_INDX != work->orig_vlan_ix) NO_INDX != work->orig_vlan_ix)
__mlx4_unregister_vlan(&work->priv->dev, work->port, __mlx4_unregister_vlan(&work->priv->dev, work->port,
work->orig_vlan_ix); work->orig_vlan_id);
out: out:
kfree(work); kfree(work);
return; return;
......
...@@ -54,6 +54,7 @@ enum { ...@@ -54,6 +54,7 @@ enum {
MLX4_FLAG_MASTER = 1 << 2, MLX4_FLAG_MASTER = 1 << 2,
MLX4_FLAG_SLAVE = 1 << 3, MLX4_FLAG_SLAVE = 1 << 3,
MLX4_FLAG_SRIOV = 1 << 4, MLX4_FLAG_SRIOV = 1 << 4,
MLX4_FLAG_OLD_REG_MAC = 1 << 6,
}; };
enum { enum {
...@@ -640,12 +641,23 @@ struct mlx4_counter { ...@@ -640,12 +641,23 @@ struct mlx4_counter {
__be64 tx_bytes; __be64 tx_bytes;
}; };
struct mlx4_quotas {
int qp;
int cq;
int srq;
int mpt;
int mtt;
int counter;
int xrcd;
};
struct mlx4_dev { struct mlx4_dev {
struct pci_dev *pdev; struct pci_dev *pdev;
unsigned long flags; unsigned long flags;
unsigned long num_slaves; unsigned long num_slaves;
struct mlx4_caps caps; struct mlx4_caps caps;
struct mlx4_phys_caps phys_caps; struct mlx4_phys_caps phys_caps;
struct mlx4_quotas quotas;
struct radix_tree_root qp_table_tree; struct radix_tree_root qp_table_tree;
u8 rev_id; u8 rev_id;
char board_id[MLX4_BOARD_ID_LEN]; char board_id[MLX4_BOARD_ID_LEN];
...@@ -771,6 +783,12 @@ static inline int mlx4_is_master(struct mlx4_dev *dev) ...@@ -771,6 +783,12 @@ static inline int mlx4_is_master(struct mlx4_dev *dev)
return dev->flags & MLX4_FLAG_MASTER; return dev->flags & MLX4_FLAG_MASTER;
} }
static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev)
{
return dev->phys_caps.base_sqpn + 8 +
16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev);
}
static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
{ {
return (qpn < dev->phys_caps.base_sqpn + 8 + return (qpn < dev->phys_caps.base_sqpn + 8 +
...@@ -1078,7 +1096,7 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, ...@@ -1078,7 +1096,7 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
u8 *pg, u16 *ratelimit); u8 *pg, u16 *ratelimit);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index); void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
int npages, u64 iova, u32 *lkey, u32 *rkey); int npages, u64 iova, u32 *lkey, u32 *rkey);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment