Commit 7319a1a4 authored by David S. Miller

Merge branch 'mlx4'

Or Gerlitz says:

====================
mlx4 SRIOV fixes

This series contains a few SRIOV-related fixes from Matan; please apply to net.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a0e2c822 09e05c3f
@@ -1680,7 +1680,7 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
         goto unlock;
 
     update_params.smac_index = new_smac_index;
-    if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
+    if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
                        &update_params)) {
         release_mac = new_smac;
         goto unlock;
@@ -2389,6 +2389,22 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
 }
 EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
 
+static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
+{
+    struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
+    int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
+            + 1;
+    int max_port = min_port +
+        bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+
+    if (port < min_port)
+        port = min_port;
+    else if (port >= max_port)
+        port = max_port - 1;
+
+    return port;
+}
+
 int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
 {
     struct mlx4_priv *priv = mlx4_priv(dev);
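
The helper added in the hunk above clamps a caller-supplied port number into the range of ports that are actually active for the given VF, so a host request for a port the VF does not own falls back to the closest valid one. Below is a minimal userspace sketch of the same clamping idea; the bitmap handling and names are stand-ins for the mlx4 structures, not the kernel API.

#include <stdio.h>

/* Stand-in for mlx4_slaves_closest_port(): assuming the VF's active ports
 * form a contiguous run of bits, the valid range is
 * [first set bit + 1, first set bit + number of set bits]. */
static int closest_port(unsigned long active_ports, int port)
{
    int min_port = __builtin_ctzl(active_ports) + 1;
    int max_port = min_port + __builtin_popcountl(active_ports);

    if (port < min_port)
        port = min_port;
    else if (port >= max_port)
        port = max_port - 1;
    return port;
}

int main(void)
{
    unsigned long active = 1UL << 1;    /* VF owns physical port 2 only */

    for (int req = 1; req <= 2; req++)
        printf("requested port %d -> using port %d\n",
               req, closest_port(active, req));
    return 0;
}
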
@@ -2402,6 +2418,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
     if (slave < 0)
         return -EINVAL;
 
+    port = mlx4_slaves_closest_port(dev, slave, port);
     s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
     s_info->mac = mac;
     mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
@@ -2428,6 +2445,7 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
     if (slave < 0)
         return -EINVAL;
 
+    port = mlx4_slaves_closest_port(dev, slave, port);
     vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
 
     if ((0 == vlan) && (0 == qos))
@@ -2455,6 +2473,7 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
     struct mlx4_priv *priv;
 
     priv = mlx4_priv(dev);
+    port = mlx4_slaves_closest_port(dev, slave, port);
     vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 
     if (MLX4_VGT != vp_oper->state.default_vlan) {
@@ -2482,6 +2501,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
     if (slave < 0)
         return -EINVAL;
 
+    port = mlx4_slaves_closest_port(dev, slave, port);
     s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
     s_info->spoofchk = setting;
@@ -2535,6 +2555,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
     if (slave < 0)
         return -EINVAL;
 
+    port = mlx4_slaves_closest_port(dev, slave, port);
     switch (link_state) {
     case IFLA_VF_LINK_STATE_AUTO:
         /* get current link state */
@@ -390,13 +390,14 @@ int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
 #define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
-int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
                    enum mlx4_update_qp_attr attr,
                    struct mlx4_update_qp_params *params)
 {
     struct mlx4_cmd_mailbox *mailbox;
     struct mlx4_update_qp_context *cmd;
     u64 pri_addr_path_mask = 0;
+    u64 qp_mask = 0;
     int err = 0;
 
     mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -413,9 +414,16 @@ int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
         cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
     }
 
+    if (attr & MLX4_UPDATE_QP_VSD) {
+        qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
+        if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
+            cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
+    }
+
     cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+    cmd->qp_mask = cpu_to_be64(qp_mask);
 
-    err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+    err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
                    MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
                    MLX4_CMD_NATIVE);
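
After this change the update-QP path works from a raw QP number plus a pair of bitmasks: each attribute the caller selects sets a bit in either the primary-address-path mask or the general qp_mask, and both words are converted to big endian before the command is handed to firmware. The toy program below walks through that mask assembly; the enum values and bit positions are illustrative placeholders, not the real mlx4 definitions.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative attribute selectors and bit positions (not the mlx4 values). */
enum { UPD_QP_SMAC = 1 << 0, UPD_QP_VSD = 1 << 2 };
enum { PATH_MASK_MAC_INDEX = 0, QP_MASK_VSD = 1 };

int main(void)
{
    unsigned attr = UPD_QP_SMAC | UPD_QP_VSD;
    uint64_t pri_addr_path_mask = 0, qp_mask = 0;

    if (attr & UPD_QP_SMAC)
        pri_addr_path_mask |= 1ULL << PATH_MASK_MAC_INDEX;
    if (attr & UPD_QP_VSD)
        qp_mask |= 1ULL << QP_MASK_VSD;

    /* htobe64() plays the role of cpu_to_be64() here. */
    printf("primary_addr_path_mask (be64): 0x%016llx\n",
           (unsigned long long)htobe64(pri_addr_path_mask));
    printf("qp_mask                (be64): 0x%016llx\n",
           (unsigned long long)htobe64(qp_mask));
    return 0;
}
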
@@ -702,11 +702,13 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
     struct mlx4_qp_context *qpc = inbox->buf + 8;
     struct mlx4_vport_oper_state *vp_oper;
     struct mlx4_priv *priv;
+    u32 qp_type;
     int port;
 
     port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
     priv = mlx4_priv(dev);
     vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+    qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
 
     if (MLX4_VGT != vp_oper->state.default_vlan) {
         /* the reserved QPs (special, proxy, tunnel)
@@ -715,8 +717,20 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
         if (mlx4_is_qp_reserved(dev, qpn))
             return 0;
 
-        /* force strip vlan by clear vsd */
-        qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+        /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
+        if (qp_type == MLX4_QP_ST_UD ||
+            (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
+            if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
+                *(__be32 *)inbox->buf =
+                    cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
+                                MLX4_QP_OPTPAR_VLAN_STRIPPING);
+                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+            } else {
+                struct mlx4_update_qp_params params = {.flags = 0};
+
+                mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+            }
+        }
 
         if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
             dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
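
The hunk above enforces VLAN stripping for a VF along one of two paths: when the firmware advertises MLX4_BMME_FLAG_VSD_INIT2RTR, the PF ORs the new MLX4_QP_OPTPAR_VLAN_STRIPPING bit into the optional-parameter word at the start of the INIT2RTR mailbox and clears the VSD bit in param3; otherwise it falls back to issuing UPDATE_QP with MLX4_UPDATE_QP_VSD. A standalone sketch of that read-modify-write on a big-endian mailbox word follows; the bit position matches the new enum, while the starting value is arbitrary.

#include <arpa/inet.h>  /* htonl()/ntohl() stand in for cpu_to_be32()/be32_to_cpu() */
#include <stdint.h>
#include <stdio.h>

#define OPTPAR_VLAN_STRIPPING (1u << 21)  /* same bit position as the new enum entry */

int main(void)
{
    /* Pretend this is the first 32-bit word of the INIT2RTR mailbox,
     * stored in big-endian order with some bits already set. */
    uint32_t mailbox_word = htonl(0x00010000);

    /* Read back to CPU order, OR in the VLAN-stripping bit, store as BE again. */
    mailbox_word = htonl(ntohl(mailbox_word) | OPTPAR_VLAN_STRIPPING);

    printf("optpar word is now 0x%08x (CPU order)\n", ntohl(mailbox_word));
    return 0;
}
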
@@ -3998,13 +4012,17 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
     }
 
     port = (rqp->sched_queue >> 6 & 1) + 1;
-    smac_index = cmd->qp_context.pri_path.grh_mylmc;
-    err = mac_find_smac_ix_in_slave(dev, slave, port,
-                                    smac_index, &mac);
-    if (err) {
-        mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
-                 qpn, smac_index);
-        goto err_mac;
+
+    if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
+        smac_index = cmd->qp_context.pri_path.grh_mylmc;
+        err = mac_find_smac_ix_in_slave(dev, slave, port,
+                                        smac_index, &mac);
+        if (err) {
+            mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+                     qpn, smac_index);
+            goto err_mac;
+        }
     }
 
     err = mlx4_cmd(dev, inbox->dma,
@@ -4818,7 +4836,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
 
     upd_context = mailbox->buf;
-    upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
+    upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
 
     spin_lock_irq(mlx4_tlock(dev));
     list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
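
The one-line fix in this hunk comes down to the difference between a bit index and a bit mask: MLX4_UPD_QP_MASK_VSD names a bit position, so it has to be shifted into a mask before the cpu_to_be64() conversion, otherwise the wrong bit reaches the firmware. The snippet below shows the two interpretations side by side, using an assumed index of 2 rather than the real mlx4 value.

#include <stdint.h>
#include <stdio.h>

enum { MASK_VSD = 2 };  /* assumed bit index, not the actual mlx4 constant */

int main(void)
{
    uint64_t as_index = MASK_VSD;          /* 0b010: index mistakenly used as a mask */
    uint64_t as_mask  = 1ULL << MASK_VSD;  /* 0b100: the bit the index actually names */

    printf("index used as mask: 0x%llx\n", (unsigned long long)as_index);
    printf("properly shifted  : 0x%llx\n", (unsigned long long)as_mask);
    return 0;
}
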
@@ -209,6 +209,7 @@ enum {
     MLX4_BMME_FLAG_TYPE_2_WIN      = 1 <<  9,
     MLX4_BMME_FLAG_RESERVED_LKEY   = 1 << 10,
     MLX4_BMME_FLAG_FAST_REG_WR     = 1 << 11,
+    MLX4_BMME_FLAG_VSD_INIT2RTR    = 1 << 28,
 };
 
 enum mlx4_event {
@@ -56,7 +56,8 @@ enum mlx4_qp_optpar {
     MLX4_QP_OPTPAR_RNR_RETRY       = 1 << 13,
     MLX4_QP_OPTPAR_ACK_TIMEOUT     = 1 << 14,
     MLX4_QP_OPTPAR_SCHED_QUEUE     = 1 << 16,
-    MLX4_QP_OPTPAR_COUNTER_INDEX   = 1 << 20
+    MLX4_QP_OPTPAR_COUNTER_INDEX   = 1 << 20,
+    MLX4_QP_OPTPAR_VLAN_STRIPPING  = 1 << 21,
 };
 
 enum mlx4_qp_state {
@@ -423,13 +424,20 @@ struct mlx4_wqe_inline_seg {
 
 enum mlx4_update_qp_attr {
     MLX4_UPDATE_QP_SMAC            = 1 << 0,
+    MLX4_UPDATE_QP_VSD             = 1 << 2,
+    MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 2) - 1
+};
+
+enum mlx4_update_qp_params_flags {
+    MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 0,
 };
 
 struct mlx4_update_qp_params {
     u8  smac_index;
+    u32 flags;
 };
 
-int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
                    enum mlx4_update_qp_attr attr,
                    struct mlx4_update_qp_params *params);
 int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,