Commit 58d7553d authored by David S. Miller

Merge branch 'mlx4'

Merge mlx4 bug fixes from Amir Vadai.
Signed-off-by: David S. Miller <davem@davemloft.net>
Parents: 1b13c97f 3484aac1
@@ -35,6 +35,8 @@
 #include <linux/ethtool.h>
 #include <linux/netdevice.h>
 #include <linux/mlx4/driver.h>
+#include <linux/in.h>
+#include <net/ip.h>
 
 #include "mlx4_en.h"
 #include "en_port.h"
@@ -494,7 +496,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 	mutex_lock(&mdev->state_lock);
 	if (priv->port_up) {
 		port_up = 1;
-		mlx4_en_stop_port(dev);
+		mlx4_en_stop_port(dev, 1);
 	}
 
 	mlx4_en_free_resources(priv);
@@ -589,7 +591,7 @@ static int mlx4_en_set_rxfh_indir(struct net_device *dev,
 	mutex_lock(&mdev->state_lock);
 	if (priv->port_up) {
 		port_up = 1;
-		mlx4_en_stop_port(dev);
+		mlx4_en_stop_port(dev, 1);
 	}
 
 	priv->prof->rss_rings = rss_rings;
@@ -664,27 +666,88 @@ static int mlx4_en_validate_flow(struct net_device *dev,
 	if ((cmd->fs.flow_type & FLOW_EXT)) {
 		if (cmd->fs.m_ext.vlan_etype ||
-		    !(cmd->fs.m_ext.vlan_tci == 0 ||
-		      cmd->fs.m_ext.vlan_tci == cpu_to_be16(0xfff)))
+		    !((cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
+		      0 ||
+		      (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
+		      cpu_to_be16(VLAN_VID_MASK)))
 			return -EINVAL;
+		if (cmd->fs.m_ext.vlan_tci) {
+			if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) >= VLAN_N_VID)
+				return -EINVAL;
+		}
 	}
 
 	return 0;
 }
 
+static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
+					struct list_head *rule_list_h,
+					struct mlx4_spec_list *spec_l2,
+					unsigned char *mac)
+{
+	int err = 0;
+	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+	spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
+	memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
+	memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN);
+
+	if ((cmd->fs.flow_type & FLOW_EXT) &&
+	    (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
+		spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
+		spec_l2->eth.vlan_id_msk = cpu_to_be16(VLAN_VID_MASK);
+	}
+
+	list_add_tail(&spec_l2->list, rule_list_h);
+
+	return err;
+}
+
+static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
+						struct ethtool_rxnfc *cmd,
+						struct list_head *rule_list_h,
+						struct mlx4_spec_list *spec_l2,
+						__be32 ipv4_dst)
+{
+	__be64 be_mac = 0;
+	unsigned char mac[ETH_ALEN];
+
+	if (!ipv4_is_multicast(ipv4_dst)) {
+		if (cmd->fs.flow_type & FLOW_MAC_EXT) {
+			memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
+		} else {
+			be_mac = cpu_to_be64((priv->mac & MLX4_MAC_MASK) << 16);
+			memcpy(&mac, &be_mac, ETH_ALEN);
+		}
+	} else {
+		ip_eth_mc_map(ipv4_dst, mac);
+	}
+
+	return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]);
+}
+
 static int add_ip_rule(struct mlx4_en_priv *priv,
 		       struct ethtool_rxnfc *cmd,
 		       struct list_head *list_h)
 {
-	struct mlx4_spec_list *spec_l3;
+	struct mlx4_spec_list *spec_l2 = NULL;
+	struct mlx4_spec_list *spec_l3 = NULL;
 	struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;
 
-	spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
-	if (!spec_l3) {
+	spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
+	spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
+	if (!spec_l2 || !spec_l3) {
 		en_err(priv, "Fail to alloc ethtool rule.\n");
+		kfree(spec_l2);
+		kfree(spec_l3);
 		return -ENOMEM;
 	}
 
+	mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2,
+					     cmd->fs.h_u.
+					     usr_ip4_spec.ip4dst);
+
 	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
 	spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
 	if (l3_mask->ip4src)
@@ -701,14 +764,17 @@ static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
 			    struct ethtool_rxnfc *cmd,
 			    struct list_head *list_h, int proto)
 {
-	struct mlx4_spec_list *spec_l3;
-	struct mlx4_spec_list *spec_l4;
+	struct mlx4_spec_list *spec_l2 = NULL;
+	struct mlx4_spec_list *spec_l3 = NULL;
+	struct mlx4_spec_list *spec_l4 = NULL;
 	struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
 
-	spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
-	spec_l4 = kzalloc(sizeof *spec_l4, GFP_KERNEL);
-	if (!spec_l4 || !spec_l3) {
+	spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
+	spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
+	spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
+	if (!spec_l2 || !spec_l3 || !spec_l4) {
 		en_err(priv, "Fail to alloc ethtool rule.\n");
+		kfree(spec_l2);
 		kfree(spec_l3);
 		kfree(spec_l4);
 		return -ENOMEM;
@@ -717,12 +783,20 @@ static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
 	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
 
 	if (proto == TCP_V4_FLOW) {
+		mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
+						     spec_l2,
+						     cmd->fs.h_u.
+						     tcp_ip4_spec.ip4dst);
 		spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
 		spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
 		spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
 		spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
 		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
 	} else {
+		mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
+						     spec_l2,
+						     cmd->fs.h_u.
+						     udp_ip4_spec.ip4dst);
 		spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
 		spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
 		spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
@@ -751,43 +825,23 @@ static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
 					     struct list_head *rule_list_h)
 {
 	int err;
-	__be64 be_mac;
 	struct ethhdr *eth_spec;
-	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_spec_list *spec_l2;
-	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
+	struct mlx4_en_priv *priv = netdev_priv(dev);
 
 	err = mlx4_en_validate_flow(dev, cmd);
 	if (err)
 		return err;
 
-	spec_l2 = kzalloc(sizeof *spec_l2, GFP_KERNEL);
-	if (!spec_l2)
-		return -ENOMEM;
-
-	if (cmd->fs.flow_type & FLOW_MAC_EXT) {
-		memcpy(&be_mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
-	} else {
-		u64 mac = priv->mac & MLX4_MAC_MASK;
-		be_mac = cpu_to_be64(mac << 16);
-	}
-
-	spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
-	memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
-	if ((cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) != ETHER_FLOW)
-		memcpy(spec_l2->eth.dst_mac, &be_mac, ETH_ALEN);
-
-	if ((cmd->fs.flow_type & FLOW_EXT) && cmd->fs.m_ext.vlan_tci) {
-		spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
-		spec_l2->eth.vlan_id_msk = cpu_to_be16(0xfff);
-	}
-
-	list_add_tail(&spec_l2->list, rule_list_h);
-
 	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
 	case ETHER_FLOW:
+		spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
+		if (!spec_l2)
+			return -ENOMEM;
+
 		eth_spec = &cmd->fs.h_u.ether_spec;
-		memcpy(&spec_l2->eth.dst_mac, eth_spec->h_dest, ETH_ALEN);
+		mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2,
+					     &eth_spec->h_dest[0]);
 		spec_l2->eth.ether_type = eth_spec->h_proto;
 		if (eth_spec->h_proto)
 			spec_l2->eth.ether_type_enable = 1;
@@ -861,6 +915,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
 		loc_rule->id = 0;
 		memset(&loc_rule->flow_spec, 0,
 		       sizeof(struct ethtool_rx_flow_spec));
+		list_del(&loc_rule->list);
 	}
 	err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
 	if (err) {
@@ -871,6 +926,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
 	loc_rule->id = reg_id;
 	memcpy(&loc_rule->flow_spec, &cmd->fs,
 	       sizeof(struct ethtool_rx_flow_spec));
+	list_add_tail(&loc_rule->list, &priv->ethtool_list);
 
 out_free_list:
 	list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
@@ -904,6 +960,7 @@ static int mlx4_en_flow_detach(struct net_device *dev,
 	}
 	rule->id = 0;
 	memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
+	list_del(&rule->list);
 out:
 	return err;
@@ -952,7 +1009,8 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
 	if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
 	     cmd->cmd == ETHTOOL_GRXCLSRULE ||
 	     cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
-	     mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+	     (mdev->dev->caps.steering_mode !=
+	      MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up))
 		return -EINVAL;
 
 	switch (cmd->cmd) {
@@ -988,7 +1046,8 @@ static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 
-	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+	if (mdev->dev->caps.steering_mode !=
+	    MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)
 		return -EINVAL;
 
 	switch (cmd->cmd) {
@@ -1037,7 +1096,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
 	mutex_lock(&mdev->state_lock);
 	if (priv->port_up) {
 		port_up = 1;
-		mlx4_en_stop_port(dev);
+		mlx4_en_stop_port(dev, 1);
 	}
 
 	mlx4_en_free_resources(priv);
...
@@ -1039,6 +1039,9 @@ int mlx4_en_start_port(struct net_device *dev)
 	INIT_LIST_HEAD(&priv->mc_list);
 	INIT_LIST_HEAD(&priv->curr_list);
+	INIT_LIST_HEAD(&priv->ethtool_list);
+	memset(&priv->ethtool_rules[0], 0,
+	       sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);
 
 	/* Calculate Rx buf size */
 	dev->mtu = min(dev->mtu, priv->max_mtu);
@@ -1175,6 +1178,8 @@ int mlx4_en_start_port(struct net_device *dev)
 	priv->port_up = true;
 	netif_tx_start_all_queues(dev);
 
+	netif_device_attach(dev);
+
 	return 0;
 
 tx_err:
@@ -1197,11 +1202,12 @@ int mlx4_en_start_port(struct net_device *dev)
 }
 
-void mlx4_en_stop_port(struct net_device *dev)
+void mlx4_en_stop_port(struct net_device *dev, int detach)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_mc_list *mclist, *tmp;
+	struct ethtool_flow_id *flow, *tmp_flow;
 	int i;
 	u8 mc_list[16] = {0};
@@ -1212,9 +1218,13 @@ void mlx4_en_stop_port(struct net_device *dev)
 	/* Synchronize with tx routine */
 	netif_tx_lock_bh(dev);
+	if (detach)
+		netif_device_detach(dev);
 	netif_tx_stop_all_queues(dev);
 	netif_tx_unlock_bh(dev);
 
+	netif_tx_disable(dev);
+
 	/* Set port as not active */
 	priv->port_up = false;
@@ -1281,7 +1291,19 @@ void mlx4_en_stop_port(struct net_device *dev)
 	/* Unregister Mac address for the port */
 	mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
-	mdev->mac_removed[priv->port] = 1;
+	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
+		mdev->mac_removed[priv->port] = 1;
+
+	/* Remove flow steering rules for the port*/
+	if (mdev->dev->caps.steering_mode ==
+	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
+		ASSERT_RTNL();
+		list_for_each_entry_safe(flow, tmp_flow,
+					 &priv->ethtool_list, list) {
+			mlx4_flow_detach(mdev->dev, flow->id);
+			list_del(&flow->list);
+		}
+	}
 
 	/* Free RX Rings */
 	for (i = 0; i < priv->rx_ring_num; i++) {
@@ -1307,7 +1329,7 @@ static void mlx4_en_restart(struct work_struct *work)
 	mutex_lock(&mdev->state_lock);
 	if (priv->port_up) {
-		mlx4_en_stop_port(dev);
+		mlx4_en_stop_port(dev, 1);
 		for (i = 0; i < priv->tx_ring_num; i++)
 			netdev_tx_reset_queue(priv->tx_ring[i].tx_queue);
 		if (mlx4_en_start_port(dev))
@@ -1379,7 +1401,7 @@ static int mlx4_en_close(struct net_device *dev)
 	mutex_lock(&mdev->state_lock);
 
-	mlx4_en_stop_port(dev);
+	mlx4_en_stop_port(dev, 0);
 	netif_carrier_off(dev);
 
 	mutex_unlock(&mdev->state_lock);
@@ -1517,7 +1539,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 		 * the port */
 		en_dbg(DRV, priv, "Change MTU called with card down!?\n");
 	} else {
-		mlx4_en_stop_port(dev);
+		mlx4_en_stop_port(dev, 1);
 		err = mlx4_en_start_port(dev);
 		if (err) {
 			en_err(priv, "Failed restarting port:%d\n",
...
@@ -127,7 +127,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
 		[0] = "RSS support",
 		[1] = "RSS Toeplitz Hash Function support",
 		[2] = "RSS XOR Hash Function support",
-		[3] = "Device manage flow steering support"
+		[3] = "Device manage flow steering support",
+		[4] = "Automatic mac reassignment support"
 	};
 	int i;
@@ -478,6 +479,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
+#define QUERY_DEV_CAP_FW_REASSIGN_MAC		0x9d
 
 	dev_cap->flags2 = 0;
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -637,6 +639,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
 	MLX4_GET(dev_cap->reserved_lkey, outbox,
 		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
+	if (field & 1<<6)
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN;
 	MLX4_GET(dev_cap->max_icm_sz, outbox,
 		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
 	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -1287,14 +1292,14 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 		/* Enable Ethernet flow steering
 		 * with udp unicast and tcp unicast
 		 */
-		MLX4_PUT(inbox, param->fs_hash_enable_bits,
+		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
 			 INIT_HCA_FS_ETH_BITS_OFFSET);
 		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
 			 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
 		/* Enable IPoIB flow steering
 		 * with udp unicast and tcp unicast
 		 */
-		MLX4_PUT(inbox, param->fs_hash_enable_bits,
+		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
 			 INIT_HCA_FS_IB_BITS_OFFSET);
 		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
 			 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
...
@@ -171,7 +171,6 @@ struct mlx4_init_hca_param {
 	u8  log_mpt_sz;
 	u8  log_uar_sz;
 	u8  uar_page_sz; /* log pg sz in 4k chunks */
-	u8  fs_hash_enable_bits;
 	u8  steering_mode; /* for QUERY_HCA */
 	u64 dev_cap_enabled;
 };
...
@@ -1415,22 +1415,6 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 		if (mlx4_is_master(dev))
 			mlx4_parav_master_pf_caps(dev);
 
-		priv->fs_hash_mode = MLX4_FS_L2_HASH;
-
-		switch (priv->fs_hash_mode) {
-		case MLX4_FS_L2_HASH:
-			init_hca.fs_hash_enable_bits = 0;
-			break;
-
-		case MLX4_FS_L2_L3_L4_HASH:
-			/* Enable flow steering with
-			 * udp unicast and tcp unicast
-			 */
-			init_hca.fs_hash_enable_bits =
-				MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN;
-			break;
-		}
-
 		profile = default_profile;
 		if (dev->caps.steering_mode ==
 		    MLX4_STEERING_MODE_DEVICE_MANAGED)
...
@@ -664,7 +664,7 @@ static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
 	dw |= ctrl->priority << 16;
 
 	hw->ctrl = cpu_to_be32(dw);
-	hw->vf_vep_port = cpu_to_be32(ctrl->port);
+	hw->port = ctrl->port;
 	hw->qpn = cpu_to_be32(ctrl->qpn);
 }
@@ -1157,7 +1157,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 		.priority = MLX4_DOMAIN_NIC,
 	};
 
-	rule.allow_loopback = ~block_mcast_loopback;
+	rule.allow_loopback = !block_mcast_loopback;
 	rule.port = port;
 	rule.qpn = qp->qpn;
 	INIT_LIST_HEAD(&rule.list);
...
@@ -60,11 +60,6 @@
 #define MLX4_FS_MGM_LOG_ENTRY_SIZE	7
 #define MLX4_FS_NUM_MCG			(1 << 17)
 
-enum {
-	MLX4_FS_L2_HASH = 0,
-	MLX4_FS_L2_L3_L4_HASH,
-};
-
 #define MLX4_NUM_UP		8
 #define MLX4_NUM_TC		8
 #define MLX4_RATELIMIT_UNITS	3 /* 100 Mbps */
@@ -696,9 +691,12 @@ struct mlx4_steer {
 
 struct mlx4_net_trans_rule_hw_ctrl {
 	__be32 ctrl;
-	__be32 vf_vep_port;
+	u8 rsvd1;
+	u8 funcid;
+	u8 vep;
+	u8 port;
 	__be32 qpn;
-	__be32 reserved;
+	__be32 rsvd2;
 };
 
 struct mlx4_net_trans_rule_hw_ib {
...
@@ -427,6 +427,7 @@ struct mlx4_en_frag_info {
 #endif
 
 struct ethtool_flow_id {
+	struct list_head list;
 	struct ethtool_rx_flow_spec flow_spec;
 	u64 id;
 };
@@ -441,6 +442,8 @@ struct mlx4_en_priv {
 	struct mlx4_en_port_state port_state;
 	spinlock_t stats_lock;
 	struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
+	/* To allow rules removal while port is going down */
+	struct list_head ethtool_list;
 
 	unsigned long last_moder_packets[MAX_RX_RINGS];
 	unsigned long last_moder_tx_packets;
@@ -536,7 +539,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 			struct mlx4_en_port_profile *prof);
 
 int mlx4_en_start_port(struct net_device *dev);
-void mlx4_en_stop_port(struct net_device *dev);
+void mlx4_en_stop_port(struct net_device *dev, int detach);
 
 void mlx4_en_free_resources(struct mlx4_en_priv *priv);
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
...
@@ -3018,7 +3018,7 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
 	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
 
 	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
-	port = be32_to_cpu(ctrl->vf_vep_port) & 0xff;
+	port = ctrl->port;
 	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
 
 	/* Clear a space in the inbox for eth header */
...
@@ -150,7 +150,8 @@ enum {
 	MLX4_DEV_CAP_FLAG2_RSS			= 1LL <<  0,
 	MLX4_DEV_CAP_FLAG2_RSS_TOP		= 1LL <<  1,
 	MLX4_DEV_CAP_FLAG2_RSS_XOR		= 1LL <<  2,
-	MLX4_DEV_CAP_FLAG2_FS_EN		= 1LL <<  3
+	MLX4_DEV_CAP_FLAG2_FS_EN		= 1LL <<  3,
+	MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN	= 1LL <<  4
 };
 
 enum {