Commit 2de27f30 authored by David S. Miller

Merge branch 'mlx4'

Amir Vadai says:

====================
This series from Yan Burman adds support for unicast MAC address filtering and
ndo FDB operations. It also includes some optimizations to loopback-related
decisions and checks in the TX/RX fast path, plus one cleanup, all in separate
patches.

Today, when adding macvlan devices, the NIC goes into promiscuous mode, since
unicast MAC filtering is not supported. With these changes, macvlan devices can
be added without the penalty of promiscuous mode.
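
For example (an illustrative sketch, not taken from this series; 'eth2' and 'macvlan0'
are placeholder names), a macvlan device can be stacked on the mlx4 uplink with
iproute2, and with this series its MAC address is served by a unicast filter entry
instead of forcing the uplink into promiscuous mode:

  # create a macvlan device on top of the PF netdev and bring it up
  ip link add link eth2 name macvlan0 type macvlan mode bridge
  ip link set macvlan0 up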

If for some reason adding a unicast address filter fails, e.g. due to lack of space in
the HW MAC table, the device forces itself into promiscuous mode (and leaves this
forced state once enough space becomes available).

Also, it is now possible to have a bridge under a multi-function configuration that
includes the PF and VFs. In order to use a bridge over the PF/VFs, VM MAC FDB entries
must be added, e.g. using the 'bridge fdb add' command shown below.
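
For example (illustrative only; the MAC address and device name are placeholders), a
static FDB entry for a VM's MAC can be added on the uplink with iproute2:

  # add the VM's MAC to the forwarding database of the PF netdev
  bridge fdb add 52:54:00:12:34:56 dev eth2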

Changes from v1 - based on more comments from Eric Dumazet:
* Added failure handling when adding a unicast address filter

Changes from v0 - based on comments from Eric Dumazet:
* Removed unneeded synchronize_rcu()
* Use kfree_rcu() instead of synchronize_rcu() + kfree()
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c6edfe10 0ccddcd1
......@@ -712,16 +712,13 @@ static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
__be32 ipv4_dst)
{
#ifdef CONFIG_INET
__be64 be_mac = 0;
unsigned char mac[ETH_ALEN];
if (!ipv4_is_multicast(ipv4_dst)) {
if (cmd->fs.flow_type & FLOW_MAC_EXT) {
if (cmd->fs.flow_type & FLOW_MAC_EXT)
memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
} else {
be_mac = cpu_to_be64((priv->mac & MLX4_MAC_MASK) << 16);
memcpy(&mac, &be_mac, ETH_ALEN);
}
else
memcpy(&mac, priv->dev->dev_addr, ETH_ALEN);
} else {
ip_eth_mc_map(ipv4_dst, mac);
}
......
......@@ -95,6 +95,28 @@ int en_print(const char *level, const struct mlx4_en_priv *priv,
return i;
}
void mlx4_en_update_loopback_state(struct net_device *dev,
netdev_features_t features)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
priv->flags &= ~(MLX4_EN_FLAG_RX_FILTER_NEEDED|
MLX4_EN_FLAG_ENABLE_HW_LOOPBACK);
/* Drop the packet if SRIOV is not enabled
* and not performing the selftest or flb disabled
*/
if (mlx4_is_mfunc(priv->mdev->dev) &&
!(features & NETIF_F_LOOPBACK) && !priv->validate_loopback)
priv->flags |= MLX4_EN_FLAG_RX_FILTER_NEEDED;
/* Set dmac in Tx WQE if we are in SRIOV mode or if loopback selftest
* is requested
*/
if (mlx4_is_mfunc(priv->mdev->dev) || priv->validate_loopback)
priv->flags |= MLX4_EN_FLAG_ENABLE_HW_LOOPBACK;
}
static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
{
struct mlx4_en_profile *params = &mdev->profile;
......
......@@ -563,9 +563,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
unsigned int length;
int polled = 0;
int ip_summed;
struct ethhdr *ethh;
dma_addr_t dma;
u64 s_mac;
int factor = priv->cqe_factor;
if (!priv->port_up)
......@@ -603,21 +600,41 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
goto next;
}
/* Get pointer to first fragment since we haven't skb yet and
* cast it to ethhdr struct */
dma = be64_to_cpu(rx_desc->data[0].addr);
dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
DMA_FROM_DEVICE);
ethh = (struct ethhdr *)(page_address(frags[0].page) +
frags[0].offset);
s_mac = mlx4_en_mac_to_u64(ethh->h_source);
/* If source MAC is equal to our own MAC and not performing
* the selftest or flb disabled - drop the packet */
if (s_mac == priv->mac &&
!((dev->features & NETIF_F_LOOPBACK) ||
priv->validate_loopback))
goto next;
/* Check if we need to drop the packet if SRIOV is not enabled
* and not performing the selftest or flb disabled
*/
if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
struct ethhdr *ethh;
dma_addr_t dma;
/* Get pointer to first fragment since we haven't
* skb yet and cast it to ethhdr struct
*/
dma = be64_to_cpu(rx_desc->data[0].addr);
dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
DMA_FROM_DEVICE);
ethh = (struct ethhdr *)(page_address(frags[0].page) +
frags[0].offset);
if (is_multicast_ether_addr(ethh->h_dest)) {
struct mlx4_mac_entry *entry;
struct hlist_node *n;
struct hlist_head *bucket;
unsigned int mac_hash;
/* Drop the packet, since HW loopback-ed it */
mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
bucket = &priv->mac_hash[mac_hash];
rcu_read_lock();
hlist_for_each_entry_rcu(entry, n, bucket, hlist) {
if (ether_addr_equal_64bits(entry->mac,
ethh->h_source)) {
rcu_read_unlock();
goto next;
}
}
rcu_read_unlock();
}
}
/*
* Packet is OK - process it.
......
......@@ -87,6 +87,8 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
priv->loopback_ok = 0;
priv->validate_loopback = 1;
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
/* xmit */
if (mlx4_en_test_loopback_xmit(priv)) {
en_err(priv, "Transmitting loopback packet failed\n");
......@@ -107,6 +109,7 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
mlx4_en_test_loopback_exit:
priv->validate_loopback = 0;
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
return !loopback_ok;
}
......
......@@ -640,7 +640,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->tx_csum++;
}
if (mlx4_is_mfunc(mdev->dev) || priv->validate_loopback) {
if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) {
/* Copy dst mac address to wqe. This allows loopback in eSwitch,
* so that VFs and PF can communicate with each other
*/
......
......@@ -1833,12 +1833,9 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
info->dev = dev;
info->port = port;
if (!mlx4_is_slave(dev)) {
INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
mlx4_init_mac_table(dev, &info->mac_table);
mlx4_init_vlan_table(dev, &info->vlan_table);
info->base_qpn =
dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
(port - 1) * (1 << log_num_mac);
info->base_qpn = mlx4_get_base_qpn(dev, port);
}
sprintf(info->dev_name, "mlx4_port%d", port);
......
......@@ -653,11 +653,6 @@ struct mlx4_set_port_rqp_calc_context {
__be32 mcast;
};
struct mlx4_mac_entry {
u64 mac;
u64 reg_id;
};
struct mlx4_port_info {
struct mlx4_dev *dev;
int port;
......@@ -667,7 +662,6 @@ struct mlx4_port_info {
char dev_mtu_name[16];
struct device_attribute port_mtu_attr;
struct mlx4_mac_table mac_table;
struct radix_tree_root mac_tree;
struct mlx4_vlan_table vlan_table;
int base_qpn;
};
......@@ -916,7 +910,6 @@ int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list);
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
......
......@@ -198,7 +198,6 @@ enum cq_type {
*/
#define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x))
#define XNOR(x, y) (!(x) == !(y))
#define ILLEGAL_MAC(addr) (addr == 0xffffffffffffULL || addr == 0x0)
struct mlx4_en_tx_info {
......@@ -432,6 +431,21 @@ struct ethtool_flow_id {
u64 id;
};
enum {
MLX4_EN_FLAG_PROMISC = (1 << 0),
MLX4_EN_FLAG_MC_PROMISC = (1 << 1),
/* whether we need to enable hardware loopback by putting dmac
* in Tx WQE
*/
MLX4_EN_FLAG_ENABLE_HW_LOOPBACK = (1 << 2),
/* whether we need to drop packets that hardware loopback-ed */
MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3),
MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4)
};
#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE)
#define MLX4_EN_MAC_HASH_IDX 5
struct mlx4_en_priv {
struct mlx4_en_dev *mdev;
struct mlx4_en_port_profile *prof;
......@@ -472,7 +486,7 @@ struct mlx4_en_priv {
int registered;
int allocated;
int stride;
u64 mac;
unsigned char prev_mac[ETH_ALEN + 2];
int mac_index;
unsigned max_mtu;
int base_qpn;
......@@ -481,8 +495,6 @@ struct mlx4_en_priv {
struct mlx4_en_rss_map rss_map;
__be32 ctrl_flags;
u32 flags;
#define MLX4_EN_FLAG_PROMISC 0x1
#define MLX4_EN_FLAG_MC_PROMISC 0x2
u8 num_tx_rings_p_up;
u32 tx_ring_num;
u32 rx_ring_num;
......@@ -496,7 +508,7 @@ struct mlx4_en_priv {
struct mlx4_en_cq *tx_cq;
struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
struct mlx4_qp drop_qp;
struct work_struct mcast_task;
struct work_struct rx_mode_task;
struct work_struct mac_task;
struct work_struct watchdog_task;
struct work_struct linkstate_task;
......@@ -513,6 +525,7 @@ struct mlx4_en_priv {
bool wol;
struct device *ddev;
int base_tx_qpn;
struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
#ifdef CONFIG_MLX4_EN_DCB
struct ieee_ets ets;
......@@ -532,8 +545,18 @@ enum mlx4_en_wol {
MLX4_EN_WOL_ENABLED = (1ULL << 62),
};
struct mlx4_mac_entry {
struct hlist_node hlist;
unsigned char mac[ETH_ALEN + 2];
u64 reg_id;
struct rcu_head rcu;
};
#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
void mlx4_en_update_loopback_state(struct net_device *dev,
netdev_features_t features);
void mlx4_en_destroy_netdev(struct net_device *dev);
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_port_profile *prof);
......
......@@ -74,87 +74,6 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
table->total = 0;
}
static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
u64 mac, int *qpn, u64 *reg_id)
{
__be64 be_mac;
int err;
mac &= MLX4_MAC_MASK;
be_mac = cpu_to_be64(mac << 16);
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_B0: {
struct mlx4_qp qp;
u8 gid[16] = {0};
qp.qpn = *qpn;
memcpy(&gid[10], &be_mac, ETH_ALEN);
gid[5] = port;
err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
break;
}
case MLX4_STEERING_MODE_DEVICE_MANAGED: {
struct mlx4_spec_list spec_eth = { {NULL} };
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
struct mlx4_net_trans_rule rule = {
.queue_mode = MLX4_NET_TRANS_Q_FIFO,
.exclusive = 0,
.allow_loopback = 1,
.promisc_mode = MLX4_FS_PROMISC_NONE,
.priority = MLX4_DOMAIN_NIC,
};
rule.port = port;
rule.qpn = *qpn;
INIT_LIST_HEAD(&rule.list);
spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
memcpy(spec_eth.eth.dst_mac, &be_mac, ETH_ALEN);
memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
list_add_tail(&spec_eth.list, &rule.list);
err = mlx4_flow_attach(dev, &rule, reg_id);
break;
}
default:
return -EINVAL;
}
if (err)
mlx4_warn(dev, "Failed Attaching Unicast\n");
return err;
}
static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
u64 mac, int qpn, u64 reg_id)
{
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_B0: {
struct mlx4_qp qp;
u8 gid[16] = {0};
__be64 be_mac;
qp.qpn = qpn;
mac &= MLX4_MAC_MASK;
be_mac = cpu_to_be64(mac << 16);
memcpy(&gid[10], &be_mac, ETH_ALEN);
gid[5] = port;
mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
break;
}
case MLX4_STEERING_MODE_DEVICE_MANAGED: {
mlx4_flow_detach(dev, reg_id);
break;
}
default:
mlx4_err(dev, "Invalid steering mode.\n");
}
}
static int validate_index(struct mlx4_dev *dev,
struct mlx4_mac_table *table, int index)
{
......@@ -181,92 +100,6 @@ static int find_index(struct mlx4_dev *dev,
return -EINVAL;
}
int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_entry *entry;
int index = 0;
int err = 0;
u64 reg_id;
mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
(unsigned long long) mac);
index = mlx4_register_mac(dev, port, mac);
if (index < 0) {
err = index;
mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
(unsigned long long) mac);
return err;
}
if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
*qpn = info->base_qpn + index;
return 0;
}
err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
mlx4_dbg(dev, "Reserved qp %d\n", *qpn);
if (err) {
mlx4_err(dev, "Failed to reserve qp for mac registration\n");
goto qp_err;
}
err = mlx4_uc_steer_add(dev, port, mac, qpn, &reg_id);
if (err)
goto steer_err;
entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry) {
err = -ENOMEM;
goto alloc_err;
}
entry->mac = mac;
entry->reg_id = reg_id;
err = radix_tree_insert(&info->mac_tree, *qpn, entry);
if (err)
goto insert_err;
return 0;
insert_err:
kfree(entry);
alloc_err:
mlx4_uc_steer_release(dev, port, mac, *qpn, reg_id);
steer_err:
mlx4_qp_release_range(dev, *qpn, 1);
qp_err:
mlx4_unregister_mac(dev, port, mac);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_get_eth_qp);
void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_entry *entry;
mlx4_dbg(dev, "Registering MAC: 0x%llx for deleting\n",
(unsigned long long) mac);
mlx4_unregister_mac(dev, port, mac);
if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
entry = radix_tree_lookup(&info->mac_tree, qpn);
if (entry) {
mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
" qpn %d\n", port,
(unsigned long long) mac, qpn);
mlx4_uc_steer_release(dev, port, entry->mac,
qpn, entry->reg_id);
mlx4_qp_release_range(dev, qpn, 1);
radix_tree_delete(&info->mac_tree, qpn);
kfree(entry);
}
}
}
EXPORT_SYMBOL_GPL(mlx4_put_eth_qp);
static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
__be64 *entries)
{
......@@ -359,6 +192,12 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
}
EXPORT_SYMBOL_GPL(mlx4_register_mac);
int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
{
return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
(port - 1) * (1 << dev->caps.log_num_macs);
}
EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
......@@ -397,29 +236,13 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_table *table = &info->mac_table;
struct mlx4_mac_entry *entry;
int index = qpn - info->base_qpn;
int err = 0;
if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
entry = radix_tree_lookup(&info->mac_tree, qpn);
if (!entry)
return -EINVAL;
mlx4_uc_steer_release(dev, port, entry->mac,
qpn, entry->reg_id);
mlx4_unregister_mac(dev, port, entry->mac);
entry->mac = new_mac;
entry->reg_id = 0;
mlx4_register_mac(dev, port, new_mac);
err = mlx4_uc_steer_add(dev, port, entry->mac,
&qpn, &entry->reg_id);
return err;
}
/* CX1 doesn't support multi-functions */
mutex_lock(&table->mutex);
......@@ -439,7 +262,7 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
mutex_unlock(&table->mutex);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_replace_mac);
EXPORT_SYMBOL_GPL(__mlx4_replace_mac);
static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
__be32 *entries)
......
......@@ -956,9 +956,8 @@ int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mo
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn);
void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn);
int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap);
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
......