Commit 38baf3a6 authored by David S. Miller

Merge tag 'mlx5-fixes-2017-04-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2017-04-22

This series contains some mlx5 fixes for net.

For your convenience, the series doesn't introduce any conflict with
the ongoing net-next pull request.

Please pull and let me know if there's any problem.

For -stable:
("net/mlx5: E-Switch, Correctly deal with inline mode on ConnectX-5") kernels >= 4.10
("net/mlx5e: Fix ETHTOOL_GRXCLSRLALL handling") kernels >= 4.8
("net/mlx5e: Fix small packet threshold")       kernels >= 4.7
("net/mlx5: Fix driver load bad flow when having fw initializing timeout") kernels >= 4.4
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents fc1f8f4f 5e82c9e4
@@ -90,7 +90,7 @@
 #define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX)
 #define MLX5_UMR_ALIGN (2048)
-#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
+#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (256)
 #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
 #define MLX5E_DEFAULT_LRO_TIMEOUT 32
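The one functional change in this hunk doubles MLX5_MPWRQ_SMALL_PACKET_THRESHOLD from 128 to 256 bytes (the "Fix small packet threshold" patch from the cover letter). Thresholds like this typically drive a copy-break style decision in an RX path; a minimal standalone sketch of that idea, with hypothetical helper names rather than the driver's actual code:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical copy-break rule: frames at or below the threshold are
 * copied so their headers stay contiguous in the SKB linear area;
 * larger frames keep their DMA page fragments. */
#define SMALL_PACKET_THRESHOLD 256

static bool copy_break(unsigned int frame_len)
{
    return frame_len <= SMALL_PACKET_THRESHOLD;
}

int main(void)
{
    printf("128-byte frame copied: %d\n", copy_break(128));
    printf("1500-byte frame copied: %d\n", copy_break(1500));
    return 0;
}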
@@ -564,6 +564,7 @@ int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
 	int idx = 0;
 	int err = 0;

+	info->data = MAX_NUM_OF_ETHTOOL_RULES;
 	while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
 		err = mlx5e_ethtool_get_flow(priv, info, location);
 		if (!err)
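The added line implements the ETHTOOL_GRXCLSRLALL contract: info->data must report the rule-table capacity so userspace can size its rule-location array, while rule_cnt carries the number of rules actually returned. A standalone mock of that contract (hypothetical struct and names; the kernel's struct ethtool_rxnfc is richer):

#include <stdio.h>

#define MAX_RULES 1024 /* stands in for MAX_NUM_OF_ETHTOOL_RULES */

struct rxnfc_info {
    unsigned int data;     /* table capacity, filled by the driver */
    unsigned int rule_cnt; /* number of rules returned */
};

static void get_all_flows(struct rxnfc_info *info, unsigned int active)
{
    info->data = MAX_RULES; /* the line the fix adds */
    info->rule_cnt = active;
}

int main(void)
{
    struct rxnfc_info info;

    get_all_flows(&info, 3);
    printf("capacity=%u returned=%u\n", info.data, info.rule_cnt);
    return 0;
}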
@@ -639,7 +639,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
 	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
 	    rep->vport != FDB_UPLINK_VPORT) {
-		if (min_inline > esw->offloads.inline_mode) {
+		if (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
+		    esw->offloads.inline_mode < min_inline) {
 			netdev_warn(priv->netdev,
 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
 				    min_inline, esw->offloads.inline_mode);
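This is part of the ConnectX-5 fix from the cover letter: on devices whose e-switch inline mode is MLX5_INLINE_MODE_NONE, no inlining is required at all, so the old comparison wrongly refused to offload flows. A standalone mirror of the corrected predicate (stand-in enum values, not the driver's definitions):

#include <stdbool.h>
#include <stdio.h>

enum inline_mode { MODE_NONE, MODE_L2, MODE_IP, MODE_TCP_UDP };

/* Reject offload only when inlining is required (mode != NONE) and
 * the configured mode is weaker than what the flow needs. */
static bool offload_rejected(enum inline_mode esw_mode,
                             enum inline_mode min_required)
{
    return esw_mode != MODE_NONE && esw_mode < min_required;
}

int main(void)
{
    /* ConnectX-5-style case: mode NONE, rule matches on L2 headers. */
    printf("rejected: %d\n", offload_rejected(MODE_NONE, MODE_L2));
    return 0;
}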
@@ -785,8 +786,8 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 	return 0;
 }

-static int gen_vxlan_header_ipv4(struct net_device *out_dev,
-				 char buf[],
+static void gen_vxlan_header_ipv4(struct net_device *out_dev,
+				  char buf[], int encap_size,
 				 unsigned char h_dest[ETH_ALEN],
 				 int ttl,
 				 __be32 daddr,
@@ -794,7 +795,6 @@ static int gen_vxlan_header_ipv4(struct net_device *out_dev,
 			__be16 udp_dst_port,
 			__be32 vx_vni)
 {
-	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
 	struct ethhdr *eth = (struct ethhdr *)buf;
 	struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
 	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
@@ -817,12 +817,10 @@ static int gen_vxlan_header_ipv4(struct net_device *out_dev,
 	udp->dest = udp_dst_port;
 	vxh->vx_flags = VXLAN_HF_VNI;
 	vxh->vx_vni = vxlan_vni_field(vx_vni);
-
-	return encap_size;
 }

-static int gen_vxlan_header_ipv6(struct net_device *out_dev,
-				 char buf[],
+static void gen_vxlan_header_ipv6(struct net_device *out_dev,
+				  char buf[], int encap_size,
 				 unsigned char h_dest[ETH_ALEN],
 				 int ttl,
 				 struct in6_addr *daddr,
@@ -830,7 +828,6 @@ static int gen_vxlan_header_ipv6(struct net_device *out_dev,
 			__be16 udp_dst_port,
 			__be32 vx_vni)
 {
-	int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
 	struct ethhdr *eth = (struct ethhdr *)buf;
 	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
 	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
@@ -852,8 +849,6 @@ static int gen_vxlan_header_ipv6(struct net_device *out_dev,
 	udp->dest = udp_dst_port;
 	vxh->vx_flags = VXLAN_HF_VNI;
 	vxh->vx_vni = vxlan_vni_field(vx_vni);
-
-	return encap_size;
 }

 static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
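With this refactor the callers compute the exact encapsulation size and pass it down, so both header generators become void. The sizes are fixed VXLAN framing sums; a standalone check (header lengths restated as plain constants for illustration):

#include <stdio.h>

#define ETH_HLEN   14
#define IPV4_HLEN  20             /* sizeof(struct iphdr), no options */
#define IPV6_HLEN  40             /* sizeof(struct ipv6hdr) */
#define UDP_HLEN    8
#define VXLAN_HLEN (UDP_HLEN + 8) /* UDP header + 8-byte VXLAN header */

int main(void)
{
    printf("ipv4 vxlan encap: %d bytes\n", ETH_HLEN + IPV4_HLEN + VXLAN_HLEN);
    printf("ipv6 vxlan encap: %d bytes\n", ETH_HLEN + IPV6_HLEN + VXLAN_HLEN);
    return 0;
}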
@@ -862,13 +857,20 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 				   struct net_device **out_dev)
 {
 	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+	int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
 	struct ip_tunnel_key *tun_key = &e->tun_info.key;
-	int encap_size, ttl, err;
 	struct neighbour *n = NULL;
 	struct flowi4 fl4 = {};
 	char *encap_header;
+	int ttl, err;
+
+	if (max_encap_size < ipv4_encap_size) {
+		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+			       ipv4_encap_size, max_encap_size);
+		return -EOPNOTSUPP;
+	}

-	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
 	if (!encap_header)
 		return -ENOMEM;

@@ -903,8 +905,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 	switch (e->tunnel_type) {
 	case MLX5_HEADER_TYPE_VXLAN:
-		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
-						   e->h_dest, ttl,
+		gen_vxlan_header_ipv4(*out_dev, encap_header,
+				      ipv4_encap_size, e->h_dest, ttl,
 				      fl4.daddr,
 				      fl4.saddr, tun_key->tp_dst,
 				      tunnel_id_to_key32(tun_key->tun_id));
@@ -915,7 +917,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 	}

 	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-			       encap_size, encap_header, &e->encap_id);
+			       ipv4_encap_size, encap_header, &e->encap_id);
 out:
 	if (err && n)
 		neigh_release(n);
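The pattern on both the IPv4 and IPv6 paths is now validate-then-allocate: reject headers that exceed the device's max_encap_header_size capability before touching memory, then allocate exactly the needed size instead of the device maximum. A userspace sketch of the same shape (hypothetical names standing in for kzalloc and mlx5_core_warn):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static char *alloc_encap_header(int needed, int device_max)
{
    if (device_max < needed) {
        fprintf(stderr, "encap size %d too big, max supported is %d\n",
                needed, device_max);
        errno = ENOTSUP;
        return NULL;
    }
    return calloc(1, needed); /* zeroed, like kzalloc */
}

int main(void)
{
    char *hdr = alloc_encap_header(50, 128);

    printf("allocation %s\n", hdr ? "succeeded" : "failed");
    free(hdr);
    return 0;
}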
@@ -930,13 +932,20 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 {
 	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+	int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
 	struct ip_tunnel_key *tun_key = &e->tun_info.key;
-	int encap_size, err, ttl = 0;
 	struct neighbour *n = NULL;
 	struct flowi6 fl6 = {};
 	char *encap_header;
+	int err, ttl = 0;
+
+	if (max_encap_size < ipv6_encap_size) {
+		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+			       ipv6_encap_size, max_encap_size);
+		return -EOPNOTSUPP;
+	}

-	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
 	if (!encap_header)
 		return -ENOMEM;

@@ -972,8 +981,8 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 	switch (e->tunnel_type) {
 	case MLX5_HEADER_TYPE_VXLAN:
-		encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header,
-						   e->h_dest, ttl,
+		gen_vxlan_header_ipv6(*out_dev, encap_header,
+				      ipv6_encap_size, e->h_dest, ttl,
 				      &fl6.daddr,
 				      &fl6.saddr, tun_key->tp_dst,
 				      tunnel_id_to_key32(tun_key->tun_id));
@@ -984,7 +993,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 	}

 	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-			       encap_size, encap_header, &e->encap_id);
+			       ipv6_encap_size, encap_header, &e->encap_id);
 out:
 	if (err && n)
 		neigh_release(n);
@@ -911,8 +911,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
 	int num_vports = esw->enabled_vports;
-	int err;
-	int vport;
+	int err, vport;
 	u8 mlx5_mode;

 	if (!MLX5_CAP_GEN(dev, vport_group_manager))
@@ -921,9 +920,17 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 	if (esw->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;

-	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
+	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
+			return 0;
+		/* fall through */
+	case MLX5_CAP_INLINE_MODE_L2:
+		esw_warn(dev, "Inline mode can't be set\n");
 		return -EOPNOTSUPP;
+	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+		break;
+	}

 	if (esw->offloads.num_flows > 0) {
 		esw_warn(dev, "Can't set inline mode when flows are configured\n");
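The switch spells out the three wqe_inline_mode capability values: NOT_REQUIRED (setting "none" is a successful no-op, anything else is refused), L2 (the mode is fixed by the device), and VPORT_CONTEXT (programmable per vport, so the set proceeds). A standalone mirror of that dispatch, with stand-in enum values for the MLX5_CAP_INLINE_MODE_* and DEVLINK_ESWITCH_INLINE_MODE_* constants:

#include <errno.h>
#include <stdio.h>

enum cap { CAP_L2, CAP_VPORT_CONTEXT, CAP_NOT_REQUIRED };
enum devlink_mode { DL_NONE, DL_LINK, DL_NETWORK, DL_TRANSPORT };

static int inline_mode_set_allowed(enum cap cap, enum devlink_mode mode)
{
    switch (cap) {
    case CAP_NOT_REQUIRED:
        if (mode == DL_NONE)
            return 0;    /* nothing to change, succeed */
        /* fall through */
    case CAP_L2:
        return -ENOTSUP; /* mode is fixed by the device */
    case CAP_VPORT_CONTEXT:
        return 0;        /* programmable per vport */
    }
    return -EINVAL;
}

int main(void)
{
    printf("%d\n", inline_mode_set_allowed(CAP_NOT_REQUIRED, DL_NONE));
    printf("%d\n", inline_mode_set_allowed(CAP_L2, DL_LINK));
    return 0;
}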
@@ -966,18 +973,14 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
 	if (esw->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;

-	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
-		return -EOPNOTSUPP;
-
 	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
 }

 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
 {
+	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
 	struct mlx5_core_dev *dev = esw->dev;
 	int vport;
-	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;

 	if (!MLX5_CAP_GEN(dev, vport_group_manager))
 		return -EOPNOTSUPP;
@@ -985,10 +988,18 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
 	if (esw->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;

-	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
-		return -EOPNOTSUPP;
+	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
+	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+		mlx5_mode = MLX5_INLINE_MODE_NONE;
+		goto out;
+	case MLX5_CAP_INLINE_MODE_L2:
+		mlx5_mode = MLX5_INLINE_MODE_L2;
+		goto out;
+	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+		goto query_vports;
+	}

+query_vports:
 	for (vport = 1; vport <= nvfs; vport++) {
 		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
 		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
@@ -996,6 +1007,7 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
 		prev_mlx5_mode = mlx5_mode;
 	}

+out:
 	*mode = mlx5_mode;
 	return 0;
 }
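In the aggregate query, the capability now short-circuits the answer: NOT_REQUIRED and L2 imply a fixed mode, and only VPORT_CONTEXT falls through to polling every VF vport, which must all agree. A sketch of that consensus walk, with an array standing in for the per-vport firmware queries:

#include <stdio.h>

static int consensus_mode(const unsigned char *vport_modes, int nvfs,
                          unsigned char *mode)
{
    unsigned char prev = 0;

    for (int vport = 1; vport <= nvfs; vport++) {
        *mode = vport_modes[vport - 1];
        if (vport > 1 && prev != *mode)
            return -1; /* vports disagree */
        prev = *mode;
    }
    return 0;
}

int main(void)
{
    unsigned char modes[] = { 1, 1, 1 }, mode = 0;

    printf("consensus=%d mode=%u\n", consensus_mode(modes, 3, &mode), mode);
    return 0;
}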
@@ -1029,7 +1029,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 	if (err) {
 		dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
 			FW_INIT_TIMEOUT_MILI);
-		goto out_err;
+		goto err_cmd_cleanup;
 	}

 	err = mlx5_core_enable_hca(dev, 0);
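The one-line change retargets the firmware-timeout error path: by that point the command interface is already initialized, and jumping to out_err skipped the cleanup that the err_cmd_cleanup label performs. A generic illustration of ordered goto unwinding (hypothetical step names, not the driver's real init sequence):

#include <stdio.h>

static int setup_a(void)       { puts("setup a"); return 0; }
static void teardown_a(void)   { puts("teardown a"); }
static int setup_b_fails(void) { puts("setup b (fails)"); return -1; }

static int load_one(void)
{
    int err = setup_a();

    if (err)
        goto out_err;

    err = setup_b_fails();
    if (err)
        goto err_cleanup_a; /* not out_err: a is already up */

    return 0;

err_cleanup_a:
    teardown_a();
out_err:
    return err;
}

int main(void)
{
    return load_one() ? 1 : 0;
}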
@@ -87,6 +87,7 @@ static void up_rel_func(struct kref *kref)
 	struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);

 	list_del(&up->list);
+	iounmap(up->map);
 	if (mlx5_cmd_free_uar(up->mdev, up->index))
 		mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
 	kfree(up->reg_bitmap);
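The added iounmap() closes a leak in the UAR release path: the page's MMIO mapping, created when the UAR page was allocated, was never unmapped before the index was returned to firmware and the bookkeeping freed. A userspace analogue of the pairing rule, using mmap/munmap:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096;
    void *map = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (map == MAP_FAILED)
        return 1;
    /* ... use the mapping ... */
    munmap(map, len); /* mirrors the iounmap(up->map) the fix adds */
    puts("mapped and released");
    return 0;
}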