Commit efad54a1 authored by David S. Miller

Merge branch 'mlx5-fixes'

Saeed Mahameed says:

====================
Mellanox mlx5 fixes 2017-03-21

This series contains some mlx5 core and ethernet driver fixes.

For -stable:
net/mlx5e: Count LRO packets correctly (for kernel >= 4.2)
net/mlx5e: Count GSO packets correctly (for kernel >= 4.2)
net/mlx5: Increase number of max QPs in default profile (for kernel >= 4.0)
net/mlx5e: Avoid supporting udp tunnel port ndo for VF reps (for kernel >= 4.10)
net/mlx5e: Use the proper UAPI values when offloading TC vlan actions (for kernel >= v4.9)
net/mlx5: E-Switch, Don't allow changing inline mode when flows are configured (for kernel >= 4.10)
net/mlx5e: Change the TC offload rule add/del code path to be per NIC or E-Switch (for kernel >= 4.10)
net/mlx5: Add missing entries for set/query rate limit commands (for kernel >= 4.8)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents bf601fe5 8ab7e2ae
@@ -361,6 +361,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
 	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
 	case MLX5_CMD_OP_QUERY_Q_COUNTER:
+	case MLX5_CMD_OP_SET_RATE_LIMIT:
+	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
 	case MLX5_CMD_OP_ALLOC_PD:
 	case MLX5_CMD_OP_ALLOC_UAR:
 	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
@@ -497,6 +499,8 @@ const char *mlx5_command_str(int command)
 	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
 	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
 	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
+	MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
+	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
 	MLX5_COMMAND_STR_CASE(ALLOC_PD);
 	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
 	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
...
@@ -928,10 +928,6 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
 int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
 void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
 u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
-void mlx5e_add_vxlan_port(struct net_device *netdev,
-			  struct udp_tunnel_info *ti);
-void mlx5e_del_vxlan_port(struct net_device *netdev,
-			  struct udp_tunnel_info *ti);
 int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
 			    void *sp);
...
@@ -3100,8 +3100,8 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
 					    vf_stats);
 }
 
-void mlx5e_add_vxlan_port(struct net_device *netdev,
-			  struct udp_tunnel_info *ti)
+static void mlx5e_add_vxlan_port(struct net_device *netdev,
+				 struct udp_tunnel_info *ti)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3114,8 +3114,8 @@ void mlx5e_add_vxlan_port(struct net_device *netdev,
 	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
 }
 
-void mlx5e_del_vxlan_port(struct net_device *netdev,
-			  struct udp_tunnel_info *ti)
+static void mlx5e_del_vxlan_port(struct net_device *netdev,
+				 struct udp_tunnel_info *ti)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
...
@@ -393,8 +393,6 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
 	.ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
 	.ndo_setup_tc           = mlx5e_rep_ndo_setup_tc,
 	.ndo_get_stats64        = mlx5e_rep_get_stats,
-	.ndo_udp_tunnel_add     = mlx5e_add_vxlan_port,
-	.ndo_udp_tunnel_del     = mlx5e_del_vxlan_port,
 	.ndo_has_offload_stats  = mlx5e_has_offload_stats,
 	.ndo_get_offload_stats  = mlx5e_get_offload_stats,
 };
...
@@ -601,6 +601,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 	if (lro_num_seg > 1) {
 		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
 		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
+		/* Subtract one since we already counted this as one
+		 * "regular" packet in mlx5e_complete_rx_cqe()
+		 */
+		rq->stats.packets += lro_num_seg - 1;
 		rq->stats.lro_packets++;
 		rq->stats.lro_bytes += cqe_bcnt;
 	}
...
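The RX counting fix above keeps stats.packets in line with the number of wire packets an LRO session represents: mlx5e_complete_rx_cqe() has already counted the aggregated CQE as one packet, so only the remaining lro_num_seg - 1 are added here. A minimal standalone sketch of that accounting, using hypothetical types (rx_stats, handle_rx_cqe) rather than the driver's structs:

/* Sketch only: illustrates the counter arithmetic, not the driver code. */
#include <stdio.h>

struct rx_stats {
	unsigned long packets;
	unsigned long lro_packets;
	unsigned long lro_bytes;
};

/* Each completion is first counted as one packet (as in
 * mlx5e_complete_rx_cqe()); if it is an LRO aggregate of lro_num_seg
 * wire packets, the remaining lro_num_seg - 1 are added afterwards.
 */
static void handle_rx_cqe(struct rx_stats *s, unsigned int lro_num_seg,
			  unsigned int byte_cnt)
{
	s->packets++;				/* the "regular" count */
	if (lro_num_seg > 1) {
		s->packets += lro_num_seg - 1;	/* remaining segments */
		s->lro_packets++;
		s->lro_bytes += byte_cnt;
	}
}

int main(void)
{
	struct rx_stats s = { 0 };

	handle_rx_cqe(&s, 16, 24000);	/* one LRO aggregate of 16 packets */
	handle_rx_cqe(&s, 1, 1500);	/* one plain packet */
	printf("packets=%lu lro_packets=%lu lro_bytes=%lu\n",
	       s.packets, s.lro_packets, s.lro_bytes);	/* 17, 1, 24000 */
	return 0;
}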
@@ -133,6 +133,23 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 	return rule;
 }
 
+static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
+				  struct mlx5e_tc_flow *flow)
+{
+	struct mlx5_fc *counter = NULL;
+
+	if (!IS_ERR(flow->rule)) {
+		counter = mlx5_flow_rule_counter(flow->rule);
+		mlx5_del_flow_rules(flow->rule);
+		mlx5_fc_destroy(priv->mdev, counter);
+	}
+
+	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
+		mlx5_destroy_flow_table(priv->fs.tc.t);
+		priv->fs.tc.t = NULL;
+	}
+}
+
 static struct mlx5_flow_handle *
 mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 		      struct mlx5_flow_spec *spec,
@@ -149,7 +166,24 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 }
 
 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
-			       struct mlx5e_tc_flow *flow) {
+			       struct mlx5e_tc_flow *flow);
+
+static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
+				  struct mlx5e_tc_flow *flow)
+{
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+	mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->attr);
+
+	mlx5_eswitch_del_vlan_action(esw, flow->attr);
+
+	if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+		mlx5e_detach_encap(priv, flow);
+}
+
+static void mlx5e_detach_encap(struct mlx5e_priv *priv,
+			       struct mlx5e_tc_flow *flow)
+{
 	struct list_head *next = flow->encap.next;
 
 	list_del(&flow->encap);
@@ -173,25 +207,10 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 			      struct mlx5e_tc_flow *flow)
 {
-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	struct mlx5_fc *counter = NULL;
-
-	if (!IS_ERR(flow->rule)) {
-		counter = mlx5_flow_rule_counter(flow->rule);
-		mlx5_del_flow_rules(flow->rule);
-		mlx5_fc_destroy(priv->mdev, counter);
-	}
-
-	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
-		mlx5_eswitch_del_vlan_action(esw, flow->attr);
-		if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
-			mlx5e_detach_encap(priv, flow);
-	}
-
-	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
-		mlx5_destroy_flow_table(priv->fs.tc.t);
-		priv->fs.tc.t = NULL;
-	}
+	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+		mlx5e_tc_del_fdb_flow(priv, flow);
+	else
+		mlx5e_tc_del_nic_flow(priv, flow);
 }
 
 static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
@@ -248,12 +267,15 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 			skb_flow_dissector_target(f->dissector,
 						  FLOW_DISSECTOR_KEY_ENC_PORTS,
 						  f->mask);
+		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+		struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+		struct mlx5e_priv *up_priv = netdev_priv(up_dev);
 
 		/* Full udp dst port must be given */
 		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
 			goto vxlan_match_offload_err;
 
-		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
+		if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
 		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
 			parse_vxlan_attr(spec, f);
 		else {
@@ -976,6 +998,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 			      struct mlx5_esw_flow_attr *attr)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
 	unsigned short family = ip_tunnel_info_af(tun_info);
 	struct ip_tunnel_key *key = &tun_info->key;
 	struct mlx5_encap_entry *e;
@@ -996,7 +1020,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 		return -EOPNOTSUPP;
 	}
 
-	if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
+	if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
 	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
 		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
 	} else {
@@ -1112,14 +1136,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 		}
 
 		if (is_tcf_vlan(a)) {
-			if (tcf_vlan_action(a) == VLAN_F_POP) {
+			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
-			} else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
+			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
 				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
 					return -EOPNOTSUPP;
 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
 				attr->vlan = tcf_vlan_push_vid(a);
+			} else { /* action is TCA_VLAN_ACT_MODIFY */
+				return -EOPNOTSUPP;
 			}
 			continue;
 		}
...
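tcf_vlan_action() reports the configured action using the TCA_VLAN_ACT_* values from the UAPI header (include/uapi/linux/tc_act/tc_vlan.h), so the driver now matches on those and explicitly rejects TCA_VLAN_ACT_MODIFY. A minimal user-space sketch of the same dispatch, with the constants copied from the UAPI header and a hypothetical offload_vlan_action() helper standing in for the driver logic:

/* Sketch only: shows the dispatch on the UAPI action values. */
#include <stdio.h>
#include <errno.h>

#define TCA_VLAN_ACT_POP	1
#define TCA_VLAN_ACT_PUSH	2
#define TCA_VLAN_ACT_MODIFY	3

static int offload_vlan_action(int act)
{
	switch (act) {
	case TCA_VLAN_ACT_POP:
		return 0;		/* pop is offloadable */
	case TCA_VLAN_ACT_PUSH:
		return 0;		/* push is offloadable (802.1Q only in the driver) */
	default:			/* TCA_VLAN_ACT_MODIFY and anything else */
		return -EOPNOTSUPP;
	}
}

int main(void)
{
	printf("pop: %d, push: %d, modify: %d\n",
	       offload_vlan_action(TCA_VLAN_ACT_POP),
	       offload_vlan_action(TCA_VLAN_ACT_PUSH),
	       offload_vlan_action(TCA_VLAN_ACT_MODIFY));
	return 0;
}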
@@ -274,15 +274,18 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 			sq->stats.tso_bytes += skb->len - ihs;
 		}
 
+		sq->stats.packets += skb_shinfo(skb)->gso_segs;
 		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
 	} else {
 		bf = sq->bf_budget &&
 		     !skb->xmit_more &&
 		     !skb_shinfo(skb)->nr_frags;
 		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
+		sq->stats.packets++;
 		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
 	}
 
+	sq->stats.bytes += num_bytes;
 	wi->num_bytes = num_bytes;
 
 	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
@@ -381,8 +384,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 	if (bf)
 		sq->bf_budget--;
 
-	sq->stats.packets++;
-	sq->stats.bytes += num_bytes;
 	return NETDEV_TX_OK;
 
 dma_unmap_wqe_err:
...
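With the TX fix above, a TSO skb is counted as skb_shinfo(skb)->gso_segs wire packets rather than one, and the byte count includes the headers the NIC replicates on every segment after the first. A minimal sketch of the arithmetic with made-up numbers (skb_len, gso_segs and ihs below are illustrative, not taken from the driver):

/* Sketch only: the counter arithmetic for one TSO skb. */
#include <stdio.h>

int main(void)
{
	unsigned int skb_len  = 64000;	/* payload plus one set of headers */
	unsigned int gso_segs = 44;	/* segments the NIC will emit */
	unsigned int ihs      = 54;	/* inline header size (Eth+IP+TCP) */
	unsigned long packets = 0, bytes = 0;

	/* One skb accounts for gso_segs wire packets... */
	packets += gso_segs;
	/* ...and headers are replicated on every segment after the first. */
	bytes += skb_len + (gso_segs - 1) * ihs;

	printf("packets=%lu bytes=%lu\n", packets, bytes);	/* 44, 66322 */
	return 0;
}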
@@ -209,6 +209,7 @@ struct mlx5_esw_offload {
 	struct mlx5_eswitch_rep *vport_reps;
 	DECLARE_HASHTABLE(encap_tbl, 8);
 	u8 inline_mode;
+	u64 num_flows;
 };
 
 struct mlx5_eswitch {
@@ -271,6 +272,11 @@ struct mlx5_flow_handle *
 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 				struct mlx5_flow_spec *spec,
 				struct mlx5_esw_flow_attr *attr);
+void
+mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
+				struct mlx5_flow_handle *rule,
+				struct mlx5_esw_flow_attr *attr);
+
 struct mlx5_flow_handle *
 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
...
@@ -93,10 +93,27 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 				   spec, &flow_act, dest, i);
 	if (IS_ERR(rule))
 		mlx5_fc_destroy(esw->dev, counter);
+	else
+		esw->offloads.num_flows++;
 
 	return rule;
 }
 
+void
+mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
+				struct mlx5_flow_handle *rule,
+				struct mlx5_esw_flow_attr *attr)
+{
+	struct mlx5_fc *counter = NULL;
+
+	if (!IS_ERR(rule)) {
+		counter = mlx5_flow_rule_counter(rule);
+		mlx5_del_flow_rules(rule);
+		mlx5_fc_destroy(esw->dev, counter);
+		esw->offloads.num_flows--;
+	}
+}
+
 static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
 {
 	struct mlx5_eswitch_rep *rep;
@@ -908,6 +925,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
 	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
 		return -EOPNOTSUPP;
 
+	if (esw->offloads.num_flows > 0) {
+		esw_warn(dev, "Can't set inline mode when flows are configured\n");
+		return -EOPNOTSUPP;
+	}
+
 	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
 	if (err)
 		goto out;
...
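The E-Switch change above amounts to a simple counter: num_flows goes up when an offloaded rule is installed, down when it is deleted, and mlx5_devlink_eswitch_inline_mode_set() refuses to change the inline mode while the counter is non-zero. A minimal standalone sketch of that guard, with illustrative names (struct offloads, set_inline_mode) rather than the driver's:

/* Sketch only: the "no mode change while flows exist" guard pattern. */
#include <stdio.h>
#include <errno.h>

struct offloads {
	unsigned long long num_flows;
	int inline_mode;
};

static void add_rule(struct offloads *o) { o->num_flows++; }
static void del_rule(struct offloads *o) { o->num_flows--; }

static int set_inline_mode(struct offloads *o, int mode)
{
	if (o->num_flows > 0)
		return -EOPNOTSUPP;	/* flows are configured; refuse */
	o->inline_mode = mode;
	return 0;
}

int main(void)
{
	struct offloads o = { 0 };

	add_rule(&o);
	printf("with flows: %d\n", set_inline_mode(&o, 2));	/* -EOPNOTSUPP */
	del_rule(&o);
	printf("no flows:   %d\n", set_inline_mode(&o, 2));	/* 0 */
	return 0;
}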
@@ -87,7 +87,7 @@ static struct mlx5_profile profile[] = {
 	[2] = {
 		.mask		= MLX5_PROF_MASK_QP_SIZE |
 				  MLX5_PROF_MASK_MR_CACHE,
-		.log_max_qp	= 17,
+		.log_max_qp	= 18,
 		.mr_cache[0]	= {
 			.size	= 500,
 			.limit	= 250
...
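log_max_qp sets the default profile's QP limit as a power of two, so raising it from 17 to 18 doubles the default from 131072 to 262144 QPs. A one-line check of the arithmetic:

/* Sketch only: the QP limit implied by log_max_qp. */
#include <stdio.h>

int main(void)
{
	printf("log_max_qp=17 -> %d QPs, log_max_qp=18 -> %d QPs\n",
	       1 << 17, 1 << 18);	/* 131072 and 262144 */
	return 0;
}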