Commit 9c4ff2a9 authored by David S. Miller

Merge tag 'mlx5-fixes-2018-02-20' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2018-02-20

The following pull request includes some fixes for the mlx5 core and
netdevice driver.

Please pull and let me know if there's any issue.

-stable 4.10.y:
('net/mlx5e: Fix loopback self test when GRO is off')

-stable 4.12.y:
('net/mlx5e: Specify numa node when allocating drop rq')

-stable 4.13.y:
('net/mlx5e: Verify inline header size do not exceed SKB linear size')

-stable 4.15.y:
('net/mlx5e: Fix TCP checksum in LRO buffers')
('net/mlx5: Fix error handling when adding flow rules')
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 943a0d4a 9238e380
@@ -96,10 +96,10 @@ static void print_lyr_2_4_hdrs(struct trace_seq *p,
 			       "%pI4");
 	} else if (ethertype.v == ETH_P_IPV6) {
 		static const struct in6_addr full_ones = {
-			.in6_u.u6_addr32 = {htonl(0xffffffff),
-					    htonl(0xffffffff),
-					    htonl(0xffffffff),
-					    htonl(0xffffffff)},
+			.in6_u.u6_addr32 = {__constant_htonl(0xffffffff),
+					    __constant_htonl(0xffffffff),
+					    __constant_htonl(0xffffffff),
+					    __constant_htonl(0xffffffff)},
 		};
 		DECLARE_MASK_VAL(struct in6_addr, src_ipv6);
 		DECLARE_MASK_VAL(struct in6_addr, dst_ipv6);
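
The initializer above lives at file scope, where C requires a constant
expression. htonl() is not guaranteed to be one on every compiler and
config (older gcc in particular cannot fold it there), while the
__constant_htonl() variant always is. A minimal kernel-style sketch of
the distinction, not taken from the driver:

    #include <asm/byteorder.h>

    /* file-scope initializer: must be a constant expression */
    static const __be32 mask_word = __constant_htonl(0xffffffff);

    static void example(void)
    {
            /* in a function body the plain form is fine */
            __be32 w = htonl(0xffffffff);
            (void)w;
    }
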
@@ -1768,13 +1768,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
 	param->wq.linear = 1;
 }
 
-static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
+static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
+				      struct mlx5e_rq_param *param)
 {
 	void *rqc = param->rqc;
 	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
 
 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
 	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+
+	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
 }
 
 static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
@@ -2634,6 +2637,9 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
 			       struct mlx5e_cq *cq,
 			       struct mlx5e_cq_param *param)
 {
+	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
+	param->wq.db_numa_node  = dev_to_node(&mdev->pdev->dev);
+
 	return mlx5e_alloc_cq_common(mdev, param, cq);
 }
@@ -2645,7 +2651,7 @@ static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
 	struct mlx5e_cq *cq = &drop_rq->cq;
 	int err;
 
-	mlx5e_build_drop_rq_param(&rq_param);
+	mlx5e_build_drop_rq_param(mdev, &rq_param);
 
 	err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
 	if (err)
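
The three hunks above thread the device through to the drop RQ/CQ
allocation so the queue buffers and doorbells land on the device's NUMA
node instead of a default one. The underlying pattern, as a hedged
sketch with illustrative names (not the driver's allocation path):

    /* resolve the NUMA node closest to a PCI device, allocate there */
    int node = dev_to_node(&pdev->dev);      /* may be NUMA_NO_NODE */
    void *wq_buf = kzalloc_node(wq_size, GFP_KERNEL, node);
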
@@ -2994,8 +3000,8 @@ static int mlx5e_setup_tc_block(struct net_device *dev,
 }
 #endif
 
-int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
-		   void *type_data)
+static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
+			  void *type_data)
 {
 	switch (type) {
 #ifdef CONFIG_MLX5_ESWITCH
@@ -36,6 +36,7 @@
 #include <linux/tcp.h>
 #include <linux/bpf_trace.h>
 #include <net/busy_poll.h>
+#include <net/ip6_checksum.h>
 #include "en.h"
 #include "en_tc.h"
 #include "eswitch.h"
@@ -546,20 +547,33 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
 	return true;
 }
 
+static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
+{
+	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
+	u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
+		     (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
+
+	tcp->check = 0;
+	tcp->psh = get_cqe_lro_tcppsh(cqe);
+	if (tcp_ack) {
+		tcp->ack = 1;
+		tcp->ack_seq = cqe->lro_ack_seq_num;
+		tcp->window = cqe->lro_tcp_win;
+	}
+}
+
 static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
 				 u32 cqe_bcnt)
 {
 	struct ethhdr *eth = (struct ethhdr *)(skb->data);
 	struct tcphdr *tcp;
 	int network_depth = 0;
+	__wsum check;
 	__be16 proto;
 	u16 tot_len;
 	void *ip_p;
-	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
-	u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
-		(l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
 
 	proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
 	tot_len = cqe_bcnt - network_depth;
@@ -576,23 +590,30 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
 		ipv4->check = 0;
 		ipv4->check = ip_fast_csum((unsigned char *)ipv4,
 					   ipv4->ihl);
+
+		mlx5e_lro_update_tcp_hdr(cqe, tcp);
+		check = csum_partial(tcp, tcp->doff * 4,
+				     csum_unfold((__force __sum16)cqe->check_sum));
+		/* Almost done, don't forget the pseudo header */
+		tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
+					       tot_len - sizeof(struct iphdr),
+					       IPPROTO_TCP, check);
 	} else {
+		u16 payload_len = tot_len - sizeof(struct ipv6hdr);
 		struct ipv6hdr *ipv6 = ip_p;
 
 		tcp = ip_p + sizeof(struct ipv6hdr);
 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 
 		ipv6->hop_limit = cqe->lro_min_ttl;
-		ipv6->payload_len = cpu_to_be16(tot_len -
-						sizeof(struct ipv6hdr));
-	}
-
-	tcp->psh = get_cqe_lro_tcppsh(cqe);
-
-	if (tcp_ack) {
-		tcp->ack = 1;
-		tcp->ack_seq = cqe->lro_ack_seq_num;
-		tcp->window = cqe->lro_tcp_win;
+		ipv6->payload_len = cpu_to_be16(payload_len);
+
+		mlx5e_lro_update_tcp_hdr(cqe, tcp);
+		check = csum_partial(tcp, tcp->doff * 4,
+				     csum_unfold((__force __sum16)cqe->check_sum));
+		/* Almost done, don't forget the pseudo header */
+		tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
+					     IPPROTO_TCP, check);
 	}
 }
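
In the LRO hunks above, the driver rewrites the IP and TCP headers of
the aggregated packet, which invalidates the TCP checksum. The rebuild
is two steps: csum_partial() over the new TCP header, seeded with the
hardware-reported cqe->check_sum, then csum_tcpudp_magic() or
csum_ipv6_magic() to add the pseudo header and fold the result. A
self-contained sketch of the ones-complement arithmetic those helpers
perform, with made-up header values (plain C, illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* 16-bit ones-complement add with end-around carry */
    static uint32_t csum_add(uint32_t sum, uint32_t word)
    {
            sum += word;
            return (sum & 0xffff) + (sum >> 16);
    }

    int main(void)
    {
            /* IPv4 pseudo header: saddr, daddr, proto, TCP length */
            uint16_t words[] = { 0xc0a8, 0x0001,    /* 192.168.0.1 */
                                 0xc0a8, 0x0002,    /* 192.168.0.2 */
                                 0x0006,            /* IPPROTO_TCP */
                                 0x05dc };          /* length 1500 */
            uint32_t sum = 0;
            unsigned i;

            for (i = 0; i < sizeof(words) / sizeof(words[0]); i++)
                    sum = csum_add(sum, words[i]);
            /* the final complement is what lands in tcp->check */
            printf("sum 0x%04x -> check 0x%04x\n", sum, ~sum & 0xffff);
            return 0;
    }
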
@@ -216,7 +216,8 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
 	if (iph->protocol != IPPROTO_UDP)
 		goto out;
 
-	udph = udp_hdr(skb);
+	/* Don't assume skb_transport_header() was set */
+	udph = (struct udphdr *)((u8 *)iph + 4 * iph->ihl);
 
 	if (udph->dest != htons(9))
 		goto out;
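
The self-test fix works because the validation callback can run before
the transport header offset has been set on the skb (the commit title
points at the GRO-off path), so udp_hdr() cannot be trusted there.
Since iph->ihl gives the IPv4 header length in 32-bit words, the UDP
header simply starts 4 * ihl bytes in. A small sketch of the same walk
(illustrative names, mirrors the hunk):

    #include <linux/ip.h>
    #include <linux/udp.h>

    static struct udphdr *udp_hdr_from_iphdr(struct iphdr *iph)
    {
            /* ihl == 5 for an option-less 20-byte header */
            return (struct udphdr *)((u8 *)iph + 4 * iph->ihl);
    }
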
@@ -2529,7 +2529,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
 			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
-				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
+				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
+				    tcf_vlan_push_prio(a))
 					return -EOPNOTSUPP;
 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
@@ -176,7 +176,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
 	default:
 		hlen = mlx5e_skb_l2_header_offset(skb);
 	}
-	return min_t(u16, hlen, skb->len);
+	return min_t(u16, hlen, skb_headlen(skb));
 }
 
 static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
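
The inline-header fix matters because skb->len counts linear plus paged
data, while only skb_headlen() bytes are readable through skb->data;
clamping against skb->len could let the inline copy run past the linear
buffer into memory that belongs to the frags. A sketch of the invariant
with illustrative names:

    #include <linux/skbuff.h>

    static void copy_inline_headers(void *inl_seg, struct sk_buff *skb,
                                    u16 hlen)
    {
            /* skb->len == skb_headlen(skb) + skb->data_len */
            u16 copy = min_t(u16, hlen, skb_headlen(skb));

            memcpy(inl_seg, skb->data, copy);  /* never touches frags */
    }
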
@@ -1529,6 +1529,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
 
 	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
 
+	/* Create steering drop counters for ingress and egress ACLs */
+	if (vport_num && esw->mode == SRIOV_LEGACY)
+		esw_vport_create_drop_counters(vport);
+
 	/* Restore old vport configuration */
 	esw_apply_vport_conf(esw, vport);
@@ -1545,10 +1549,6 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
 	if (!vport_num)
 		vport->info.trusted = true;
 
-	/* create steering drop counters for ingress and egress ACLs */
-	if (vport_num && esw->mode == SRIOV_LEGACY)
-		esw_vport_create_drop_counters(vport);
-
 	esw_vport_change_handle_locked(vport);
 	esw->enabled_vports++;
@@ -1429,7 +1429,8 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
 	if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP  |
 			     MLX5_FLOW_CONTEXT_ACTION_ENCAP |
-			     MLX5_FLOW_CONTEXT_ACTION_DECAP))
+			     MLX5_FLOW_CONTEXT_ACTION_DECAP |
+			     MLX5_FLOW_CONTEXT_ACTION_MOD_HDR))
 		return true;
 
 	return false;
@@ -1758,8 +1759,11 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 
 	/* Collect all fgs which has a matching match_criteria */
 	err = build_match_list(&match_head, ft, spec);
-	if (err)
+	if (err) {
+		if (take_write)
+			up_write_ref_node(&ft->node);
 		return ERR_PTR(err);
+	}
 
 	if (!take_write)
 		up_read_ref_node(&ft->node);
@@ -1768,8 +1772,11 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 			   dest_num, version);
 	free_match_list(&match_head);
 	if (!IS_ERR(rule) ||
-	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN))
+	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
+		if (take_write)
+			up_write_ref_node(&ft->node);
 		return rule;
+	}
 
 	if (!take_write) {
 		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
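
Both fs_core hunks close the same leak: _mlx5_add_flow_rules() may hold
a write reference on the table node (take_write), and two early returns
skipped the release. A generic sketch of the release-on-every-exit
pattern being applied, with illustrative types and names:

    #include <linux/rwsem.h>
    #include <linux/err.h>

    struct table { struct rw_semaphore lock; };

    static void *lookup_or_build(struct table *t, bool take_write)
    {
            void *res = ERR_PTR(-ENOENT);

            if (take_write)
                    down_write(&t->lock);

            /* ... lookup/build steps that may fail ... */

            if (take_write)
                    up_write(&t->lock); /* on the error paths too */
            return res;
    }
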
@@ -34,6 +34,7 @@
 #include <linux/highmem.h>
 #include <rdma/mlx5-abi.h>
 #include "en.h"
+#include "clock.h"
 
 enum {
 	MLX5_CYCLES_SHIFT = 23
@@ -551,7 +551,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
 	MLX5_SET(cmd_hca_cap,
 		 set_hca_cap,
 		 cache_line_128byte,
-		 cache_line_size() == 128 ? 1 : 0);
+		 cache_line_size() >= 128 ? 1 : 0);
 
 	if (MLX5_CAP_GEN_MAX(dev, dct))
 		MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);
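
The last hunk widens the cache_line_128byte predicate: 128 bytes is,
presumably, the largest line the capability can express, so hosts whose
cache line is even larger (s390, for instance, reports 256) should
still set it. A tiny standalone check of the old versus new predicate
with illustrative cache-line sizes:

    #include <stdio.h>

    static int old_cap(int cls) { return cls == 128; }
    static int new_cap(int cls) { return cls >= 128; }

    int main(void)
    {
            int sizes[] = { 64, 128, 256 };
            int i;

            for (i = 0; i < 3; i++)
                    printf("cache line %3d: old=%d new=%d\n",
                           sizes[i], old_cap(sizes[i]), new_cap(sizes[i]));
            return 0; /* the 256-byte row flips from 0 to 1 */
    }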