Commit 3ef967a4 authored by Moni Shoua's avatar Moni Shoua Committed by Doug Ledford

IB/mlx4: Enable send of RoCE QP1 packets with IP/UDP headers

RoCEv2 packets are sent over IP/UDP protocols.
The mlx4 driver uses a type of RAW QP to send packets for QP1 and
therefore needs to build the network headers below BTH in software.

This patch adds an option to build QP1 packets with IP and UDP headers if
RoCEv2 is requested.
Signed-off-by: Moni Shoua <monis@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 71a39bbb
...@@ -280,7 +280,7 @@ static const struct ib_field deth_table[] = { ...@@ -280,7 +280,7 @@ static const struct ib_field deth_table[] = {
.size_bits = 24 } .size_bits = 24 }
}; };
__be16 ib_ud_ip4_csum(struct ib_ud_header *header) __sum16 ib_ud_ip4_csum(struct ib_ud_header *header)
{ {
struct iphdr iph; struct iphdr iph;
......
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
#include <linux/log2.h> #include <linux/log2.h>
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include <net/ip.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
...@@ -2285,6 +2286,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, ...@@ -2285,6 +2286,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
return 0; return 0;
} }
#define MLX4_ROCEV2_QP1_SPORT 0xC000
static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
void *wqe, unsigned *mlx_seg_len) void *wqe, unsigned *mlx_seg_len)
{ {
...@@ -2304,6 +2306,8 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, ...@@ -2304,6 +2306,8 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
bool is_eth; bool is_eth;
bool is_vlan = false; bool is_vlan = false;
bool is_grh; bool is_grh;
bool is_udp = false;
int ip_version = 0;
send_size = 0; send_size = 0;
for (i = 0; i < wr->wr.num_sge; ++i) for (i = 0; i < wr->wr.num_sge; ++i)
...@@ -2312,6 +2316,8 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, ...@@ -2312,6 +2316,8 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET; is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
is_grh = mlx4_ib_ah_grh_present(ah); is_grh = mlx4_ib_ah_grh_present(ah);
if (is_eth) { if (is_eth) {
struct ib_gid_attr gid_attr;
if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) { if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
/* When multi-function is enabled, the ib_core gid /* When multi-function is enabled, the ib_core gid
* indexes don't necessarily match the hw ones, so * indexes don't necessarily match the hw ones, so
...@@ -2325,20 +2331,33 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, ...@@ -2325,20 +2331,33 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
err = ib_get_cached_gid(ib_dev, err = ib_get_cached_gid(ib_dev,
be32_to_cpu(ah->av.ib.port_pd) >> 24, be32_to_cpu(ah->av.ib.port_pd) >> 24,
ah->av.ib.gid_index, &sgid, ah->av.ib.gid_index, &sgid,
NULL); &gid_attr);
if (!err && !memcmp(&sgid, &zgid, sizeof(sgid))) if (!err) {
err = -ENOENT; if (gid_attr.ndev)
if (err) dev_put(gid_attr.ndev);
if (!memcmp(&sgid, &zgid, sizeof(sgid)))
err = -ENOENT;
}
if (!err) {
is_udp = gid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
if (is_udp) {
if (ipv6_addr_v4mapped((struct in6_addr *)&sgid))
ip_version = 4;
else
ip_version = 6;
is_grh = false;
}
} else {
return err; return err;
}
} }
if (ah->av.eth.vlan != cpu_to_be16(0xffff)) { if (ah->av.eth.vlan != cpu_to_be16(0xffff)) {
vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff; vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
is_vlan = 1; is_vlan = 1;
} }
} }
err = ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, err = ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh,
0, 0, 0, &sqp->ud_header); ip_version, is_udp, 0, &sqp->ud_header);
if (err) if (err)
return err; return err;
...@@ -2349,7 +2368,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, ...@@ -2349,7 +2368,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f); sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f);
} }
if (is_grh) { if (is_grh || (ip_version == 6)) {
sqp->ud_header.grh.traffic_class = sqp->ud_header.grh.traffic_class =
(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff; (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
sqp->ud_header.grh.flow_label = sqp->ud_header.grh.flow_label =
...@@ -2378,6 +2397,25 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, ...@@ -2378,6 +2397,25 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
ah->av.ib.dgid, 16); ah->av.ib.dgid, 16);
} }
if (ip_version == 4) {
sqp->ud_header.ip4.tos =
(be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff;
sqp->ud_header.ip4.id = 0;
sqp->ud_header.ip4.frag_off = htons(IP_DF);
sqp->ud_header.ip4.ttl = ah->av.eth.hop_limit;
memcpy(&sqp->ud_header.ip4.saddr,
sgid.raw + 12, 4);
memcpy(&sqp->ud_header.ip4.daddr, ah->av.ib.dgid + 12, 4);
sqp->ud_header.ip4.check = ib_ud_ip4_csum(&sqp->ud_header);
}
if (is_udp) {
sqp->ud_header.udp.dport = htons(ROCE_V2_UDP_DPORT);
sqp->ud_header.udp.sport = htons(MLX4_ROCEV2_QP1_SPORT);
sqp->ud_header.udp.csum = 0;
}
mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
if (!is_eth) { if (!is_eth) {
...@@ -2406,8 +2444,12 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, ...@@ -2406,8 +2444,12 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
if (is_eth) { if (is_eth) {
struct in6_addr in6; struct in6_addr in6;
u16 ether_type;
u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13; u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
ether_type = (!is_udp) ? MLX4_IB_IBOE_ETHERTYPE :
(ip_version == 4 ? ETH_P_IP : ETH_P_IPV6);
mlx->sched_prio = cpu_to_be16(pcp); mlx->sched_prio = cpu_to_be16(pcp);
ether_addr_copy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac); ether_addr_copy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac);
...@@ -2420,9 +2462,9 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, ...@@ -2420,9 +2462,9 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6)) if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
if (!is_vlan) { if (!is_vlan) {
sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE); sqp->ud_header.eth.type = cpu_to_be16(ether_type);
} else { } else {
sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE); sqp->ud_header.vlan.type = cpu_to_be16(ether_type);
sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp); sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
} }
} else { } else {
......
...@@ -234,7 +234,7 @@ struct ib_unpacked_ip4 { ...@@ -234,7 +234,7 @@ struct ib_unpacked_ip4 {
__be16 frag_off; __be16 frag_off;
u8 ttl; u8 ttl;
u8 protocol; u8 protocol;
__be16 check; __sum16 check;
__be32 saddr; __be32 saddr;
__be32 daddr; __be32 daddr;
}; };
...@@ -280,7 +280,7 @@ void ib_unpack(const struct ib_field *desc, ...@@ -280,7 +280,7 @@ void ib_unpack(const struct ib_field *desc,
void *buf, void *buf,
void *structure); void *structure);
__be16 ib_ud_ip4_csum(struct ib_ud_header *header); __sum16 ib_ud_ip4_csum(struct ib_ud_header *header);
int ib_ud_header_init(int payload_bytes, int ib_ud_header_init(int payload_bytes,
int lrh_present, int lrh_present,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment