Commit 2811ba51 authored by Achiad Shochat, committed by Doug Ledford

IB/mlx5: Add RoCE fields to Address Vector

Set the address handle and QP address path fields according to the
link layer type (IB/Eth).

Signed-off-by: Achiad Shochat <achiad@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>

parent 3cca2606
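In short, the driver now programs two different sets of addressing fields depending on whether the port's link layer is Ethernet (RoCE) or native InfiniBand. Below is a minimal, self-contained C sketch of that rule only; the struct and names are illustrative and are not the driver's types.

/*
 * Minimal sketch of the rule this patch implements (illustrative types,
 * not the driver's): RoCE (Ethernet) paths are addressed by destination
 * MAC, GID and a RoCE v2 UDP source port, while native IB paths keep
 * using the remote LID, source path bits and the 4-bit SL.
 */
#include <stdint.h>
#include <string.h>

enum sketch_link_layer { SKETCH_LL_INFINIBAND, SKETCH_LL_ETHERNET };

struct sketch_av {
        uint16_t rlid;          /* IB only: remote LID */
        uint8_t  fl_mlid;       /* IB only: force-loopback / source path bits */
        uint8_t  rmac[6];       /* RoCE only: destination MAC */
        uint16_t udp_sport;     /* RoCE v2 only: UDP source port */
        uint8_t  stat_rate_sl;  /* static rate in the high nibble, SL/prio below */
};

static void sketch_fill_av(struct sketch_av *av, enum sketch_link_layer ll,
                           uint16_t dlid, uint8_t src_path_bits, uint8_t sl,
                           uint8_t static_rate, const uint8_t dmac[6],
                           uint16_t udp_sport)
{
        memset(av, 0, sizeof(*av));
        av->stat_rate_sl = (uint8_t)(static_rate << 4);

        if (ll == SKETCH_LL_ETHERNET) {
                memcpy(av->rmac, dmac, sizeof(av->rmac));
                av->udp_sport = udp_sport;
                av->stat_rate_sl |= (uint8_t)((sl & 0x7) << 1); /* 3-bit priority */
        } else {
                av->rlid = dlid;
                av->fl_mlid = src_path_bits & 0x7f;
                av->stat_rate_sl |= (uint8_t)(sl & 0xf);        /* 4-bit SL */
        }
}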
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -32,8 +32,10 @@
 #include "mlx5_ib.h"
 
-struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr,
-                           struct mlx5_ib_ah *ah)
+static struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
+                                  struct mlx5_ib_ah *ah,
+                                  struct ib_ah_attr *ah_attr,
+                                  enum rdma_link_layer ll)
 {
         if (ah_attr->ah_flags & IB_AH_GRH) {
                 memcpy(ah->av.rgid, &ah_attr->grh.dgid, 16);
@@ -44,9 +46,20 @@ struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr,
                 ah->av.tclass = ah_attr->grh.traffic_class;
         }
 
-        ah->av.rlid = cpu_to_be16(ah_attr->dlid);
-        ah->av.fl_mlid = ah_attr->src_path_bits & 0x7f;
-        ah->av.stat_rate_sl = (ah_attr->static_rate << 4) | (ah_attr->sl & 0xf);
+        ah->av.stat_rate_sl = (ah_attr->static_rate << 4);
+
+        if (ll == IB_LINK_LAYER_ETHERNET) {
+                memcpy(ah->av.rmac, ah_attr->dmac, sizeof(ah_attr->dmac));
+                ah->av.udp_sport =
+                        mlx5_get_roce_udp_sport(dev,
+                                                ah_attr->port_num,
+                                                ah_attr->grh.sgid_index);
+                ah->av.stat_rate_sl |= (ah_attr->sl & 0x7) << 1;
+        } else {
+                ah->av.rlid = cpu_to_be16(ah_attr->dlid);
+                ah->av.fl_mlid = ah_attr->src_path_bits & 0x7f;
+                ah->av.stat_rate_sl |= (ah_attr->sl & 0xf);
+        }
 
         return &ah->ibah;
 }
@@ -54,12 +67,19 @@ struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr,
 struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 {
         struct mlx5_ib_ah *ah;
+        struct mlx5_ib_dev *dev = to_mdev(pd->device);
+        enum rdma_link_layer ll;
+
+        ll = pd->device->get_link_layer(pd->device, ah_attr->port_num);
+
+        if (ll == IB_LINK_LAYER_ETHERNET && !(ah_attr->ah_flags & IB_AH_GRH))
+                return ERR_PTR(-EINVAL);
 
         ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
         if (!ah)
                 return ERR_PTR(-ENOMEM);
 
-        return create_ib_ah(ah_attr, ah); /* never fails */
+        return create_ib_ah(dev, ah, ah_attr, ll); /* never fails */
 }
 
 int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
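A hedged caller-side sketch of the new constraint (not code from this patch): on a RoCE (Ethernet) port an address handle must carry a GRH, because the GID, destination MAC and UDP source port replace LID-based addressing, and mlx5_ib_create_ah() now rejects a GRH-less attribute with -EINVAL. The function name, GID index and hop limit below are placeholders.

/* Hypothetical kernel caller; assumes <rdma/ib_verbs.h>.  dgid and dmac
 * are expected to come from address resolution before this is called. */
#include <linux/string.h>
#include <rdma/ib_verbs.h>

static struct ib_ah *roce_ah_example(struct ib_pd *pd, const union ib_gid *dgid,
                                     const u8 *dmac, u8 port)
{
        struct ib_ah_attr attr = {
                .port_num = port,
                .ah_flags = IB_AH_GRH,          /* mandatory on Ethernet */
                .grh = {
                        .sgid_index = 0,        /* placeholder GID index */
                        .hop_limit  = 64,
                },
        };

        attr.grh.dgid = *dgid;
        memcpy(attr.dmac, dmac, ETH_ALEN);      /* resolved L2 destination */

        return ib_create_ah(pd, &attr);         /* ERR_PTR(-EINVAL) without GRH */
}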
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -41,6 +41,7 @@
 #include <linux/sched.h>
 #include <rdma/ib_user_verbs.h>
 #include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
 #include <linux/mlx5/vport.h>
 #include <rdma/ib_smi.h>
 #include <rdma/ib_umem.h>
@@ -252,6 +253,26 @@ static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num,
         return set_roce_addr(device, port_num, index, NULL, NULL);
 }
 
+__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
+                               int index)
+{
+        struct ib_gid_attr attr;
+        union ib_gid gid;
+
+        if (ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr))
+                return 0;
+
+        if (!attr.ndev)
+                return 0;
+
+        dev_put(attr.ndev);
+
+        if (attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
+                return 0;
+
+        return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
+}
+
 static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
 {
         return !dev->mdev->issi;
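The helper above returns the device's minimum allowed RoCE v2 UDP source port, and 0 for GIDs that are not UDP-encapsulated (RoCE v2). A possible refinement, sketched below purely as an illustration and not part of this patch, would spread flows across the device's allowed source-port range for ECMP entropy; the function name and parameters are hypothetical.

/* Hypothetical helper, not in the patch: pick a UDP source port inside a
 * device-advertised [min_port, max_port] range from a per-flow hash, so
 * different flows can take different ECMP paths.  Assumes <linux/types.h>. */
#include <linux/types.h>

static u16 roce_udp_sport_spread(u16 min_port, u16 max_port, u32 flow_hash)
{
        if (max_port <= min_port)
                return min_port;

        return min_port + (u16)(flow_hash % (u32)(max_port - min_port + 1));
}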
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -517,8 +517,6 @@ void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
 int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
                  u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                  const void *in_mad, void *response_mad);
-struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr,
-                           struct mlx5_ib_ah *ah);
 struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
 int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
 int mlx5_ib_destroy_ah(struct ib_ah *ah);
@@ -647,6 +645,9 @@ static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}
 #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
 
+__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
+                               int index);
+
 static inline void init_query_mad(struct ib_smp *mad)
 {
         mad->base_version = 1;
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <rdma/ib_umem.h>
+#include <rdma/ib_cache.h>
 #include "mlx5_ib.h"
 #include "user.h"
@@ -1364,17 +1365,12 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
                          struct mlx5_qp_path *path, u8 port, int attr_mask,
                          u32 path_flags, const struct ib_qp_attr *attr)
 {
+        enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
         int err;
 
-        path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
-        path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;
-
         if (attr_mask & IB_QP_PKEY_INDEX)
                 path->pkey_index = attr->pkey_index;
 
-        path->grh_mlid  = ah->src_path_bits & 0x7f;
-        path->rlid      = cpu_to_be16(ah->dlid);
-
         if (ah->ah_flags & IB_AH_GRH) {
                 if (ah->grh.sgid_index >=
                     dev->mdev->port_caps[port - 1].gid_table_len) {
@@ -1383,7 +1379,27 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
                                dev->mdev->port_caps[port - 1].gid_table_len);
                         return -EINVAL;
                 }
-                path->grh_mlid |= 1 << 7;
+        }
+
+        if (ll == IB_LINK_LAYER_ETHERNET) {
+                if (!(ah->ah_flags & IB_AH_GRH))
+                        return -EINVAL;
+                memcpy(path->rmac, ah->dmac, sizeof(ah->dmac));
+                path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
+                                                          ah->grh.sgid_index);
+                path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
+        } else {
+                path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
+                path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 :
+                                                                        0;
+                path->rlid = cpu_to_be16(ah->dlid);
+                path->grh_mlid = ah->src_path_bits & 0x7f;
+                if (ah->ah_flags & IB_AH_GRH)
+                        path->grh_mlid |= 1 << 7;
+                path->dci_cfi_prio_sl = ah->sl & 0xf;
+        }
+
+        if (ah->ah_flags & IB_AH_GRH) {
                 path->mgid_index = ah->grh.sgid_index;
                 path->hop_limit  = ah->grh.hop_limit;
                 path->tclass_flowlabel =
@@ -1401,8 +1417,6 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
         if (attr_mask & IB_QP_TIMEOUT)
                 path->ackto_lt = attr->timeout << 3;
 
-        path->sl = ah->sl & 0xf;
-
         return 0;
 }
@@ -1765,15 +1779,21 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
         enum ib_qp_state cur_state, new_state;
         int err = -EINVAL;
         int port;
+        enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
 
         mutex_lock(&qp->mutex);
 
         cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
         new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
+        if (!(cur_state == new_state && cur_state == IB_QPS_RESET)) {
+                port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+                ll = dev->ib_dev.get_link_layer(&dev->ib_dev, port);
+        }
+
         if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
             !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
-                                IB_LINK_LAYER_UNSPECIFIED))
+                                ll))
                 goto out;
 
         if ((attr_mask & IB_QP_PORT) &&
@@ -3003,7 +3023,7 @@ static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_at
             ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports))
                 return;
 
-        ib_ah_attr->sl = path->sl & 0xf;
+        ib_ah_attr->sl = path->dci_cfi_prio_sl & 0xf;
 
         ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
         ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
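For reference, the service level lands in different bit positions depending on the link layer: in the address path byte dci_cfi_prio_sl the 3-bit Ethernet priority occupies bits 6:4 while the 4-bit IB SL stays in bits 3:0, and in the AV's stat_rate_sl the Ethernet priority sits in bits 3:1. The helpers below merely mirror that packing from the diff above; they are illustrative and not part of the patch.

/* Illustrative packing helpers; names are hypothetical.
 * Assumes kernel u8 from <linux/types.h>. */
#include <linux/types.h>

static inline u8 path_prio_sl_eth(u8 sl)
{
        return (sl & 0x7) << 4;         /* Ethernet: priority in bits 6:4 */
}

static inline u8 path_prio_sl_ib(u8 sl)
{
        return sl & 0xf;                /* InfiniBand: SL in bits 3:0 */
}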
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -248,8 +248,12 @@ struct mlx5_av {
         __be32  dqp_dct;
         u8      stat_rate_sl;
         u8      fl_mlid;
-        __be16  rlid;
-        u8      reserved0[10];
+        union {
+                __be16  rlid;
+                __be16  udp_sport;
+        };
+        u8      reserved0[4];
+        u8      rmac[6];
         u8      tclass;
         u8      hop_limit;
         __be32  grh_gid_fl;
@@ -456,11 +460,16 @@ struct mlx5_qp_path {
         u8                      static_rate;
         u8                      hop_limit;
         __be32                  tclass_flowlabel;
-        u8                      rgid[16];
-        u8                      rsvd1[4];
-        u8                      sl;
+        union {
+                u8              rgid[16];
+                u8              rip[16];
+        };
+        u8                      f_dscp_ecn_prio;
+        u8                      ecn_dscp;
+        __be16                  udp_sport;
+        u8                      dci_cfi_prio_sl;
         u8                      port;
-        u8                      rsvd2[6];
+        u8                      rmac[6];
 };
 
 struct mlx5_qp_context {
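The new RoCE members overlay bytes that were previously reserved, so the hardware-visible layout of struct mlx5_av and struct mlx5_qp_path does not change size: in the AV, 2 + 10 reserved bytes become 2 + 4 + 6, and in the QP path the 4 + 1 byte run between the GID and the port becomes 1 + 1 + 2 + 1. A build-time check along the following lines, which is not part of the patch, could guard against accidental layout drift.

/* Hypothetical sanity check (not in the patch); assumes <linux/bug.h>,
 * <linux/stddef.h> and the structure definitions in <linux/mlx5/qp.h>. */
#include <linux/bug.h>
#include <linux/stddef.h>
#include <linux/mlx5/qp.h>

static inline void mlx5_roce_layout_check(void)
{
        /* tclass sits 1 (fl_mlid) + 2 (rlid/udp_sport) + 4 + 6 bytes after fl_mlid */
        BUILD_BUG_ON(offsetof(struct mlx5_av, tclass) !=
                     offsetof(struct mlx5_av, fl_mlid) + 1 + 2 + 4 + 6);
        /* port sits 4 (tclass_flowlabel) + 16 + 1 + 1 + 2 + 1 bytes after tclass_flowlabel */
        BUILD_BUG_ON(offsetof(struct mlx5_qp_path, port) !=
                     offsetof(struct mlx5_qp_path, tclass_flowlabel) +
                     4 + 16 + 1 + 1 + 2 + 1);
}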