Commit 095b0927 authored by Ilan Tayari, committed by Saeed Mahameed

IB/mlx5: Respect mlx5_core reserved GIDs

Reserved GIDs are owned by mlx5_core, so report a correspondingly
smaller GID table size to the IB core.

Set mlx5_query_roce_port's return value back to int: in case of
error, return the error code to the caller. This rolls back part of
the change made in commit 50f22fd8 ("IB/mlx5: Set mlx5_query_roce_port's return value to void").

Change set_roce_addr to use the mlx5_core_roce_gid_set() helper
instead of building and sending the SET_ROCE_ADDRESS command directly.
Signed-off-by: Ilan Tayari <ilant@mellanox.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent a6f7d2af
...@@ -223,7 +223,7 @@ static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed, ...@@ -223,7 +223,7 @@ static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
return 0; return 0;
} }
static void mlx5_query_port_roce(struct ib_device *device, u8 port_num, static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
struct ib_port_attr *props) struct ib_port_attr *props)
{ {
struct mlx5_ib_dev *dev = to_mdev(device); struct mlx5_ib_dev *dev = to_mdev(device);
...@@ -232,12 +232,14 @@ static void mlx5_query_port_roce(struct ib_device *device, u8 port_num, ...@@ -232,12 +232,14 @@ static void mlx5_query_port_roce(struct ib_device *device, u8 port_num,
enum ib_mtu ndev_ib_mtu; enum ib_mtu ndev_ib_mtu;
u16 qkey_viol_cntr; u16 qkey_viol_cntr;
u32 eth_prot_oper; u32 eth_prot_oper;
int err;
/* Possible bad flows are checked before filling out props so in case /* Possible bad flows are checked before filling out props so in case
* of an error it will still be zeroed out. * of an error it will still be zeroed out.
*/ */
if (mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num)) err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num);
return; if (err)
return err;
translate_eth_proto_oper(eth_prot_oper, &props->active_speed, translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
&props->active_width); &props->active_width);
...@@ -258,7 +260,7 @@ static void mlx5_query_port_roce(struct ib_device *device, u8 port_num, ...@@ -258,7 +260,7 @@ static void mlx5_query_port_roce(struct ib_device *device, u8 port_num,
ndev = mlx5_ib_get_netdev(device, port_num); ndev = mlx5_ib_get_netdev(device, port_num);
if (!ndev) if (!ndev)
return; return 0;
if (mlx5_lag_is_active(dev->mdev)) { if (mlx5_lag_is_active(dev->mdev)) {
rcu_read_lock(); rcu_read_lock();
...@@ -281,75 +283,49 @@ static void mlx5_query_port_roce(struct ib_device *device, u8 port_num, ...@@ -281,75 +283,49 @@ static void mlx5_query_port_roce(struct ib_device *device, u8 port_num,
dev_put(ndev); dev_put(ndev);
props->active_mtu = min(props->max_mtu, ndev_ib_mtu); props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
return 0;
} }
static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid, static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
const struct ib_gid_attr *attr, unsigned int index, const union ib_gid *gid,
void *mlx5_addr) const struct ib_gid_attr *attr)
{ {
#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v) enum ib_gid_type gid_type = IB_GID_TYPE_IB;
char *mlx5_addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr, u8 roce_version = 0;
source_l3_address); u8 roce_l3_type = 0;
void *mlx5_addr_mac = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr, bool vlan = false;
source_mac_47_32); u8 mac[ETH_ALEN];
u16 vlan_id = 0;
if (!gid) if (gid) {
return; gid_type = attr->gid_type;
ether_addr_copy(mac, attr->ndev->dev_addr);
ether_addr_copy(mlx5_addr_mac, attr->ndev->dev_addr);
if (is_vlan_dev(attr->ndev)) { if (is_vlan_dev(attr->ndev)) {
MLX5_SET_RA(mlx5_addr, vlan_valid, 1); vlan = true;
MLX5_SET_RA(mlx5_addr, vlan_id, vlan_dev_vlan_id(attr->ndev)); vlan_id = vlan_dev_vlan_id(attr->ndev);
}
} }
switch (attr->gid_type) { switch (gid_type) {
case IB_GID_TYPE_IB: case IB_GID_TYPE_IB:
MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1); roce_version = MLX5_ROCE_VERSION_1;
break; break;
case IB_GID_TYPE_ROCE_UDP_ENCAP: case IB_GID_TYPE_ROCE_UDP_ENCAP:
MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2); roce_version = MLX5_ROCE_VERSION_2;
if (ipv6_addr_v4mapped((void *)gid))
roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
else
roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
break; break;
default: default:
WARN_ON(true); mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
} }
if (attr->gid_type != IB_GID_TYPE_IB) { return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
if (ipv6_addr_v4mapped((void *)gid)) roce_l3_type, gid->raw, mac, vlan,
MLX5_SET_RA(mlx5_addr, roce_l3_type, vlan_id);
MLX5_ROCE_L3_TYPE_IPV4);
else
MLX5_SET_RA(mlx5_addr, roce_l3_type,
MLX5_ROCE_L3_TYPE_IPV6);
}
if ((attr->gid_type == IB_GID_TYPE_IB) ||
!ipv6_addr_v4mapped((void *)gid))
memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
else
memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
}
static int set_roce_addr(struct ib_device *device, u8 port_num,
unsigned int index,
const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
struct mlx5_ib_dev *dev = to_mdev(device);
u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0};
u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);
if (ll != IB_LINK_LAYER_ETHERNET)
return -EINVAL;
ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);
MLX5_SET(set_roce_address_in, in, roce_address_index, index);
MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
} }
static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num, static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
...@@ -357,13 +333,13 @@ static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num, ...@@ -357,13 +333,13 @@ static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
const struct ib_gid_attr *attr, const struct ib_gid_attr *attr,
__always_unused void **context) __always_unused void **context)
{ {
return set_roce_addr(device, port_num, index, gid, attr); return set_roce_addr(to_mdev(device), port_num, index, gid, attr);
} }
static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num, static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num,
unsigned int index, __always_unused void **context) unsigned int index, __always_unused void **context)
{ {
return set_roce_addr(device, port_num, index, NULL, NULL); return set_roce_addr(to_mdev(device), port_num, index, NULL, NULL);
} }
__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num, __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
...@@ -978,20 +954,31 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port, ...@@ -978,20 +954,31 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props) struct ib_port_attr *props)
{ {
unsigned int count;
int ret;
switch (mlx5_get_vport_access_method(ibdev)) { switch (mlx5_get_vport_access_method(ibdev)) {
case MLX5_VPORT_ACCESS_METHOD_MAD: case MLX5_VPORT_ACCESS_METHOD_MAD:
return mlx5_query_mad_ifc_port(ibdev, port, props); ret = mlx5_query_mad_ifc_port(ibdev, port, props);
break;
case MLX5_VPORT_ACCESS_METHOD_HCA: case MLX5_VPORT_ACCESS_METHOD_HCA:
return mlx5_query_hca_port(ibdev, port, props); ret = mlx5_query_hca_port(ibdev, port, props);
break;
case MLX5_VPORT_ACCESS_METHOD_NIC: case MLX5_VPORT_ACCESS_METHOD_NIC:
mlx5_query_port_roce(ibdev, port, props); ret = mlx5_query_port_roce(ibdev, port, props);
return 0; break;
default: default:
return -EINVAL; ret = -EINVAL;
}
if (!ret && props) {
count = mlx5_core_reserved_gids_count(to_mdev(ibdev)->mdev);
props->gid_tbl_len -= count;
} }
return ret;
} }
static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index, static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment