Commit bcc73547 authored by David S. Miller

Merge branch 'mlx4-net'

Or Gerlitz says:

====================
Setup mlx4 user space Ethernet QPs to properly handle VXLAN

This short series fixes the mlx4 driver setting of user space Ethernet QPs
(e.g. those opened by DPDK applications) so that they properly handle
VXLAN traffic and offloads.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 362b37be d2fce8a9
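
For context, the user space QPs this series targets are raw packet QPs that attach steering rules through the verbs flow API. Below is a minimal libibverbs sketch of the single-ETH-spec rule that the new mlx4_ib_tunnel_steer_add() path further down matches on; the wrapper struct, the steer_dmac() name, and the MAC handling are invented for illustration, and QP creation and error handling are elided:

/*
 * Sketch only: a user space steering rule with exactly one
 * IBV_FLOW_SPEC_ETH spec, which is the shape the new kernel path
 * (mlx4_ib_tunnel_steer_add) recognizes and mirrors with a VXLAN
 * tunnel steering rule.
 */
#include <stdint.h>
#include <string.h>
#include <infiniband/verbs.h>

struct eth_flow {
	struct ibv_flow_attr	 attr;
	struct ibv_flow_spec_eth eth;	/* must follow attr contiguously */
};

static struct ibv_flow *steer_dmac(struct ibv_qp *qp, uint8_t port,
				   const uint8_t dmac[6])
{
	struct eth_flow flow = {
		.attr = {
			.type	      = IBV_FLOW_ATTR_NORMAL,
			.size	      = sizeof(flow),	/* attr + all specs */
			.num_of_specs = 1,		/* exactly one spec */
			.port	      = port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(flow.eth),
		},
	};

	memcpy(flow.eth.val.dst_mac, dmac, 6);
	memset(flow.eth.mask.dst_mac, 0xff, 6);	/* match the full DMAC */

	return ibv_create_flow(qp, &flow.attr);
}
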
@@ -1089,6 +1089,30 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
 	return err;
 }
 
+static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
+				    u64 *reg_id)
+{
+	void *ib_flow;
+	union ib_flow_spec *ib_spec;
+	struct mlx4_dev	*dev = to_mdev(qp->device)->dev;
+	int err = 0;
+
+	if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+		return 0; /* do nothing */
+
+	ib_flow = flow_attr + 1;
+	ib_spec = (union ib_flow_spec *)ib_flow;
+
+	if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
+		return 0; /* do nothing */
+
+	err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
+				    flow_attr->port, qp->qp_num,
+				    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
+				    reg_id);
+	return err;
+}
+
 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 					   struct ib_flow_attr *flow_attr,
 					   int domain)
@@ -1136,6 +1160,12 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 		i++;
 	}
 
+	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
+		err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
+		if (err)
+			goto err_free;
+	}
+
 	return &mflow->ibflow;
 
 err_free:
...
@@ -1677,9 +1677,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		}
 	}
 
-	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
+	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
 		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
 					MLX4_IB_LINK_TYPE_ETH;
+		if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+			/* set QP to receive both tunneled & non-tunneled packets */
+			if (!(context->flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)))
+				context->srqn = cpu_to_be32(7 << 28);
+		}
+	}
 
 	if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
 		int is_eth = rdma_port_get_link_layer(
...
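
A note on the srqn write above: 7 << 28 sets bits 30:28 of the QP context word, and the flags test skips RSS QPs, presumably so their existing state in that field is not clobbered. Spelled out as a standalone sketch (the macro name is invented; only the shifted value comes from the patch):

#include <stdint.h>

/* 7 << 28 == 0x70000000: bits 30:28 set, which per the patch comment
 * tells the HW to deliver both tunneled and non-tunneled packets. */
#define QPC_TUNNEL_OFFLOAD_ALL	((uint32_t)7 << 28)

_Static_assert(QPC_TUNNEL_OFFLOAD_ALL == 0x70000000u,
	       "bits 30:28 set, rest clear");
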
@@ -474,39 +474,12 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad
 				    int qpn, u64 *reg_id)
 {
 	int err;
-	struct mlx4_spec_list spec_eth_outer = { {NULL} };
-	struct mlx4_spec_list spec_vxlan = { {NULL} };
-	struct mlx4_spec_list spec_eth_inner = { {NULL} };
-
-	struct mlx4_net_trans_rule rule = {
-		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
-		.exclusive = 0,
-		.allow_loopback = 1,
-		.promisc_mode = MLX4_FS_REGULAR,
-		.priority = MLX4_DOMAIN_NIC,
-	};
-
-	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
 
 	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
 		return 0; /* do nothing */
 
-	rule.port = priv->port;
-	rule.qpn = qpn;
-	INIT_LIST_HEAD(&rule.list);
-
-	spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
-	memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
-	memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
-
-	spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN;	/* any vxlan header */
-	spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH;	/* any inner eth header */
-
-	list_add_tail(&spec_eth_outer.list, &rule.list);
-	list_add_tail(&spec_vxlan.list, &rule.list);
-	list_add_tail(&spec_eth_inner.list, &rule.list);
-
-	err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id);
+	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
+				    MLX4_DOMAIN_NIC, reg_id);
 	if (err) {
 		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
 		return err;
...
@@ -1020,6 +1020,44 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
 }
 EXPORT_SYMBOL_GPL(mlx4_flow_detach);
 
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+			  int port, int qpn, u16 prio, u64 *reg_id)
+{
+	int err;
+	struct mlx4_spec_list spec_eth_outer = { {NULL} };
+	struct mlx4_spec_list spec_vxlan = { {NULL} };
+	struct mlx4_spec_list spec_eth_inner = { {NULL} };
+
+	struct mlx4_net_trans_rule rule = {
+		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
+		.exclusive = 0,
+		.allow_loopback = 1,
+		.promisc_mode = MLX4_FS_REGULAR,
+	};
+
+	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+	rule.port = port;
+	rule.qpn = qpn;
+	rule.priority = prio;
+	INIT_LIST_HEAD(&rule.list);
+
+	spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
+	memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
+	memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+
+	spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN;	/* any vxlan header */
+	spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH;	/* any inner eth header */
+
+	list_add_tail(&spec_eth_outer.list, &rule.list);
+	list_add_tail(&spec_vxlan.list, &rule.list);
+	list_add_tail(&spec_eth_inner.list, &rule.list);
+
+	err = mlx4_flow_attach(dev, &rule, reg_id);
+	return err;
+}
+EXPORT_SYMBOL(mlx4_tunnel_steer_add);
+
 int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
 				      u32 max_range_qpn)
 {
...
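
Callers pair the new export with the existing mlx4_flow_detach() (visible at the top of the hunk above) for teardown. A kernel-context sketch of that lifecycle; vxlan_steer_demo() and the MAC value are invented for illustration, while both steering calls are the real exports shown in this diff:

/*
 * Sketch of the attach/detach lifecycle around the new helper.
 * The reg_id returned by mlx4_tunnel_steer_add() is the handle
 * later passed to mlx4_flow_detach() to remove the rule.
 */
#include <linux/etherdevice.h>
#include <linux/mlx4/device.h>

static int vxlan_steer_demo(struct mlx4_dev *dev, int port, int qpn)
{
	unsigned char dmac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u64 reg_id;
	int err;

	err = mlx4_tunnel_steer_add(dev, dmac, port, qpn,
				    MLX4_DOMAIN_NIC, &reg_id);
	if (err)
		return err;

	/* ... the QP now also receives VXLAN-encapsulated frames ... */

	return mlx4_flow_detach(dev, reg_id);
}
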
@@ -1196,6 +1196,9 @@ int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
 				  enum mlx4_net_trans_rule_id id);
 int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
 
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+			  int port, int qpn, u16 prio, u64 *reg_id);
+
 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
 			  int i, int val);
...