Commit d5ed8ac3 authored by Mark Bloch, committed by Jason Gunthorpe

RDMA/mlx5: Move default representors SQ steering to rule to modify QP

Currently, steering for SQs created on representors is set up at SQ
creation time. Once representors become ports of an IB device, the port
argument is required, and it is only available at the modify QP stage;
adjust the code accordingly.

Signed-off-by: Mark Bloch <markb@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>

parent 6a4d00be
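
With this change the send-to-vport steering rule for a representor SQ is no longer installed when the SQ is created; it is installed when the raw packet QP is modified and a port number is supplied. A minimal user-space sketch of how that path is reached through plain libibverbs (not part of this patch; pd and cq are assumed to be an already created ibv_pd and ibv_cq, and port 1 is illustrative):

	#include <infiniband/verbs.h>

	struct ibv_qp_init_attr init_attr = {
		.send_cq = cq,
		.recv_cq = cq,
		.cap = { .max_send_wr = 64, .max_recv_wr = 64,
			 .max_send_sge = 1, .max_recv_sge = 1 },
		.qp_type = IBV_QPT_RAW_PACKET,
	};
	struct ibv_qp *qp = ibv_create_qp(pd, &init_attr);

	struct ibv_qp_attr attr = {
		.qp_state = IBV_QPS_INIT,
		.port_num = 1,
	};
	/* IBV_QP_PORT arrives in the kernel as IB_QP_PORT, so __mlx5_ib_modify_qp()
	 * copies attr->port_num into raw_qp_param.port; on a representor device the
	 * send-to-vport rule for the SQ is installed during this modify. */
	ibv_modify_qp(qp, &attr, IBV_QP_STATE | IBV_QP_PORT);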
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -146,22 +146,21 @@ struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, int vport)
 	return mlx5_eswitch_vport_rep(esw, vport);
 }
 
-int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
-			      struct mlx5_ib_sq *sq)
+struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
+						   struct mlx5_ib_sq *sq,
+						   u16 port)
 {
-	struct mlx5_flow_handle *flow_rule;
 	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+	struct mlx5_eswitch_rep *rep;
 
-	if (!dev->is_rep)
-		return 0;
+	if (!dev->is_rep || !port)
+		return NULL;
 
-	flow_rule =
-		mlx5_eswitch_add_send_to_vport_rule(esw,
-						    dev->port[0].rep->vport,
-						    sq->base.mqp.qpn);
-	if (IS_ERR(flow_rule))
-		return PTR_ERR(flow_rule);
-	sq->flow_rule = flow_rule;
+	if (!dev->port[port - 1].rep)
+		return ERR_PTR(-EINVAL);
 
-	return 0;
+	rep = dev->port[port - 1].rep;
+
+	return mlx5_eswitch_add_send_to_vport_rule(esw, rep->vport,
+						   sq->base.mqp.qpn);
 }
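
The reworked helper now has a three-way return contract: NULL when there is nothing to install (the device is not a representor, or no port was supplied), an ERR_PTR() when the port has no bound representor, and a valid flow handle when a rule was created. A hedged caller sketch (dev, sq and port as in the function above; the actual caller is in the qp.c hunk further down):

	struct mlx5_flow_handle *rule;

	rule = create_flow_rule_vport_sq(dev, sq, port);
	if (IS_ERR(rule)) {
		/* hard failure, e.g. the port has no representor bound */
	} else if (rule) {
		/* a rule was installed; the caller owns it and must release
		 * it later with mlx5_del_flow_rules(rule) */
	} else {
		/* NULL: nothing to do (not a representor, or port == 0) */
	}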
--- a/drivers/infiniband/hw/mlx5/ib_rep.h
+++ b/drivers/infiniband/hw/mlx5/ib_rep.h
@@ -20,8 +20,9 @@ struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
 			   int vport_index);
 void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev);
 void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev);
-int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
-			      struct mlx5_ib_sq *sq);
+struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
+						   struct mlx5_ib_sq *sq,
+						   u16 port);
 struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
 					  int vport_index);
 #else /* CONFIG_MLX5_ESWITCH */
@@ -52,10 +53,12 @@ struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
 static inline void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev) {}
 static inline void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev) {}
-static inline int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
-					    struct mlx5_ib_sq *sq)
+static inline
+struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
+						   struct mlx5_ib_sq *sq,
+						   u16 port)
 {
-	return 0;
+	return NULL;
 }
 
 static inline
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -92,6 +92,7 @@ struct mlx5_modify_raw_qp_param {
 	struct mlx5_rate_limit rl;
 
 	u8 rq_q_ctr_id;
+	u16 port;
 };
 
 static void get_cqs(enum ib_qp_type qp_type,
@@ -1213,11 +1214,11 @@ static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
 	mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid);
 }
 
-static void destroy_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
-				       struct mlx5_ib_sq *sq)
+static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq)
 {
 	if (sq->flow_rule)
 		mlx5_del_flow_rules(sq->flow_rule);
+	sq->flow_rule = NULL;
 }
 
 static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
@@ -1285,15 +1286,8 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
 	if (err)
 		goto err_umem;
 
-	err = create_flow_rule_vport_sq(dev, sq);
-	if (err)
-		goto err_flow;
-
 	return 0;
 
-err_flow:
-	mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
-
 err_umem:
 	ib_umem_release(sq->ubuffer.umem);
 	sq->ubuffer.umem = NULL;
@@ -1304,7 +1298,7 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
 static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
 				     struct mlx5_ib_sq *sq)
 {
-	destroy_flow_rule_vport_sq(dev, sq);
+	destroy_flow_rule_vport_sq(sq);
 	mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
 	ib_umem_release(sq->ubuffer.umem);
 }
@@ -3269,6 +3263,8 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	}
 
 	if (modify_sq) {
+		struct mlx5_flow_handle *flow_rule;
+
 		if (tx_affinity) {
 			err = modify_raw_packet_tx_affinity(dev->mdev, sq,
 							    tx_affinity,
@@ -3277,8 +3273,25 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 				return err;
 		}
 
-		return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
-					       raw_qp_param, qp->ibqp.pd);
+		flow_rule = create_flow_rule_vport_sq(dev, sq,
+						      raw_qp_param->port);
+		if (IS_ERR(flow_rule))
+			return err;
+
+		err = modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
+					      raw_qp_param, qp->ibqp.pd);
+		if (err) {
+			if (flow_rule)
+				mlx5_del_flow_rules(flow_rule);
+			return err;
+		}
+
+		if (flow_rule) {
+			destroy_flow_rule_vport_sq(sq);
+			sq->flow_rule = flow_rule;
+		}
+
+		return err;
 	}
 
 	return 0;
@@ -3561,6 +3574,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 			raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
 		}
 
+		if (attr_mask & IB_QP_PORT)
+			raw_qp_param.port = attr->port_num;
+
 		if (attr_mask & IB_QP_RATE_LIMIT) {
 			raw_qp_param.rl.rate = attr->rate_limit;
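
Taken together, the modify path replaces the rule make-before-break style: the new rule is created first, the SQ modify is attempted, and only on success is the old rule removed and the new handle stored in sq->flow_rule; if the modify fails, the freshly created rule is deleted and the previous rule stays in place. A condensed restatement of that ordering with explanatory comments (same identifiers as the hunks above, no code beyond what the patch adds):

		flow_rule = create_flow_rule_vport_sq(dev, sq, raw_qp_param->port);
		if (IS_ERR(flow_rule))
			return err;			/* nothing was installed */

		err = modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
					      raw_qp_param, qp->ibqp.pd);
		if (err) {
			if (flow_rule)
				mlx5_del_flow_rules(flow_rule);	/* discard the unused new rule */
			return err;
		}

		if (flow_rule) {
			destroy_flow_rule_vport_sq(sq);	/* drop the old rule, if any */
			sq->flow_rule = flow_rule;	/* publish the replacement */
		}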