Commit 3cf69cc8 authored by Amir Vadai, committed by Roland Dreier

IB/mlx4: Add CM paravirtualization

In CM para-virtualization:

1. Incoming requests are steered to the correct vHCA according to the
   embedded GID.
2. Communication IDs on outgoing requests are replaced by a globally
   unique ID, generated by the PPF, since there is no synchronization
   of ID generation between guests (and so these IDs are not
   guaranteed to be globally unique).  The guest's comm ID is stored,
   and is returned to the response MAD when it arrives.
Signed-off-by: Amir Vadai <amirv@mellanox.co.il>
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Roland Dreier <roland@purestorage.com>
parent b9c5d6a6
obj-$(CONFIG_MLX4_INFINIBAND) += mlx4_ib.o
mlx4_ib-y := ah.o cq.o doorbell.o mad.o main.o mr.o qp.o srq.o mcg.o cm.o
This diff is collapsed.
@@ -544,6 +544,10 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
(struct ib_sa_mad *) mad))
return 0;
break;
case IB_MGMT_CLASS_CM:
if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad))
return 0;
break;
case IB_MGMT_CLASS_DEVICE_MGMT:
if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
return 0;
@@ -1076,6 +1080,11 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
(struct ib_sa_mad *) &tunnel->mad))
return;
break;
case IB_MGMT_CLASS_CM:
if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave,
(struct ib_mad *) &tunnel->mad))
return;
break;
case IB_MGMT_CLASS_DEVICE_MGMT:
if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
@@ -1790,6 +1799,7 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
dev->sriov.is_going_down = 0;
spin_lock_init(&dev->sriov.going_down_lock);
mlx4_ib_cm_paravirt_init(dev);
mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");
@@ -1818,6 +1828,7 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
--i;
}
mlx4_ib_cm_paravirt_clean(dev, -1);
return err;
}
@@ -1833,7 +1844,7 @@ void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
dev->sriov.is_going_down = 1;
spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
if (mlx4_is_master(dev->dev)) {
for (i = 0; i < dev->num_ports; i++) {
flush_workqueue(dev->sriov.demux[i].ud_wq);
mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
@@ -1841,4 +1852,7 @@ void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
dev->sriov.sqps[i] = NULL;
mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
}
mlx4_ib_cm_paravirt_clean(dev, -1);
}
}
@@ -348,6 +348,12 @@ struct mlx4_ib_sriov {
* it may be called from interrupt context.*/
spinlock_t going_down_lock;
int is_going_down;
/* CM paravirtualization fields */
struct list_head cm_list;
spinlock_t id_map_lock;
struct rb_root sl_id_map;
struct idr pv_id_table;
};

struct mlx4_ib_iboe {
@@ -591,4 +597,13 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
u32 qkey, struct ib_ah_attr *attr, struct ib_mad *mad);
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx);
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
struct ib_mad *mad);
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
struct ib_mad *mad);
void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev);
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave_id);
#endif /* MLX4_IB_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.