Commit a0d8994b authored by Jason Gunthorpe

Merge branch 'mlx5-odp-dc' into rdma.git for-next

Michael Guralnik says:

====================
The series adds support for on-demand paging for DC transport.

As DC is an mlx5-only transport, the capabilities are exposed to the user
through DEVX objects and, later on, through mlx5dv_query_device.
====================

Based on the mlx5-next branch from
git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux for
dependencies

* branch 'mlx5-odp-dc':
  IB/mlx5: Add page fault handler for DC initiator WQE
  IB/mlx5: Remove check of FW capabilities in ODP page fault handling
  net/mlx5: Set ODP capabilities for DC transport to max
parents fd1a52f3 75e46fc0
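
The DC ODP capability added here is ultimately meant to be consumed from userspace. As a rough sketch of how an application might probe it once rdma-core support lands, the following assumes an mlx5dv_context field named dc_odp_caps selected by an MLX5DV_CONTEXT_MASK_DC_ODP_CAPS comp_mask bit; neither name is defined by this kernel series, and the capability bits are assumed to follow the generic IBV_ODP_SUPPORT_* encoding.

/*
 * Hedged sketch: probe DC ODP support via mlx5dv_query_device().
 * MLX5DV_CONTEXT_MASK_DC_ODP_CAPS and dc_odp_caps are assumed rdma-core
 * names, not something this kernel series defines.
 */
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

static int dc_odp_send_supported(struct ibv_context *ctx)
{
        struct mlx5dv_context attrs = {};

        attrs.comp_mask = MLX5DV_CONTEXT_MASK_DC_ODP_CAPS;     /* assumed mask bit */
        if (mlx5dv_query_device(ctx, &attrs))
                return 0;

        if (!(attrs.comp_mask & MLX5DV_CONTEXT_MASK_DC_ODP_CAPS))
                return 0;       /* provider does not report DC ODP caps */

        /* Bits are assumed to use the generic IBV_ODP_SUPPORT_* encoding. */
        return !!(attrs.dc_odp_caps & IBV_ODP_SUPPORT_SEND);   /* assumed field */
}
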
@@ -980,17 +980,6 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
         return ret < 0 ? ret : npages;
 }
 
-static const u32 mlx5_ib_odp_opcode_cap[] = {
-        [MLX5_OPCODE_SEND]             = IB_ODP_SUPPORT_SEND,
-        [MLX5_OPCODE_SEND_IMM]         = IB_ODP_SUPPORT_SEND,
-        [MLX5_OPCODE_SEND_INVAL]       = IB_ODP_SUPPORT_SEND,
-        [MLX5_OPCODE_RDMA_WRITE]       = IB_ODP_SUPPORT_WRITE,
-        [MLX5_OPCODE_RDMA_WRITE_IMM]   = IB_ODP_SUPPORT_WRITE,
-        [MLX5_OPCODE_RDMA_READ]        = IB_ODP_SUPPORT_READ,
-        [MLX5_OPCODE_ATOMIC_CS]        = IB_ODP_SUPPORT_ATOMIC,
-        [MLX5_OPCODE_ATOMIC_FA]        = IB_ODP_SUPPORT_ATOMIC,
-};
-
 /*
  * Parse initiator WQE. Advances the wqe pointer to point at the
  * scatter-gather list, and set wqe_end to the end of the WQE.
@@ -1001,7 +990,6 @@ static int mlx5_ib_mr_initiator_pfault_handler(
 {
         struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
         u16 wqe_index = pfault->wqe.wqe_index;
-        u32 transport_caps;
         struct mlx5_base_av *av;
         unsigned ds, opcode;
         u32 qpn = qp->trans_qp.base.mqp.qpn;
@@ -1025,31 +1013,11 @@ static int mlx5_ib_mr_initiator_pfault_handler(
         opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
                  MLX5_WQE_CTRL_OPCODE_MASK;
 
-        switch (qp->ibqp.qp_type) {
-        case IB_QPT_XRC_INI:
+        if (qp->ibqp.qp_type == IB_QPT_XRC_INI)
                 *wqe += sizeof(struct mlx5_wqe_xrc_seg);
-                transport_caps = dev->odp_caps.per_transport_caps.xrc_odp_caps;
-                break;
-        case IB_QPT_RC:
-                transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps;
-                break;
-        case IB_QPT_UD:
-                transport_caps = dev->odp_caps.per_transport_caps.ud_odp_caps;
-                break;
-        default:
-                mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport 0x%x\n",
-                            qp->ibqp.qp_type);
-                return -EFAULT;
-        }
-
-        if (unlikely(opcode >= ARRAY_SIZE(mlx5_ib_odp_opcode_cap) ||
-                     !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) {
-                mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n",
-                            opcode);
-                return -EFAULT;
-        }
-
-        if (qp->ibqp.qp_type == IB_QPT_UD) {
+
+        if (qp->ibqp.qp_type == IB_QPT_UD ||
+            qp->qp_sub_type == MLX5_IB_QPT_DCI) {
                 av = *wqe;
                 if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
                         *wqe += sizeof(struct mlx5_av);
@@ -1112,19 +1080,6 @@ static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
                 return -EFAULT;
         }
 
-        switch (qp->ibqp.qp_type) {
-        case IB_QPT_RC:
-                if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
-                      IB_ODP_SUPPORT_RECV))
-                        goto invalid_transport_or_opcode;
-                break;
-        default:
-invalid_transport_or_opcode:
-                mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n",
-                            qp->ibqp.qp_type);
-                return -EFAULT;
-        }
-
         *wqe_end = wqe + wqe_size;
 
         return 0;

@@ -546,7 +546,7 @@ static void mlx5_fw_tracer_save_trace(struct mlx5_fw_tracer *tracer,
         trace_data->timestamp = timestamp;
         trace_data->lost = lost;
         trace_data->event_id = event_id;
-        strncpy(trace_data->msg, msg, TRACE_STR_MSG);
+        strscpy_pad(trace_data->msg, msg, TRACE_STR_MSG);
 
         tracer->st_arr.saved_traces_index =
                 (tracer->st_arr.saved_traces_index + 1) & (SAVED_TRACES_NUM - 1);

@@ -495,6 +495,12 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
         ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
         ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
         ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
+        ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive);
+        ODP_CAP_SET_MAX(dev, dc_odp_caps.send);
+        ODP_CAP_SET_MAX(dev, dc_odp_caps.receive);
+        ODP_CAP_SET_MAX(dev, dc_odp_caps.write);
+        ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
+        ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);
 
         if (do_set)
                 err = set_caps(dev, set_ctx, set_sz,

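ODP_CAP_SET_MAX() used above is defined elsewhere in this file; the hunk only adds new invocations for the DC fields. As a hedged sketch of what the macro is assumed to do, it reads the firmware's advertised maximum for the field with MLX5_CAP_ODP_MAX() and, if non-zero, writes it into the capability buffer that handle_hca_cap_odp() later hands to set_caps(), flagging do_set; something along these lines (not the verbatim kernel definition):

/* Assumed shape of ODP_CAP_SET_MAX(); the real macro lives outside this hunk. */
#define ODP_CAP_SET_MAX(dev, field)                                     \
        do {                                                            \
                u32 _res = MLX5_CAP_ODP_MAX(dev, field);                \
                if (_res) {                                             \
                        do_set = true;                                  \
                        MLX5_SET(odp_cap, set_hca_cap, field, _res);    \
                }                                                       \
        } while (0)
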
@@ -948,7 +948,9 @@ struct mlx5_ifc_odp_cap_bits {
 
         struct mlx5_ifc_odp_per_transport_service_cap_bits xrc_odp_caps;
 
-        u8         reserved_at_100[0x700];
+        struct mlx5_ifc_odp_per_transport_service_cap_bits dc_odp_caps;
+
+        u8         reserved_at_120[0x6e0];
 };
 
 struct mlx5_ifc_calc_op {