Commit 79237743 authored by Jason Gunthorpe

Merge branch 'mlx5_uar' into rdma.git for-next

Meir Lichtinger says:

====================
ConnectX-7 supports setting the relaxed ordering read/write mkey
attributes by UMR, indicated by new HCA capabilities, so extend the
mlx5_ib driver to configure the UMR control segment accordingly.
====================

Based on the mlx5-next branch at
      git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux
due to dependencies.

* branch 'mlx5_uar':
  RDMA/mlx5: Set mkey relaxed ordering by UMR with ConnectX-7
  RDMA/mlx5: Use MLX5_SET macro instead of local structure
  RDMA/mlx5: ConnectX-7 new capabilities to set relaxed ordering by UMR
parents 8b603d07 896ec973
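
For orientation before the diff: everything below hinges on two new HCA
capability bits, relaxed_ordering_write_umr and relaxed_ordering_read_umr.
The sketch below is a condensed restatement of the rule the driver now
enforces (the helper name is hypothetical; the real check lives inline in
mlx5_ib_can_use_umr(), first hunks below): an mkey requested with
IB_ACCESS_RELAXED_ORDERING may take the UMR path only if each
relaxed-ordering direction the device supports is also UMR-modifiable.

/* Hypothetical helper, restating the gating logic for readability. */
static bool relaxed_ordering_umr_ok(struct mlx5_ib_dev *dev)
{
        if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
            !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
                return false;

        if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
            !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
                return false;

        return true;
}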
@@ -1356,15 +1356,6 @@ static inline void init_query_mad(struct ib_smp *mad)
 	mad->method = IB_MGMT_METHOD_GET;
 }
 
-static inline u8 convert_access(int acc)
-{
-	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
-	       (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
-	       (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
-	       (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
-	       MLX5_PERM_LOCAL_READ;
-}
-
 static inline int is_qp1(enum ib_qp_type qp_type)
 {
 	return qp_type == MLX5_IB_QPT_HW_GSI;
@@ -1463,8 +1454,13 @@ static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
 		return false;
 
 	if (access_flags & IB_ACCESS_RELAXED_ORDERING &&
-	    (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) ||
-	     MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)))
+	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
+	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
+		return false;
+
+	if (access_flags & IB_ACCESS_RELAXED_ORDERING &&
+	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
+	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
 		return false;
 
 	return true;
...
@@ -263,7 +263,9 @@ static __be64 get_umr_update_translation_mask(void)
 	return cpu_to_be64(result);
 }
 
-static __be64 get_umr_update_access_mask(int atomic)
+static __be64 get_umr_update_access_mask(int atomic,
+					 int relaxed_ordering_write,
+					 int relaxed_ordering_read)
 {
 	u64 result;
@@ -275,6 +277,12 @@ static __be64 get_umr_update_access_mask(int atomic)
 	if (atomic)
 		result |= MLX5_MKEY_MASK_A;
 
+	if (relaxed_ordering_write)
+		result |= MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE;
+
+	if (relaxed_ordering_read)
+		result |= MLX5_MKEY_MASK_RELAXED_ORDERING_READ;
+
 	return cpu_to_be64(result);
 }
@@ -289,17 +297,28 @@ static __be64 get_umr_update_pd_mask(void)
 static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
 {
-	if ((mask & MLX5_MKEY_MASK_PAGE_SIZE &&
-	     MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) ||
-	    (mask & MLX5_MKEY_MASK_A &&
-	     MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)))
+	if (mask & MLX5_MKEY_MASK_PAGE_SIZE &&
+	    MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
+		return -EPERM;
+
+	if (mask & MLX5_MKEY_MASK_A &&
+	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
+		return -EPERM;
+
+	if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE &&
+	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
+		return -EPERM;
+
+	if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_READ &&
+	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
 		return -EPERM;
+
 	return 0;
 }
 
 static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
 			       struct mlx5_wqe_umr_ctrl_seg *umr,
-			       const struct ib_send_wr *wr, int atomic)
+			       const struct ib_send_wr *wr)
 {
 	const struct mlx5_umr_wr *umrwr = umr_wr(wr);
@@ -325,7 +344,10 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
 	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
 		umr->mkey_mask |= get_umr_update_translation_mask();
 	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
-		umr->mkey_mask |= get_umr_update_access_mask(atomic);
+		umr->mkey_mask |= get_umr_update_access_mask(
+			!!(MLX5_CAP_GEN(dev->mdev, atomic)),
+			!!(MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)),
+			!!(MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr)));
 		umr->mkey_mask |= get_umr_update_pd_mask();
 	}
 	if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
@@ -383,20 +405,31 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
 	memset(seg, 0, sizeof(*seg));
 	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
-		seg->status = MLX5_MKEY_STATUS_FREE;
+		MLX5_SET(mkc, seg, free, 1);
+
+	MLX5_SET(mkc, seg, a,
+		 !!(umrwr->access_flags & IB_ACCESS_REMOTE_ATOMIC));
+	MLX5_SET(mkc, seg, rw,
+		 !!(umrwr->access_flags & IB_ACCESS_REMOTE_WRITE));
+	MLX5_SET(mkc, seg, rr, !!(umrwr->access_flags & IB_ACCESS_REMOTE_READ));
+	MLX5_SET(mkc, seg, lw, !!(umrwr->access_flags & IB_ACCESS_LOCAL_WRITE));
+	MLX5_SET(mkc, seg, lr, 1);
+	MLX5_SET(mkc, seg, relaxed_ordering_write,
+		 !!(umrwr->access_flags & IB_ACCESS_RELAXED_ORDERING));
+	MLX5_SET(mkc, seg, relaxed_ordering_read,
+		 !!(umrwr->access_flags & IB_ACCESS_RELAXED_ORDERING));
 
-	seg->flags = convert_access(umrwr->access_flags);
 	if (umrwr->pd)
-		seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
+		MLX5_SET(mkc, seg, pd, to_mpd(umrwr->pd)->pdn);
 	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
 	    !umrwr->length)
-		seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);
+		MLX5_SET(mkc, seg, length64, 1);
 
-	seg->start_addr = cpu_to_be64(umrwr->virt_addr);
-	seg->len = cpu_to_be64(umrwr->length);
-	seg->log2_page_size = umrwr->page_shift;
-	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
-				       mlx5_mkey_variant(umrwr->mkey));
+	MLX5_SET64(mkc, seg, start_addr, umrwr->virt_addr);
+	MLX5_SET64(mkc, seg, len, umrwr->length);
+	MLX5_SET(mkc, seg, log_page_size, umrwr->page_shift);
+	MLX5_SET(mkc, seg, qpn, 0xffffff);
+	MLX5_SET(mkc, seg, mkey_7_0, mlx5_mkey_variant(umrwr->mkey));
 }
 
 static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
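
The hunk above (together with the removal of convert_access() in
mlx5_ib.h) stops writing struct mlx5_mkey_seg fields by hand and instead
drives everything through MLX5_SET/MLX5_SET64 against the mkc layout from
mlx5_ifc.h, which is what makes the new relaxed_ordering_write/read bits
settable without any new struct plumbing. A toy user-space sketch of how
such an accessor packs a field into big-endian dwords (the offsets and
field positions here are illustrative, not the real mkc layout):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>  /* htonl(), ntohl() */

/*
 * Toy equivalent of the kernel's MLX5_SET(): a field lives at a bit
 * offset/width inside 32-bit big-endian words, so setting it is
 * endian-safe regardless of the host CPU.
 */
static void set_bits(uint32_t *base, unsigned int bit_off,
                     unsigned int bit_sz, uint32_t val)
{
        uint32_t *dw = base + bit_off / 32;
        unsigned int shift = 32 - (bit_off % 32) - bit_sz;
        uint32_t mask = (bit_sz == 32 ? ~0u : (1u << bit_sz) - 1) << shift;

        *dw = htonl((ntohl(*dw) & ~mask) | ((val << shift) & mask));
}

int main(void)
{
        uint32_t seg[4] = { 0 };

        set_bits(seg, 6, 1, 1);          /* a 1-bit flag, illustrative offset */
        set_bits(seg, 32, 24, 0xffffff); /* a 24-bit field in word 1 */

        printf("%08x %08x\n", ntohl(seg[0]), ntohl(seg[1]));
        return 0;
}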
@@ -1224,8 +1257,7 @@ static int handle_qpt_reg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
 	(*ctrl)->imm = cpu_to_be32(umr_wr(wr)->mkey);
-	err = set_reg_umr_segment(dev, *seg, wr,
-				  !!(MLX5_CAP_GEN(dev->mdev, atomic)));
+	err = set_reg_umr_segment(dev, *seg, wr);
 	if (unlikely(err))
 		goto out;
 	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
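
Taken together, these hunks concentrate the capability knowledge in one
place: handle_qpt_reg_umr() no longer samples the atomic cap at the call
site, set_reg_umr_segment() derives the whole update mask directly from
device capabilities, and umr_check_mkey_mask() rejects any requested mask
bit the device cannot modify, so a UMR post asking for an unsupported
update fails with -EPERM.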
...
@@ -299,11 +299,18 @@ void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas)
 }
 EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
 
-void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas)
+void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm)
 {
 	int i;
 
+	WARN_ON(perm & 0xfc);
 	for (i = 0; i < buf->npages; i++)
-		pas[i] = cpu_to_be64(buf->frags[i].map);
+		pas[i] = cpu_to_be64(buf->frags[i].map | perm);
+}
+EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array_perm);
+
+void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas)
+{
+	mlx5_fill_page_frag_array_perm(buf, pas, 0);
 }
 EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array);
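
mlx5_fill_page_frag_array_perm() ORs a 2-bit MTT permission value (the
MLX5_MTT_PERM_* enum added at the end of mlx5_ifc.h, last hunk below)
into each page address before the big-endian conversion. This works
because DMA page addresses are page aligned, so their low bits are
guaranteed zero; WARN_ON(perm & 0xfc) catches callers that pass anything
outside the 2-bit field. A standalone user-space sketch of the trick:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative names; the kernel enum is MLX5_MTT_PERM_READ/WRITE. */
#define MTT_PERM_READ  (1u << 0)
#define MTT_PERM_WRITE (1u << 1)

int main(void)
{
        uint64_t page = 0x123456000ull;  /* a 4 KiB-aligned DMA address */
        uint8_t perm = MTT_PERM_READ | MTT_PERM_WRITE;

        assert(!(perm & 0xfc));      /* same check as the WARN_ON() */
        assert(!(page & 0xfffull));  /* alignment keeps the low bits free */

        /* the value the driver would then byte-swap with cpu_to_be64() */
        uint64_t pas = page | perm;
        printf("pas entry: 0x%016llx\n", (unsigned long long)pas);
        return 0;
}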
@@ -1598,6 +1598,7 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
 static bool counter_is_valid(u32 action)
 {
 	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
+			  MLX5_FLOW_CONTEXT_ACTION_ALLOW |
 			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
 }
...
@@ -276,7 +276,9 @@ enum {
 	MLX5_MKEY_MASK_RW		= 1ull << 20,
 	MLX5_MKEY_MASK_A		= 1ull << 21,
 	MLX5_MKEY_MASK_SMALL_FENCE	= 1ull << 23,
-	MLX5_MKEY_MASK_FREE		= 1ull << 29,
+	MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE	= 1ull << 25,
+	MLX5_MKEY_MASK_FREE			= 1ull << 29,
+	MLX5_MKEY_MASK_RELAXED_ORDERING_READ	= 1ull << 47,
 };
 
 enum {
@@ -1007,7 +1009,6 @@ enum {
 	MLX5_MKEY_REMOTE_INVAL	= 1 << 24,
 	MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
 	MLX5_MKEY_BSF_EN	= 1 << 30,
-	MLX5_MKEY_LEN64		= 1 << 31,
 };
 
 struct mlx5_mkey_seg {
@@ -1361,11 +1362,11 @@ enum mlx5_qcam_feature_groups {
 	MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)
 
 #define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
-	MLX5_GET(device_virtio_emulation_cap, \
+	MLX5_GET(virtio_emulation_cap, \
 		 (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
 
 #define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
-	MLX5_GET64(device_virtio_emulation_cap, \
+	MLX5_GET64(virtio_emulation_cap, \
 		 (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
 
 #define MLX5_CAP_IPSEC(mdev, cap)\
...
@@ -971,6 +971,7 @@ void mlx5_register_debugfs(void);
 void mlx5_unregister_debugfs(void);
 
 void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
+void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
 void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
 		    unsigned int *irqn);
@@ -1053,6 +1054,7 @@ enum {
 enum {
 	MLX5_INTERFACE_PROTOCOL_IB  = 0,
 	MLX5_INTERFACE_PROTOCOL_ETH = 1,
+	MLX5_INTERFACE_PROTOCOL_VDPA = 2,
 };
 
 struct mlx5_interface {
...
@@ -93,6 +93,7 @@ enum {
 enum {
 	MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
+	MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
 	MLX5_OBJ_TYPE_MKEY = 0xff01,
 	MLX5_OBJ_TYPE_QP = 0xff02,
 	MLX5_OBJ_TYPE_PSV = 0xff03,
@@ -981,17 +982,40 @@ struct mlx5_ifc_device_event_cap_bits {
 	u8         user_unaffiliated_events[4][0x40];
 };
 
-struct mlx5_ifc_device_virtio_emulation_cap_bits {
-	u8         reserved_at_0[0x20];
+struct mlx5_ifc_virtio_emulation_cap_bits {
+	u8         desc_tunnel_offload_type[0x1];
+	u8         eth_frame_offload_type[0x1];
+	u8         virtio_version_1_0[0x1];
+	u8         device_features_bits_mask[0xd];
+	u8         event_mode[0x8];
+	u8         virtio_queue_type[0x8];
 
-	u8         reserved_at_20[0x13];
+	u8         max_tunnel_desc[0x10];
+	u8         reserved_at_30[0x3];
 	u8         log_doorbell_stride[0x5];
 	u8         reserved_at_38[0x3];
 	u8         log_doorbell_bar_size[0x5];
 
 	u8         doorbell_bar_offset[0x40];
 
-	u8         reserved_at_80[0x780];
+	u8         max_emulated_devices[0x8];
+	u8         max_num_virtio_queues[0x18];
+
+	u8         reserved_at_a0[0x60];
+
+	u8         umem_1_buffer_param_a[0x20];
+	u8         umem_1_buffer_param_b[0x20];
+
+	u8         umem_2_buffer_param_a[0x20];
+	u8         umem_2_buffer_param_b[0x20];
+
+	u8         umem_3_buffer_param_a[0x20];
+	u8         umem_3_buffer_param_b[0x20];
+
+	u8         reserved_at_1c0[0x640];
 };
 
 enum {
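
A reading note on the mlx5_ifc convention used throughout these structs:
each u8 name[0xN] member is an N-bit field in the firmware layout, not N
bytes, and the reserved_at_X names carry the bit offset. The replacement
fields above can be checked to still tile the layout: 0x1 + 0x1 + 0x1 +
0xd + 0x8 + 0x8 = 0x20 bits for the first dword, and 0x10 + 0x3 + 0x5 +
0x3 + 0x5 = 0x20 for the second, so log_doorbell_stride and the other
doorbell fields keep their old offsets; reserved_at_1c0[0x640] then pads
the structure to the same 0x800 bits the old reserved_at_80[0x780]
version occupied.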
@@ -1216,7 +1240,11 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         max_sgl_for_optimized_performance[0x8];
 	u8         log_max_cq_sz[0x8];
-	u8         reserved_at_d0[0xb];
+	u8         relaxed_ordering_write_umr[0x1];
+	u8         relaxed_ordering_read_umr[0x1];
+	u8         reserved_at_d2[0x7];
+	u8         virtio_net_device_emualtion_manager[0x1];
+	u8         virtio_blk_device_emualtion_manager[0x1];
 	u8         log_max_cq[0x5];
 
 	u8         log_max_eq_sz[0x8];
@@ -2952,7 +2980,7 @@ union mlx5_ifc_hca_cap_union_bits {
 	struct mlx5_ifc_fpga_cap_bits fpga_cap;
 	struct mlx5_ifc_tls_cap_bits tls_cap;
 	struct mlx5_ifc_device_mem_cap_bits device_mem_cap;
-	struct mlx5_ifc_device_virtio_emulation_cap_bits virtio_emulation_cap;
+	struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap;
 	u8         reserved_at_0[0x8000];
 };
@@ -3298,15 +3326,18 @@ struct mlx5_ifc_scheduling_context_bits {
 };
 
 struct mlx5_ifc_rqtc_bits {
 	u8         reserved_at_0[0xa0];
 
-	u8         reserved_at_a0[0x10];
-	u8         rqt_max_size[0x10];
+	u8         reserved_at_a0[0x5];
+	u8         list_q_type[0x3];
+	u8         reserved_at_a8[0x8];
+	u8         rqt_max_size[0x10];
 
-	u8         reserved_at_c0[0x10];
-	u8         rqt_actual_size[0x10];
+	u8         rq_vhca_id_format[0x1];
+	u8         reserved_at_c1[0xf];
+	u8         rqt_actual_size[0x10];
 
 	u8         reserved_at_e0[0x6a0];
 
 	struct mlx5_ifc_rq_num_bits rq_num[];
 };
@@ -7084,7 +7115,7 @@ struct mlx5_ifc_destroy_mkey_out_bits {
 struct mlx5_ifc_destroy_mkey_in_bits {
 	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
+	u8         uid[0x10];
 
 	u8         reserved_at_20[0x10];
 	u8         op_mod[0x10];
@@ -7782,7 +7813,7 @@ struct mlx5_ifc_create_mkey_out_bits {
 struct mlx5_ifc_create_mkey_in_bits {
 	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
+	u8         uid[0x10];
 
 	u8         reserved_at_20[0x10];
 	u8         op_mod[0x10];
@@ -10312,6 +10343,40 @@ struct mlx5_ifc_create_umem_in_bits {
 	struct mlx5_ifc_umem_bits  umem;
 };
 
+struct mlx5_ifc_create_umem_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         umem_id[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_umem_in_bits {
+	u8         opcode[0x10];
+	u8         uid[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         umem_id[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_umem_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
 struct mlx5_ifc_create_uctx_in_bits {
 	u8         opcode[0x10];
 	u8         reserved_at_10[0x10];
@@ -10324,6 +10389,18 @@ struct mlx5_ifc_create_uctx_in_bits {
 	struct mlx5_ifc_uctx_bits  uctx;
 };
 
+struct mlx5_ifc_create_uctx_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x10];
+	u8         uid[0x10];
+
+	u8         reserved_at_60[0x20];
+};
+
 struct mlx5_ifc_destroy_uctx_in_bits {
 	u8         opcode[0x10];
 	u8         reserved_at_10[0x10];
@@ -10337,6 +10414,15 @@ struct mlx5_ifc_destroy_uctx_in_bits {
 	u8         reserved_at_60[0x20];
 };
 
+struct mlx5_ifc_destroy_uctx_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
 struct mlx5_ifc_create_sw_icm_in_bits {
 	struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
 	struct mlx5_ifc_sw_icm_bits sw_icm;
@@ -10653,4 +10739,10 @@ struct mlx5_ifc_tls_progress_params_bits {
 	u8         hw_offset_record_number[0x18];
 };
 
+enum {
+	MLX5_MTT_PERM_READ	= 1 << 0,
+	MLX5_MTT_PERM_WRITE	= 1 << 1,
+	MLX5_MTT_PERM_RW	= MLX5_MTT_PERM_READ | MLX5_MTT_PERM_WRITE,
+};
+
 #endif /* MLX5_IFC_H */