Commit 26b99066 authored by Jason Gunthorpe

RDMA: Change all uapi headers to use __aligned_u64 instead of __u64

The new standard for the subsystem is to use only __aligned_u64 in
uapi headers, to prevent 32/64 compat bugs from being introduced in
the future.

Changing all existing usage will help ensure new developers copy the
right pattern.

The before and after of this patch were compared with pahole on 32-bit
and 64-bit compiles to confirm that no structure layout changes; this
patch is a NOP.
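
For illustration, a minimal userspace sketch of the 32/64 layout hazard
this conversion guards against (hypothetical struct and field names, not
taken from this patch); build it with -m32 and with -m64 and compare the
output, or run pahole on both objects:

#include <stdio.h>
#include <stddef.h>
#include <linux/types.h>

struct cmd_plain {		/* hypothetical uapi-style struct */
	__u32 handle;
	__u64 addr;		/* 4-byte aligned on i386, 8-byte aligned on x86_64 */
};

struct cmd_fixed {
	__u32 handle;
	__aligned_u64 addr;	/* __u64 __attribute__((aligned(8))): same layout everywhere */
};

int main(void)
{
	/* plain: 12 bytes / addr at offset 4 on i386, 16 bytes / offset 8 on x86_64 */
	printf("plain: size=%zu addr-offset=%zu\n",
	       sizeof(struct cmd_plain), offsetof(struct cmd_plain, addr));
	/* fixed: 16 bytes / offset 8 on both, so no 32-bit compat shim is needed */
	printf("fixed: size=%zu addr-offset=%zu\n",
	       sizeof(struct cmd_fixed), offsetof(struct cmd_fixed, addr));
	return 0;
}

None of the structs touched below have this problem today (hence the
NOP), but spelling the alignment out keeps future additions from
reintroducing it.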
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent f2e9bfac
@@ -65,8 +65,8 @@ struct bnxt_re_pd_resp {
} __attribute__((packed, aligned(4)));
struct bnxt_re_cq_req {
-__u64 cq_va;
+__aligned_u64 cq_va;
-__u64 cq_handle;
+__aligned_u64 cq_handle;
};
struct bnxt_re_cq_resp {
@@ -77,9 +77,9 @@ struct bnxt_re_cq_resp {
};
struct bnxt_re_qp_req {
-__u64 qpsva;
+__aligned_u64 qpsva;
-__u64 qprva;
+__aligned_u64 qprva;
-__u64 qp_handle;
+__aligned_u64 qp_handle;
};
struct bnxt_re_qp_resp {
@@ -88,8 +88,8 @@ struct bnxt_re_qp_resp {
};
struct bnxt_re_srq_req {
-__u64 srqva;
+__aligned_u64 srqva;
-__u64 srq_handle;
+__aligned_u64 srq_handle;
};
struct bnxt_re_srq_resp {
...
@@ -41,21 +41,21 @@
* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
-* In particular do not use pointer types -- pass pointers in __u64
+* In particular do not use pointer types -- pass pointers in __aligned_u64
* instead.
*/
struct iwch_create_cq_req {
-__u64 user_rptr_addr;
+__aligned_u64 user_rptr_addr;
};
struct iwch_create_cq_resp_v0 {
-__u64 key;
+__aligned_u64 key;
__u32 cqid;
__u32 size_log2;
};
struct iwch_create_cq_resp {
-__u64 key;
+__aligned_u64 key;
__u32 cqid;
__u32 size_log2;
__u32 memsize;
@@ -63,8 +63,8 @@ struct iwch_create_cq_resp {
};
struct iwch_create_qp_resp {
-__u64 key;
+__aligned_u64 key;
-__u64 db_key;
+__aligned_u64 db_key;
__u32 qpid;
__u32 size_log2;
__u32 sq_size_log2;
...
@@ -41,13 +41,13 @@
* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
-* In particular do not use pointer types -- pass pointers in __u64
+* In particular do not use pointer types -- pass pointers in __aligned_u64
* instead.
*/
struct c4iw_create_cq_resp {
-__u64 key;
+__aligned_u64 key;
-__u64 gts_key;
+__aligned_u64 gts_key;
-__u64 memsize;
+__aligned_u64 memsize;
__u32 cqid;
__u32 size;
__u32 qid_mask;
@@ -59,13 +59,13 @@ enum {
};
struct c4iw_create_qp_resp {
-__u64 ma_sync_key;
+__aligned_u64 ma_sync_key;
-__u64 sq_key;
+__aligned_u64 sq_key;
-__u64 rq_key;
+__aligned_u64 rq_key;
-__u64 sq_db_gts_key;
+__aligned_u64 sq_db_gts_key;
-__u64 rq_db_gts_key;
+__aligned_u64 rq_db_gts_key;
-__u64 sq_memsize;
+__aligned_u64 sq_memsize;
-__u64 rq_memsize;
+__aligned_u64 rq_memsize;
__u32 sqid;
__u32 rqid;
__u32 sq_size;
@@ -75,7 +75,7 @@ struct c4iw_create_qp_resp {
};
struct c4iw_alloc_ucontext_resp {
-__u64 status_page_key;
+__aligned_u64 status_page_key;
__u32 status_page_size;
__u32 reserved; /* explicit padding (optional for i386) */
};
...
@@ -79,7 +79,7 @@ struct hfi1_user_info {
};
struct hfi1_ctxt_info {
-__u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */
+__aligned_u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */
__u32 rcvegr_size; /* size of each eager buffer */
__u16 num_active; /* number of active units */
__u16 unit; /* unit (chip) assigned to caller */
@@ -98,9 +98,9 @@ struct hfi1_ctxt_info {
struct hfi1_tid_info {
/* virtual address of first page in transfer */
-__u64 vaddr;
+__aligned_u64 vaddr;
/* pointer to tid array. this array is big enough */
-__u64 tidlist;
+__aligned_u64 tidlist;
/* number of tids programmed by this request */
__u32 tidcnt;
/* length of transfer buffer programmed by this request */
@@ -131,23 +131,23 @@ struct hfi1_base_info {
*/
__u32 bthqp;
/* PIO credit return address, */
-__u64 sc_credits_addr;
+__aligned_u64 sc_credits_addr;
/*
* Base address of write-only pio buffers for this process.
* Each buffer has sendpio_credits*64 bytes.
*/
-__u64 pio_bufbase_sop;
+__aligned_u64 pio_bufbase_sop;
/*
* Base address of write-only pio buffers for this process.
* Each buffer has sendpio_credits*64 bytes.
*/
-__u64 pio_bufbase;
+__aligned_u64 pio_bufbase;
/* address where receive buffer queue is mapped into */
-__u64 rcvhdr_bufbase;
+__aligned_u64 rcvhdr_bufbase;
/* base address of Eager receive buffers. */
-__u64 rcvegr_bufbase;
+__aligned_u64 rcvegr_bufbase;
/* base address of SDMA completion ring */
-__u64 sdma_comp_bufbase;
+__aligned_u64 sdma_comp_bufbase;
/*
* User register base for init code, not to be used directly by
* protocol or applications. Always maps real chip register space.
@@ -155,20 +155,20 @@ struct hfi1_base_info {
* ur_rcvhdrhead, ur_rcvhdrtail, ur_rcvegrhead, ur_rcvegrtail,
* ur_rcvtidflow
*/
-__u64 user_regbase;
+__aligned_u64 user_regbase;
/* notification events */
-__u64 events_bufbase;
+__aligned_u64 events_bufbase;
/* status page */
-__u64 status_bufbase;
+__aligned_u64 status_bufbase;
/* rcvhdrtail update */
-__u64 rcvhdrtail_base;
+__aligned_u64 rcvhdrtail_base;
/*
* shared memory pages for subctxts if ctxt is shared; these cover
* all the processes in the group sharing a single context.
* all have enough space for the num_subcontexts value on this job.
*/
-__u64 subctxt_uregbase;
+__aligned_u64 subctxt_uregbase;
-__u64 subctxt_rcvegrbuf;
+__aligned_u64 subctxt_rcvegrbuf;
-__u64 subctxt_rcvhdrbuf;
+__aligned_u64 subctxt_rcvhdrbuf;
};
#endif /* _LINIUX__HFI1_IOCTL_H */
@@ -177,8 +177,8 @@ struct hfi1_sdma_comp_entry {
* Device status and notifications from driver to user-space.
*/
struct hfi1_status {
-__u64 dev; /* device/hw status bits */
+__aligned_u64 dev; /* device/hw status bits */
-__u64 port; /* port state and status bits */
+__aligned_u64 port; /* port state and status bits */
char freezemsg[0];
};
...
@@ -37,18 +37,18 @@
#include <linux/types.h>
struct hns_roce_ib_create_cq {
-__u64 buf_addr;
+__aligned_u64 buf_addr;
-__u64 db_addr;
+__aligned_u64 db_addr;
};
struct hns_roce_ib_create_cq_resp {
-__u64 cqn; /* Only 32 bits used, 64 for compat */
+__aligned_u64 cqn; /* Only 32 bits used, 64 for compat */
-__u64 cap_flags;
+__aligned_u64 cap_flags;
};
struct hns_roce_ib_create_qp {
-__u64 buf_addr;
+__aligned_u64 buf_addr;
-__u64 db_addr;
+__aligned_u64 db_addr;
__u8 log_sq_bb_count;
__u8 log_sq_stride;
__u8 sq_no_prefetch;
@@ -56,7 +56,7 @@ struct hns_roce_ib_create_qp {
};
struct hns_roce_ib_create_qp_resp {
-__u64 cap_flags;
+__aligned_u64 cap_flags;
};
struct hns_roce_ib_alloc_ucontext_resp {
...
@@ -61,17 +61,17 @@ struct i40iw_alloc_pd_resp {
};
struct i40iw_create_cq_req {
-__u64 user_cq_buffer;
+__aligned_u64 user_cq_buffer;
-__u64 user_shadow_area;
+__aligned_u64 user_shadow_area;
};
struct i40iw_create_qp_req {
-__u64 user_wqe_buffers;
+__aligned_u64 user_wqe_buffers;
-__u64 user_compl_ctx;
+__aligned_u64 user_compl_ctx;
/* UDA QP PHB */
-__u64 user_sq_phb; /* place for VA of the sq phb buff */
+__aligned_u64 user_sq_phb; /* place for VA of the sq phb buff */
-__u64 user_rq_phb; /* place for VA of the rq phb buff */
+__aligned_u64 user_rq_phb; /* place for VA of the rq phb buff */
};
enum i40iw_memreg_type {
...
@@ -73,8 +73,8 @@ struct ib_ucm_cmd_hdr {
};
struct ib_ucm_create_id {
-__u64 uid;
+__aligned_u64 uid;
-__u64 response;
+__aligned_u64 response;
};
struct ib_ucm_create_id_resp {
@@ -82,7 +82,7 @@ struct ib_ucm_create_id_resp {
};
struct ib_ucm_destroy_id {
-__u64 response;
+__aligned_u64 response;
__u32 id;
__u32 reserved;
};
@@ -92,7 +92,7 @@ struct ib_ucm_destroy_id_resp {
};
struct ib_ucm_attr_id {
-__u64 response;
+__aligned_u64 response;
__u32 id;
__u32 reserved;
};
@@ -105,7 +105,7 @@ struct ib_ucm_attr_id_resp {
};
struct ib_ucm_init_qp_attr {
-__u64 response;
+__aligned_u64 response;
__u32 id;
__u32 qp_state;
};
@@ -123,7 +123,7 @@ struct ib_ucm_notify {
};
struct ib_ucm_private_data {
-__u64 data;
+__aligned_u64 data;
__u32 id;
__u8 len;
__u8 reserved[3];
@@ -135,9 +135,9 @@ struct ib_ucm_req {
__u32 qp_type;
__u32 psn;
__be64 sid;
-__u64 data;
+__aligned_u64 data;
-__u64 primary_path;
+__aligned_u64 primary_path;
-__u64 alternate_path;
+__aligned_u64 alternate_path;
__u8 len;
__u8 peer_to_peer;
__u8 responder_resources;
@@ -153,8 +153,8 @@ struct ib_ucm_req {
};
struct ib_ucm_rep {
-__u64 uid;
+__aligned_u64 uid;
-__u64 data;
+__aligned_u64 data;
__u32 id;
__u32 qpn;
__u32 psn;
@@ -172,15 +172,15 @@ struct ib_ucm_rep {
struct ib_ucm_info {
__u32 id;
__u32 status;
-__u64 info;
+__aligned_u64 info;
-__u64 data;
+__aligned_u64 data;
__u8 info_len;
__u8 data_len;
__u8 reserved[6];
};
struct ib_ucm_mra {
-__u64 data;
+__aligned_u64 data;
__u32 id;
__u8 len;
__u8 timeout;
@@ -188,8 +188,8 @@ struct ib_ucm_mra {
};
struct ib_ucm_lap {
-__u64 path;
+__aligned_u64 path;
-__u64 data;
+__aligned_u64 data;
__u32 id;
__u8 len;
__u8 reserved[3];
@@ -199,8 +199,8 @@ struct ib_ucm_sidr_req {
__u32 id;
__u32 timeout;
__be64 sid;
-__u64 data;
+__aligned_u64 data;
-__u64 path;
+__aligned_u64 path;
__u16 reserved_pkey;
__u8 len;
__u8 max_cm_retries;
@@ -212,8 +212,8 @@ struct ib_ucm_sidr_rep {
__u32 qpn;
__u32 qkey;
__u32 status;
-__u64 info;
+__aligned_u64 info;
-__u64 data;
+__aligned_u64 data;
__u8 info_len;
__u8 data_len;
__u8 reserved[6];
@@ -222,9 +222,9 @@ struct ib_ucm_sidr_rep {
* event notification ABI structures.
*/
struct ib_ucm_event_get {
-__u64 response;
+__aligned_u64 response;
-__u64 data;
+__aligned_u64 data;
-__u64 info;
+__aligned_u64 info;
__u8 data_len;
__u8 info_len;
__u8 reserved[6];
@@ -303,7 +303,7 @@ struct ib_ucm_sidr_rep_event_resp {
#define IB_UCM_PRES_ALTERNATE 0x08
struct ib_ucm_event_resp {
-__u64 uid;
+__aligned_u64 uid;
__u32 id;
__u32 event;
__u32 present;
...
@@ -143,7 +143,7 @@ struct ib_user_mad_hdr {
*/
struct ib_user_mad {
struct ib_user_mad_hdr hdr;
-__u64 data[0];
+__aligned_u64 data[0];
};
/*
@@ -225,7 +225,7 @@ struct ib_user_mad_reg_req2 {
__u8 mgmt_class_version;
__u16 res;
__u32 flags;
-__u64 method_mask[2];
+__aligned_u64 method_mask[2];
__u32 oui;
__u8 rmpp_version;
__u8 reserved[3];
...
@@ -117,13 +117,13 @@ enum {
*/
struct ib_uverbs_async_event_desc {
-__u64 element;
+__aligned_u64 element;
__u32 event_type; /* enum ib_event_type */
__u32 reserved;
};
struct ib_uverbs_comp_event_desc {
-__u64 cq_handle;
+__aligned_u64 cq_handle;
};
struct ib_uverbs_cq_moderation_caps {
@@ -150,15 +150,15 @@ struct ib_uverbs_cmd_hdr {
};
struct ib_uverbs_ex_cmd_hdr {
-__u64 response;
+__aligned_u64 response;
__u16 provider_in_words;
__u16 provider_out_words;
__u32 cmd_hdr_reserved;
};
struct ib_uverbs_get_context {
-__u64 response;
+__aligned_u64 response;
-__u64 driver_data[0];
+__aligned_u64 driver_data[0];
};
struct ib_uverbs_get_context_resp {
@@ -167,16 +167,16 @@ struct ib_uverbs_get_context_resp {
};
struct ib_uverbs_query_device {
-__u64 response;
+__aligned_u64 response;
-__u64 driver_data[0];
+__aligned_u64 driver_data[0];
};
struct ib_uverbs_query_device_resp {
-__u64 fw_ver;
+__aligned_u64 fw_ver;
__be64 node_guid;
__be64 sys_image_guid;
-__u64 max_mr_size;
+__aligned_u64 max_mr_size;
-__u64 page_size_cap;
+__aligned_u64 page_size_cap;
__u32 vendor_id;
__u32 vendor_part_id;
__u32 hw_ver;
@@ -221,7 +221,7 @@ struct ib_uverbs_ex_query_device {
};
struct ib_uverbs_odp_caps {
-__u64 general_caps;
+__aligned_u64 general_caps;
struct {
__u32 rc_odp_caps;
__u32 uc_odp_caps;
@@ -260,9 +260,9 @@ struct ib_uverbs_ex_query_device_resp {
__u32 comp_mask;
__u32 response_length;
struct ib_uverbs_odp_caps odp_caps;
-__u64 timestamp_mask;
+__aligned_u64 timestamp_mask;
-__u64 hca_core_clock; /* in KHZ */
+__aligned_u64 hca_core_clock; /* in KHZ */
-__u64 device_cap_flags_ex;
+__aligned_u64 device_cap_flags_ex;
struct ib_uverbs_rss_caps rss_caps;
__u32 max_wq_type_rq;
__u32 raw_packet_caps;
@@ -271,10 +271,10 @@ struct ib_uverbs_ex_query_device_resp {
};
struct ib_uverbs_query_port {
-__u64 response;
+__aligned_u64 response;
__u8 port_num;
__u8 reserved[7];
-__u64 driver_data[0];
+__aligned_u64 driver_data[0];
};
struct ib_uverbs_query_port_resp {
@@ -302,8 +302,8 @@ struct ib_uverbs_query_port_resp {
};
struct ib_uverbs_alloc_pd {
-__u64 response;
+__aligned_u64 response;
-__u64 driver_data[0];
+__aligned_u64 driver_data[0];
};
struct ib_uverbs_alloc_pd_resp {
@@ -315,10 +315,10 @@ struct ib_uverbs_dealloc_pd {
};
struct ib_uverbs_open_xrcd {
-__u64 response;
+__aligned_u64 response;
__u32 fd;
__u32 oflags;
-__u64 driver_data[0];
+__aligned_u64 driver_data[0];
};
struct ib_uverbs_open_xrcd_resp {
@@ -330,13 +330,13 @@ struct ib_uverbs_close_xrcd {
};
struct ib_uverbs_reg_mr {
-__u64 response;
+__aligned_u64 response;
-__u64 start;
+__aligned_u64 start;
-__u64 length;
+__aligned_u64 length;
-__u64 hca_va;
+__aligned_u64 hca_va;
__u32 pd_handle;
__u32 access_flags;
-__u64 driver_data[0];
+__aligned_u64 driver_data[0];
};
struct ib_uverbs_reg_mr_resp {
@@ -346,12 +346,12 @@ struct ib_uverbs_reg_mr_resp {
};
struct ib_uverbs_rereg_mr {
-__u64 response;
+__aligned_u64 response;
__u32 mr_handle;
__u32 flags;
-__u64 start;
+__aligned_u64 start;
-__u64 length;
+__aligned_u64 length;
-__u64 hca_va;
+__aligned_u64 hca_va;
__u32 pd_handle;
__u32 access_flags;
};
@@ -366,7 +366,7 @@ struct ib_uverbs_dereg_mr {
};
struct ib_uverbs_alloc_mw {
-__u64 response;
+__aligned_u64 response;
__u32 pd_handle;
__u8 mw_type;
__u8 reserved[3];
@@ -382,7 +382,7 @@ struct ib_uverbs_dealloc_mw {
};
struct ib_uverbs_create_comp_channel {
-__u64 response;
+__aligned_u64 response;
};
struct ib_uverbs_create_comp_channel_resp {
@@ -390,13 +390,13 @@ struct ib_uverbs_create_comp_channel_resp {
};
struct ib_uverbs_create_cq {
-__u64 response;
+__aligned_u64 response;
-__u64 user_handle;
+__aligned_u64 user_handle;
__u32 cqe;
__u32 comp_vector;
__s32 comp_channel;
__u32 reserved;
-__u64 driver_data[0];
+__aligned_u64 driver_data[0];
};
enum ib_uverbs_ex_create_cq_flags {
@@ -405,7 +405,7 @@ enum ib_uverbs_ex_create_cq_flags {
};
struct ib_uverbs_ex_create_cq {
-__u64 user_handle;
+__aligned_u64 user_handle;
__u32 cqe;
__u32 comp_vector;
__s32 comp_channel;
@@ -426,26 +426,26 @@ struct ib_uverbs_ex_create_cq_resp {
};
struct ib_uverbs_resize_cq {
-__u64 response;
+__aligned_u64 response;
__u32 cq_handle;
__u32 cqe;
-__u64 driver_data[0];
+__aligned_u64 driver_data[0];
};
struct ib_uverbs_resize_cq_resp {
__u32 cqe;
__u32 reserved;
-__u64 driver_data[0];
+__aligned_u64 driver_data[0];
};
struct ib_uverbs_poll_cq {
-__u64 response;
+__aligned_u64 response;
__u32 cq_handle;
__u32 ne;
};
struct ib_uverbs_wc {
-__u64 wr_id;
+__aligned_u64 wr_id;
__u32 status;
__u32 opcode;
__u32 vendor_err;
@@ -477,7 +477,7 @@ struct ib_uverbs_req_notify_cq {
};
struct ib_uverbs_destroy_cq {
-__u64 response;
+__aligned_u64 response;
__u32 cq_handle;
__u32 reserved;
};
@@ -546,8 +546,8 @@ struct ib_uverbs_qp_attr {
};
struct ib_uverbs_create_qp {
-__u64 response;
+__aligned_u64 response;
-__u64 user_handle;
+__aligned_u64 user_handle;
__u32 pd_handle;
__u32 send_cq_handle;
__u32 recv_cq_handle;
@@ -561,7 +561,7 @@ struct ib_uverbs_create_qp {
__u8 qp_type;
__u8 is_srq;
__u8 reserved;
-__u64 driver_data[0];
+__aligned_u64 driver_data[0];
};
enum ib_uverbs_create_qp_mask {
@@ -587,7 +587,7 @@ enum {
};
struct ib_uverbs_ex_create_qp {
-__u64 user_handle;
+__aligned_u64 user_handle;
__u32 pd_handle;
__u32 send_cq_handle;
__u32 recv_cq_handle;
@@ -608,13 +608,13 @@ struct ib_uverbs_ex_create_qp {
};
struct ib_uverbs_open_qp {
-__u64 response;
+__aligned_u64 response;
-__u64 user_handle;
+__aligned_u64 user_handle;
__u32 pd_handle;
__u32 qpn;
__u8 qp_type;
__u8 reserved[7];
-__u64 driver_data[0];
+__aligned_u64 driver_data[0];
};
/* also used for open response */
@@ -655,10 +655,10 @@ struct ib_uverbs_qp_dest {
};
struct ib_uverbs_query_qp {
-__u64 response;
+__aligned_u64 response;
__u32 qp_handle;
__u32 attr_mask;
-__u64 driver_data[0];
+__aligned_u64 driver_data[0];
};
struct ib_uverbs_query_qp_resp {
@@ -692,7 +692,7 @@ struct ib_uverbs_query_qp_resp {
__u8 alt_timeout;
__u8 sq_sig_all;
__u8 reserved[5];
-__u64 driver_data[0];
+__aligned_u64 driver_data[0];
};
struct ib_uverbs_modify_qp {
@@ -722,7 +722,7 @@ struct ib_uverbs_modify_qp {
__u8 alt_port_num;
__u8 alt_timeout;
__u8 reserved[2];
-__u64 driver_data[0];
+__aligned_u64 driver_data[0];
};
struct ib_uverbs_ex_modify_qp {
@@ -740,7 +740,7 @@ struct ib_uverbs_ex_modify_qp_resp {
};
struct ib_uverbs_destroy_qp {
-__u64 response;
+__aligned_u64 response;
__u32 qp_handle;
__u32 reserved;
};
@@ -756,13 +756,13 @@ struct ib_uverbs_destroy_qp_resp {
* document the ABI.
*/
struct ib_uverbs_sge {
-__u64 addr;
+__aligned_u64 addr;
__u32 length;
__u32 lkey;
};
struct ib_uverbs_send_wr {
-__u64 wr_id;
+__aligned_u64 wr_id;
__u32 num_sge;
__u32 opcode;
__u32 send_flags;
@@ -772,14 +772,14 @@ struct ib_uverbs_send_wr {
} ex;
union {
struct {
-__u64 remote_addr;
+__aligned_u64 remote_addr;
__u32 rkey;
__u32 reserved;
} rdma;
struct {
-__u64 remote_addr;
+__aligned_u64 remote_addr;
-__u64 compare_add;
+__aligned_u64 compare_add;
-__u64 swap;
+__aligned_u64 swap;
__u32 rkey;
__u32 reserved;
} atomic;
@@ -793,7 +793,7 @@ struct ib_uverbs_send_wr {
};
struct ib_uverbs_post_send {
-__u64 response;
+__aligned_u64 response;
__u32 qp_handle;
__u32 wr_count;
__u32 sge_count;
@@ -806,13 +806,13 @@ struct ib_uverbs_post_send_resp {
};
struct ib_uverbs_recv_wr {
-__u64 wr_id;
+__aligned_u64 wr_id;
__u32 num_sge;
__u32 reserved;
};
struct ib_uverbs_post_recv {
-__u64 response;
+__aligned_u64 response;
__u32 qp_handle;
__u32 wr_count;
__u32 sge_count;
@@ -825,7 +825,7 @@ struct ib_uverbs_post_recv_resp {
};
struct ib_uverbs_post_srq_recv {
-__u64 response;
+__aligned_u64 response;
__u32 srq_handle;
__u32 wr_count;
__u32 sge_count;
@@ -838,8 +838,8 @@ struct ib_uverbs_post_srq_recv_resp {
};
struct ib_uverbs_create_ah {
-__u64 response;
+__aligned_u64 response;
-__u64 user_handle;
+__aligned_u64 user_handle;
__u32 pd_handle;
__u32 reserved;
struct ib_uverbs_ah_attr attr;
@@ -858,7 +858,7 @@ struct ib_uverbs_attach_mcast {
__u32 qp_handle;
__u16 mlid;
__u16 reserved;
-__u64 driver_data[0];
+__aligned_u64 driver_data[0];
};
struct ib_uverbs_detach_mcast {
@@ -866,7 +866,7 @@ struct ib_uverbs_detach_mcast {
__u32 qp_handle;
__u16 mlid;
__u16 reserved;
-__u64 driver_data[0];
+__aligned_u64 driver_data[0];
};
struct ib_uverbs_flow_spec_hdr {
@@ -874,7 +874,7 @@ struct ib_uverbs_flow_spec_hdr {
__u16 size;
__u16 reserved;
/* followed by flow_spec */
-__u64 flow_spec_data[0];
+__aligned_u64 flow_spec_data[0];
};
struct ib_uverbs_flow_eth_filter {
@@ -1033,18 +1033,18 @@ struct ib_uverbs_destroy_flow {
};
struct ib_uverbs_create_srq {
-__u64 response;
+__aligned_u64 response;
-__u64 user_handle;
+__aligned_u64 user_handle;
__u32 pd_handle;
__u32 max_wr;
__u32 max_sge;
__u32 srq_limit;
-__u64 driver_data[0];
+__aligned_u64 driver_data[0];
};
struct ib_uverbs_create_xsrq {
-__u64 response;
+__aligned_u64 response;
-__u64 user_handle;
+__aligned_u64 user_handle;
__u32 srq_type;
__u32 pd_handle;
__u32 max_wr;
@@ -1053,7 +1053,7 @@ struct ib_uverbs_create_xsrq {
__u32 max_num_tags;
__u32 xrcd_handle;
__u32 cq_handle;
-__u64 driver_data[0];
+__aligned_u64 driver_data[0];
};
struct ib_uverbs_create_srq_resp {
@@ -1068,14 +1068,14 @@ struct ib_uverbs_modify_srq {
__u32 attr_mask;
__u32 max_wr;
__u32 srq_limit;
-__u64 driver_data[0];
+__aligned_u64 driver_data[0];
};
struct ib_uverbs_query_srq {
-__u64 response;
+__aligned_u64 response;
__u32 srq_handle;
__u32 reserved;
-__u64 driver_data[0];
+__aligned_u64 driver_data[0];
};
struct ib_uverbs_query_srq_resp {
@@ -1086,7 +1086,7 @@ struct ib_uverbs_query_srq_resp {
};
struct ib_uverbs_destroy_srq {
-__u64 response;
+__aligned_u64 response;
__u32 srq_handle;
__u32 reserved;
};
@@ -1098,7 +1098,7 @@ struct ib_uverbs_destroy_srq_resp {
struct ib_uverbs_ex_create_wq {
__u32 comp_mask;
__u32 wq_type;
-__u64 user_handle;
+__aligned_u64 user_handle;
__u32 pd_handle;
__u32 cq_handle;
__u32 max_wr;
...
@@ -77,8 +77,8 @@ struct mlx4_ib_alloc_pd_resp {
};
struct mlx4_ib_create_cq {
-__u64 buf_addr;
+__aligned_u64 buf_addr;
-__u64 db_addr;
+__aligned_u64 db_addr;
};
struct mlx4_ib_create_cq_resp {
@@ -87,12 +87,12 @@ struct mlx4_ib_create_cq_resp {
};
struct mlx4_ib_resize_cq {
-__u64 buf_addr;
+__aligned_u64 buf_addr;
};
struct mlx4_ib_create_srq {
-__u64 buf_addr;
+__aligned_u64 buf_addr;
-__u64 db_addr;
+__aligned_u64 db_addr;
};
struct mlx4_ib_create_srq_resp {
@@ -101,7 +101,7 @@ struct mlx4_ib_create_srq_resp {
};
struct mlx4_ib_create_qp_rss {
-__u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */
+__aligned_u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */
__u8 rx_hash_function; /* Use enum mlx4_ib_rx_hash_function_flags */
__u8 reserved[7];
__u8 rx_hash_key[40];
@@ -110,8 +110,8 @@ struct mlx4_ib_create_qp_rss {
};
struct mlx4_ib_create_qp {
-__u64 buf_addr;
+__aligned_u64 buf_addr;
-__u64 db_addr;
+__aligned_u64 db_addr;
__u8 log_sq_bb_count;
__u8 log_sq_stride;
__u8 sq_no_prefetch;
@@ -120,8 +120,8 @@ struct mlx4_ib_create_qp {
};
struct mlx4_ib_create_wq {
-__u64 buf_addr;
+__aligned_u64 buf_addr;
-__u64 db_addr;
+__aligned_u64 db_addr;
__u8 log_range_size;
__u8 reserved[3];
__u32 comp_mask;
@@ -161,7 +161,7 @@ enum mlx4_ib_rx_hash_fields {
};
struct mlx4_ib_rss_caps {
-__u64 rx_hash_fields_mask; /* enum mlx4_ib_rx_hash_fields */
+__aligned_u64 rx_hash_fields_mask; /* enum mlx4_ib_rx_hash_fields */
__u8 rx_hash_function; /* enum mlx4_ib_rx_hash_function_flags */
__u8 reserved[7];
};
@@ -181,7 +181,7 @@ struct mlx4_ib_tso_caps {
struct mlx4_uverbs_ex_query_device_resp {
__u32 comp_mask;
__u32 response_length;
-__u64 hca_core_clock_offset;
+__aligned_u64 hca_core_clock_offset;
__u32 max_inl_recv_sz;
__u32 reserved;
struct mlx4_ib_rss_caps rss_caps;
...
@@ -84,7 +84,7 @@ struct mlx5_ib_alloc_ucontext_req_v2 {
__u8 reserved0;
__u16 reserved1;
__u32 reserved2;
-__u64 lib_caps;
+__aligned_u64 lib_caps;
};
enum mlx5_ib_alloc_ucontext_resp_mask {
@@ -125,7 +125,7 @@ struct mlx5_ib_alloc_ucontext_resp {
__u8 cmds_supp_uhw;
__u8 eth_min_inline;
__u8 clock_info_versions;
-__u64 hca_core_clock_offset;
+__aligned_u64 hca_core_clock_offset;
__u32 log_uar_size;
__u32 num_uars_per_page;
__u32 num_dyn_bfregs;
@@ -147,7 +147,7 @@ struct mlx5_ib_tso_caps {
};
struct mlx5_ib_rss_caps {
-__u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
+__aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
__u8 reserved[7];
};
@@ -248,8 +248,8 @@ enum mlx5_ib_create_cq_flags {
};
struct mlx5_ib_create_cq {
-__u64 buf_addr;
+__aligned_u64 buf_addr;
-__u64 db_addr;
+__aligned_u64 db_addr;
__u32 cqe_size;
__u8 cqe_comp_en;
__u8 cqe_comp_res_format;
@@ -262,15 +262,15 @@ struct mlx5_ib_create_cq_resp {
};
struct mlx5_ib_resize_cq {
-__u64 buf_addr;
+__aligned_u64 buf_addr;
__u16 cqe_size;
__u16 reserved0;
__u32 reserved1;
};
struct mlx5_ib_create_srq {
-__u64 buf_addr;
+__aligned_u64 buf_addr;
-__u64 db_addr;
+__aligned_u64 db_addr;
__u32 flags;
__u32 reserved0; /* explicit padding (optional on i386) */
__u32 uidx;
@@ -283,8 +283,8 @@ struct mlx5_ib_create_srq_resp {
};
struct mlx5_ib_create_qp {
-__u64 buf_addr;
+__aligned_u64 buf_addr;
-__u64 db_addr;
+__aligned_u64 db_addr;
__u32 sq_wqe_count;
__u32 rq_wqe_count;
__u32 rq_wqe_shift;
@@ -292,8 +292,8 @@ struct mlx5_ib_create_qp {
__u32 uidx;
__u32 bfreg_index;
union {
-__u64 sq_buf_addr;
+__aligned_u64 sq_buf_addr;
-__u64 access_key;
+__aligned_u64 access_key;
};
};
@@ -324,7 +324,7 @@ enum mlx5_rx_hash_fields {
};
struct mlx5_ib_create_qp_rss {
-__u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
+__aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
__u8 rx_key_len; /* valid only for Toeplitz */
__u8 reserved[6];
@@ -349,8 +349,8 @@ enum mlx5_ib_create_wq_mask {
};
struct mlx5_ib_create_wq {
-__u64 buf_addr;
+__aligned_u64 buf_addr;
-__u64 db_addr;
+__aligned_u64 db_addr;
__u32 rq_wqe_count;
__u32 rq_wqe_shift;
__u32 user_index;
@@ -402,13 +402,13 @@ struct mlx5_ib_modify_wq {
struct mlx5_ib_clock_info {
__u32 sign;
__u32 resv;
-__u64 nsec;
+__aligned_u64 nsec;
-__u64 cycles;
+__aligned_u64 cycles;
-__u64 frac;
+__aligned_u64 frac;
__u32 mult;
__u32 shift;
-__u64 mask;
+__aligned_u64 mask;
-__u64 overflow_period;
+__aligned_u64 overflow_period;
};
enum mlx5_ib_mmap_cmd {
...
@@ -74,8 +74,8 @@ struct mthca_reg_mr {
struct mthca_create_cq {
__u32 lkey;
__u32 pdn;
-__u64 arm_db_page;
+__aligned_u64 arm_db_page;
-__u64 set_db_page;
+__aligned_u64 set_db_page;
__u32 arm_db_index;
__u32 set_db_index;
};
@@ -93,7 +93,7 @@ struct mthca_resize_cq {
struct mthca_create_srq {
__u32 lkey;
__u32 db_index;
-__u64 db_page;
+__aligned_u64 db_page;
};
struct mthca_create_srq_resp {
@@ -104,8 +104,8 @@ struct mthca_create_srq_resp {
struct mthca_create_qp {
__u32 lkey;
__u32 reserved;
-__u64 sq_db_page;
+__aligned_u64 sq_db_page;
-__u64 rq_db_page;
+__aligned_u64 rq_db_page;
__u32 sq_db_index;
__u32 rq_db_index;
};
...
@@ -72,14 +72,14 @@ struct nes_alloc_pd_resp {
};
struct nes_create_cq_req {
-__u64 user_cq_buffer;
+__aligned_u64 user_cq_buffer;
__u32 mcrqf;
__u8 reserved[4];
};
struct nes_create_qp_req {
-__u64 user_wqe_buffers;
+__aligned_u64 user_wqe_buffers;
-__u64 user_qp_buffer;
+__aligned_u64 user_qp_buffer;
};
enum iwnes_memreg_type {
...
@@ -55,13 +55,13 @@ struct ocrdma_alloc_ucontext_resp {
__u32 wqe_size;
__u32 max_inline_data;
__u32 dpp_wqe_size;
-__u64 ah_tbl_page;
+__aligned_u64 ah_tbl_page;
__u32 ah_tbl_len;
__u32 rqe_size;
__u8 fw_ver[32];
/* for future use/new features in progress */
-__u64 rsvd1;
+__aligned_u64 rsvd1;
-__u64 rsvd2;
+__aligned_u64 rsvd2;
};
struct ocrdma_alloc_pd_ureq {
@@ -87,13 +87,13 @@ struct ocrdma_create_cq_uresp {
__u32 page_size;
__u32 num_pages;
__u32 max_hw_cqe;
-__u64 page_addr[MAX_CQ_PAGES];
+__aligned_u64 page_addr[MAX_CQ_PAGES];
-__u64 db_page_addr;
+__aligned_u64 db_page_addr;
__u32 db_page_size;
__u32 phase_change;
/* for future use/new features in progress */
-__u64 rsvd1;
+__aligned_u64 rsvd1;
-__u64 rsvd2;
+__aligned_u64 rsvd2;
};
#define MAX_QP_PAGES 8
@@ -115,9 +115,9 @@ struct ocrdma_create_qp_uresp {
__u32 rq_page_size;
__u32 num_sq_pages;
__u32 num_rq_pages;
-__u64 sq_page_addr[MAX_QP_PAGES];
+__aligned_u64 sq_page_addr[MAX_QP_PAGES];
-__u64 rq_page_addr[MAX_QP_PAGES];
+__aligned_u64 rq_page_addr[MAX_QP_PAGES];
-__u64 db_page_addr;
+__aligned_u64 db_page_addr;
__u32 db_page_size;
__u32 dpp_credit;
__u32 dpp_offset;
@@ -126,7 +126,7 @@ struct ocrdma_create_qp_uresp {
__u32 db_sq_offset;
__u32 db_rq_offset;
__u32 db_shift;
-__u64 rsvd[11];
+__aligned_u64 rsvd[11];
};
struct ocrdma_create_srq_uresp {
@@ -137,16 +137,16 @@ struct ocrdma_create_srq_uresp {
__u32 rq_page_size;
__u32 num_rq_pages;
-__u64 rq_page_addr[MAX_QP_PAGES];
+__aligned_u64 rq_page_addr[MAX_QP_PAGES];
-__u64 db_page_addr;
+__aligned_u64 db_page_addr;
__u32 db_page_size;
__u32 num_rqe_allocated;
__u32 db_rq_offset;
__u32 db_shift;
-__u64 rsvd2;
+__aligned_u64 rsvd2;
-__u64 rsvd3;
+__aligned_u64 rsvd3;
};
#endif /* OCRDMA_ABI_USER_H */
@@ -40,7 +40,7 @@
/* user kernel communication data structures. */
struct qedr_alloc_ucontext_resp {
-__u64 db_pa;
+__aligned_u64 db_pa;
__u32 db_size;
__u32 max_send_wr;
@@ -57,7 +57,7 @@ struct qedr_alloc_ucontext_resp {
};
struct qedr_alloc_pd_ureq {
-__u64 rsvd1;
+__aligned_u64 rsvd1;
};
struct qedr_alloc_pd_uresp {
@@ -66,8 +66,8 @@ struct qedr_alloc_pd_uresp {
};
struct qedr_create_cq_ureq {
-__u64 addr;
+__aligned_u64 addr;
-__u64 len;
+__aligned_u64 len;
};
struct qedr_create_cq_uresp {
@@ -82,17 +82,17 @@ struct qedr_create_qp_ureq {
/* SQ */
/* user space virtual address of SQ buffer */
-__u64 sq_addr;
+__aligned_u64 sq_addr;
/* length of SQ buffer */
-__u64 sq_len;
+__aligned_u64 sq_len;
/* RQ */
/* user space virtual address of RQ buffer */
-__u64 rq_addr;
+__aligned_u64 rq_addr;
/* length of RQ buffer */
-__u64 rq_len;
+__aligned_u64 rq_len;
};
struct qedr_create_qp_uresp {
...
@@ -80,8 +80,8 @@ struct rdma_ucm_cmd_hdr {
};
struct rdma_ucm_create_id {
-__u64 uid;
+__aligned_u64 uid;
-__u64 response;
+__aligned_u64 response;
__u16 ps;
__u8 qp_type;
__u8 reserved[5];
@@ -92,7 +92,7 @@ struct rdma_ucm_create_id_resp {
};
struct rdma_ucm_destroy_id {
-__u64 response;
+__aligned_u64 response;
__u32 id;
__u32 reserved;
};
@@ -102,7 +102,7 @@ struct rdma_ucm_destroy_id_resp {
};
struct rdma_ucm_bind_ip {
-__u64 response;
+__aligned_u64 response;
struct sockaddr_in6 addr;
__u32 id;
};
@@ -143,13 +143,13 @@ enum {
};
struct rdma_ucm_query {
-__u64 response;
+__aligned_u64 response;
__u32 id;
__u32 option;
};
struct rdma_ucm_query_route_resp {
-__u64 node_guid;
+__aligned_u64 node_guid;
struct ib_user_path_rec ib_route[2];
struct sockaddr_in6 src_addr;
struct sockaddr_in6 dst_addr;
@@ -159,7 +159,7 @@ struct rdma_ucm_query_route_resp {
};
struct rdma_ucm_query_addr_resp {
-__u64 node_guid;
+__aligned_u64 node_guid;
__u8 port_num;
__u8 reserved;
__u16 pkey;
@@ -210,7 +210,7 @@ struct rdma_ucm_listen {
};
struct rdma_ucm_accept {
-__u64 uid;
+__aligned_u64 uid;
struct rdma_ucm_conn_param conn_param;
__u32 id;
__u32 reserved;
@@ -228,7 +228,7 @@ struct rdma_ucm_disconnect {
};
struct rdma_ucm_init_qp_attr {
-__u64 response;
+__aligned_u64 response;
__u32 id;
__u32 qp_state;
};
@@ -239,8 +239,8 @@ struct rdma_ucm_notify {
};
struct rdma_ucm_join_ip_mcast {
-__u64 response; /* rdma_ucm_create_id_resp */
+__aligned_u64 response; /* rdma_ucm_create_id_resp */
-__u64 uid;
+__aligned_u64 uid;
struct sockaddr_in6 addr;
__u32 id;
};
@@ -253,8 +253,8 @@ enum {
};
struct rdma_ucm_join_mcast {
-__u64 response; /* rdma_ucma_create_id_resp */
+__aligned_u64 response; /* rdma_ucma_create_id_resp */
-__u64 uid;
+__aligned_u64 uid;
__u32 id;
__u16 addr_size;
__u16 join_flags;
@@ -262,11 +262,11 @@ struct rdma_ucm_join_mcast {
};
struct rdma_ucm_get_event {
-__u64 response;
+__aligned_u64 response;
};
struct rdma_ucm_event_resp {
-__u64 uid;
+__aligned_u64 uid;
__u32 id;
__u32 event;
__u32 status;
@@ -296,7 +296,7 @@ enum {
};
struct rdma_ucm_set_option {
-__u64 optval;
+__aligned_u64 optval;
__u32 id;
__u32 level;
__u32 optname;
@@ -304,7 +304,7 @@ struct rdma_ucm_set_option {
};
struct rdma_ucm_migrate_id {
-__u64 response;
+__aligned_u64 response;
__u32 id;
__u32 fd;
};
...
@@ -68,7 +68,7 @@ struct rxe_av {
};
struct rxe_send_wr {
-__u64 wr_id;
+__aligned_u64 wr_id;
__u32 num_sge;
__u32 opcode;
__u32 send_flags;
@@ -78,14 +78,14 @@ struct rxe_send_wr {
} ex;
union {
struct {
-__u64 remote_addr;
+__aligned_u64 remote_addr;
__u32 rkey;
__u32 reserved;
} rdma;
struct {
-__u64 remote_addr;
+__aligned_u64 remote_addr;
-__u64 compare_add;
+__aligned_u64 compare_add;
-__u64 swap;
+__aligned_u64 swap;
__u32 rkey;
__u32 reserved;
} atomic;
@@ -98,7 +98,7 @@ struct rxe_send_wr {
struct {
union {
struct ib_mr *mr;
-__u64 reserved;
+__aligned_u64 reserved;
};
__u32 key;
__u32 access;
@@ -107,13 +107,13 @@ struct rxe_send_wr {
};
struct rxe_sge {
-__u64 addr;
+__aligned_u64 addr;
__u32 length;
__u32 lkey;
};
struct mminfo {
-__u64 offset;
+__aligned_u64 offset;
__u32 size;
__u32 pad;
};
@@ -136,7 +136,7 @@ struct rxe_send_wqe {
struct rxe_av av;
__u32 status;
__u32 state;
-__u64 iova;
+__aligned_u64 iova;
__u32 mask;
__u32 first_psn;
__u32 last_psn;
@@ -147,7 +147,7 @@ struct rxe_send_wqe {
};
struct rxe_recv_wqe {
-__u64 wr_id;
+__aligned_u64 wr_id;
__u32 num_sge;
__u32 padding;
struct rxe_dma_info dma;
@@ -173,7 +173,7 @@ struct rxe_create_srq_resp {
};
struct rxe_modify_srq_cmd {
-__u64 mmap_info_addr;
+__aligned_u64 mmap_info_addr;
};
#endif /* RDMA_USER_RXE_H */
@@ -143,7 +143,7 @@ struct pvrdma_alloc_pd_resp {
};
struct pvrdma_create_cq {
-__u64 buf_addr;
+__aligned_u64 buf_addr;
__u32 buf_size;
__u32 reserved;
};
@@ -154,13 +154,13 @@ struct pvrdma_create_cq_resp {
};
struct pvrdma_resize_cq {
-__u64 buf_addr;
+__aligned_u64 buf_addr;
__u32 buf_size;
__u32 reserved;
};
struct pvrdma_create_srq {
-__u64 buf_addr;
+__aligned_u64 buf_addr;
__u32 buf_size;
__u32 reserved;
};
@@ -171,25 +171,25 @@ struct pvrdma_create_srq_resp {
};
struct pvrdma_create_qp {
-__u64 rbuf_addr;
+__aligned_u64 rbuf_addr;
-__u64 sbuf_addr;
+__aligned_u64 sbuf_addr;
__u32 rbuf_size;
__u32 sbuf_size;
-__u64 qp_addr;
+__aligned_u64 qp_addr;
};
/* PVRDMA masked atomic compare and swap */
struct pvrdma_ex_cmp_swap {
-__u64 swap_val;
+__aligned_u64 swap_val;
-__u64 compare_val;
+__aligned_u64 compare_val;
-__u64 swap_mask;
+__aligned_u64 swap_mask;
-__u64 compare_mask;
+__aligned_u64 compare_mask;
};
/* PVRDMA masked atomic fetch and add */
struct pvrdma_ex_fetch_add {
-__u64 add_val;
+__aligned_u64 add_val;
-__u64 field_boundary;
+__aligned_u64 field_boundary;
};
/* PVRDMA address vector. */
@@ -207,14 +207,14 @@ struct pvrdma_av {
/* PVRDMA scatter/gather entry */
struct pvrdma_sge {
-__u64 addr;
+__aligned_u64 addr;
__u32 length;
__u32 lkey;
};
/* PVRDMA receive queue work request */
struct pvrdma_rq_wqe_hdr {
-__u64 wr_id; /* wr id */
+__aligned_u64 wr_id; /* wr id */
__u32 num_sge; /* size of s/g array */
__u32 total_len; /* reserved */
};
@@ -222,7 +222,7 @@ struct pvrdma_rq_wqe_hdr {
/* PVRDMA send queue work request */
struct pvrdma_sq_wqe_hdr {
-__u64 wr_id; /* wr id */
+__aligned_u64 wr_id; /* wr id */
__u32 num_sge; /* size of s/g array */
__u32 total_len; /* reserved */
__u32 opcode; /* operation type */
@@ -234,19 +234,19 @@ struct pvrdma_sq_wqe_hdr {
__u32 reserved;
union {
struct {
-__u64 remote_addr;
+__aligned_u64 remote_addr;
__u32 rkey;
__u8 reserved[4];
} rdma;
struct {
-__u64 remote_addr;
+__aligned_u64 remote_addr;
-__u64 compare_add;
+__aligned_u64 compare_add;
-__u64 swap;
+__aligned_u64 swap;
__u32 rkey;
__u32 reserved;
} atomic;
struct {
-__u64 remote_addr;
+__aligned_u64 remote_addr;
__u32 log_arg_sz;
__u32 rkey;
union {
@@ -255,8 +255,8 @@ struct pvrdma_sq_wqe_hdr {
} wr_data;
} masked_atomics;
struct {
-__u64 iova_start;
+__aligned_u64 iova_start;
-__u64 pl_pdir_dma;
+__aligned_u64 pl_pdir_dma;
__u32 page_shift;
__u32 page_list_len;
__u32 length;
@@ -275,8 +275,8 @@ struct pvrdma_sq_wqe_hdr {
/* Completion queue element. */
struct pvrdma_cqe {
-__u64 wr_id;
+__aligned_u64 wr_id;
-__u64 qp;
+__aligned_u64 qp;
__u32 opcode;
__u32 status;
__u32 byte_len;
...