Commit 19b1a294 authored by Erez Alfasi, committed by Jason Gunthorpe

RDMA: Use __packed annotation instead of __attribute__ ((packed))

"__attribute__" set of macros has been standardized, have became more
potentially portable and consistent code back in v2.6.21 by commit
82ddcb04 ("[PATCH] extend the set of "__attribute__" shortcut macros").
Moreover, nowadays checkpatch.pl warns about using __attribute__((packed))
instead of __packed.

This patch converts all the "__attribute__ ((packed))" annotations to
"__packed" within the RDMA subsystem.
Signed-off-by: Erez Alfasi <ereza@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent d0a93556
@@ -98,7 +98,7 @@ struct cm_req_msg {
u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
-} __attribute__ ((packed));
+} __packed;
static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
{
@@ -423,7 +423,7 @@ enum cm_msg_response {
u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];
-} __attribute__ ((packed));
+} __packed;
static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
{
@@ -461,7 +461,7 @@ struct cm_rej_msg {
u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];
-} __attribute__ ((packed));
+} __packed;
static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
{
@@ -506,7 +506,7 @@ struct cm_rep_msg {
u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];
-} __attribute__ ((packed));
+} __packed;
static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
{
@@ -614,7 +614,7 @@ struct cm_rtu_msg {
u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];
-} __attribute__ ((packed));
+} __packed;
struct cm_dreq_msg {
struct ib_mad_hdr hdr;
@@ -626,7 +626,7 @@ struct cm_dreq_msg {
u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];
-} __attribute__ ((packed));
+} __packed;
static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
{
@@ -647,7 +647,7 @@ struct cm_drep_msg {
u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];
-} __attribute__ ((packed));
+} __packed;
struct cm_lap_msg {
struct ib_mad_hdr hdr;
@@ -675,7 +675,7 @@ struct cm_lap_msg {
u8 offset63;
u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
-} __attribute__ ((packed));
+} __packed;
static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
{
@@ -784,7 +784,7 @@ struct cm_apr_msg {
u8 info[IB_CM_APR_INFO_LENGTH];
u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
-} __attribute__ ((packed));
+} __packed;
struct cm_sidr_req_msg {
struct ib_mad_hdr hdr;
@@ -795,7 +795,7 @@ struct cm_sidr_req_msg {
__be64 service_id;
u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
-} __attribute__ ((packed));
+} __packed;
struct cm_sidr_rep_msg {
struct ib_mad_hdr hdr;
@@ -811,7 +811,7 @@ struct cm_sidr_rep_msg {
u8 info[IB_CM_SIDR_REP_INFO_LENGTH];
u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
-} __attribute__ ((packed));
+} __packed;
static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
{
......
@@ -73,14 +73,14 @@ struct ib_mad_private_header {
struct ib_mad_recv_wc recv_wc;
struct ib_wc wc;
u64 mapping;
-} __attribute__ ((packed));
+} __packed;
struct ib_mad_private {
struct ib_mad_private_header header;
size_t mad_size;
struct ib_grh grh;
u8 mad[0];
-} __attribute__ ((packed));
+} __packed;
struct ib_rmpp_segment {
struct list_head list;
......
@@ -64,7 +64,7 @@ enum t3_wr_flags {
T3_SOLICITED_EVENT_FLAG = 0x04,
T3_READ_FENCE_FLAG = 0x08,
T3_LOCAL_FENCE_FLAG = 0x10
-} __attribute__ ((packed));
+} __packed;
enum t3_wr_opcode {
T3_WR_BP = FW_WROPCODE_RI_BYPASS,
@@ -77,7 +77,7 @@ enum t3_wr_opcode {
T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT,
T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP,
T3_WR_FASTREG = FW_WROPCODE_RI_FASTREGISTER_MR
-} __attribute__ ((packed));
+} __packed;
enum t3_rdma_opcode {
T3_RDMA_WRITE, /* IETF RDMAP v1.0 ... */
@@ -95,7 +95,7 @@ enum t3_rdma_opcode {
T3_QP_MOD,
T3_BYPASS,
T3_RDMA_READ_REQ_WITH_INV,
-} __attribute__ ((packed));
+} __packed;
static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
{
@@ -306,7 +306,7 @@ enum t3_mpa_attrs {
uP_RI_MPA_TX_MARKER_ENABLE = 0x2,
uP_RI_MPA_CRC_ENABLE = 0x4,
uP_RI_MPA_IETF_ENABLE = 0x8
-} __attribute__ ((packed));
+} __packed;
enum t3_qp_caps {
uP_RI_QP_RDMA_READ_ENABLE = 0x01,
@@ -314,7 +314,7 @@ enum t3_qp_caps {
uP_RI_QP_BIND_ENABLE = 0x04,
uP_RI_QP_FAST_REGISTER_ENABLE = 0x08,
uP_RI_QP_STAG0_ENABLE = 0x10
-} __attribute__ ((packed));
+} __packed;
enum rdma_init_rtr_types {
RTR_READ = 1,
......
@@ -77,7 +77,7 @@ struct mthca_cq_context {
__be32 ci_db; /* Arbel only */
__be32 state_db; /* Arbel only */
u32 reserved;
-} __attribute__((packed));
+} __packed;
#define MTHCA_CQ_STATUS_OK ( 0 << 28)
#define MTHCA_CQ_STATUS_OVERFLOW ( 9 << 28)
......
@@ -63,7 +63,7 @@ struct mthca_eq_context {
__be32 consumer_index;
__be32 producer_index;
u32 reserved3[4];
-} __attribute__((packed));
+} __packed;
#define MTHCA_EQ_STATUS_OK ( 0 << 28)
#define MTHCA_EQ_STATUS_OVERFLOW ( 9 << 28)
@@ -130,7 +130,7 @@ struct mthca_eqe {
u32 raw[6];
struct {
__be32 cqn;
-} __attribute__((packed)) comp;
+} __packed comp;
struct {
u16 reserved1;
__be16 token;
@@ -138,27 +138,27 @@
u8 reserved3[3];
u8 status;
__be64 out_param;
-} __attribute__((packed)) cmd;
+} __packed cmd;
struct {
__be32 qpn;
-} __attribute__((packed)) qp;
+} __packed qp;
struct {
__be32 srqn;
-} __attribute__((packed)) srq;
+} __packed srq;
struct {
__be32 cqn;
u32 reserved1;
u8 reserved2[3];
u8 syndrome;
-} __attribute__((packed)) cq_err;
+} __packed cq_err;
struct {
u32 reserved1[2];
__be32 port;
-} __attribute__((packed)) port_change;
+} __packed port_change;
} event;
u8 reserved3[3];
u8 owner;
-} __attribute__((packed));
+} __packed;
#define MTHCA_EQ_ENTRY_OWNER_SW (0 << 7)
#define MTHCA_EQ_ENTRY_OWNER_HW (1 << 7)
......
@@ -60,7 +60,7 @@ struct mthca_mpt_entry {
__be64 mtt_seg;
__be32 mtt_sz; /* Arbel only */
u32 reserved[2];
-} __attribute__((packed));
+} __packed;
#define MTHCA_MPT_FLAG_SW_OWNS (0xfUL << 28)
#define MTHCA_MPT_FLAG_MIO (1 << 17)
......
@@ -115,7 +115,7 @@ struct mthca_qp_path {
u8 hop_limit;
__be32 sl_tclass_flowlabel;
u8 rgid[16];
-} __attribute__((packed));
+} __packed;
struct mthca_qp_context {
__be32 flags;
@@ -154,14 +154,14 @@ struct mthca_qp_context {
__be16 rq_wqe_counter; /* reserved on Tavor */
__be16 sq_wqe_counter; /* reserved on Tavor */
u32 reserved3[18];
-} __attribute__((packed));
+} __packed;
struct mthca_qp_param {
__be32 opt_param_mask;
u32 reserved1;
struct mthca_qp_context context;
u32 reserved2[62];
-} __attribute__((packed));
+} __packed;
enum {
MTHCA_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
......
@@ -643,7 +643,7 @@ struct rxe_atmeth {
__be32 rkey;
__be64 swap_add;
__be64 comp;
-} __attribute__((__packed__));
+} __packed;
static inline u64 __atmeth_va(void *arg)
{
......
@@ -311,7 +311,7 @@ struct iser_login_desc {
u64 rsp_dma;
struct ib_sge sge;
struct ib_cqe cqe;
-} __attribute__((packed));
+} __packed;
struct iser_conn;
struct ib_conn;
......
@@ -198,7 +198,7 @@ struct ib_sa_hdr {
__be16 attr_offset;
__be16 reserved;
ib_sa_comp_mask comp_mask;
-} __attribute__ ((packed));
+} __packed;
struct ib_mad {
struct ib_mad_hdr mad_hdr;
@@ -227,7 +227,7 @@ struct ib_sa_mad {
struct ib_rmpp_hdr rmpp_hdr;
struct ib_sa_hdr sa_hdr;
u8 data[IB_MGMT_SA_DATA];
-} __attribute__ ((packed));
+} __packed;
struct ib_vendor_mad {
struct ib_mad_hdr mad_hdr;
......
@@ -61,7 +61,7 @@ struct ib_smp {
u8 data[IB_SMP_DATA_SIZE];
u8 initial_path[IB_SMP_MAX_PATH_HOPS];
u8 return_path[IB_SMP_MAX_PATH_HOPS];
-} __attribute__ ((packed));
+} __packed;
#define IB_SMP_DIRECTION cpu_to_be16(0x8000)
......
@@ -413,6 +413,6 @@ struct opa_port_info {
u8 local_port_num;
u8 reserved12;
u8 reserved13; /* was guid_cap */
-} __attribute__ ((packed));
+} __packed;
#endif /* OPA_PORT_INFO_H */
@@ -98,7 +98,7 @@ struct opa_smp {
struct opa_node_description {
u8 data[64];
-} __attribute__ ((packed));
+} __packed;
struct opa_node_info {
u8 base_version;
@@ -114,7 +114,7 @@ struct opa_node_info {
__be32 revision;
u8 local_port_num;
u8 vendor_id[3]; /* network byte order */
-} __attribute__ ((packed));
+} __packed;
#define OPA_PARTITION_TABLE_BLK_SIZE 32
......