Commit e2b2744a authored by Yixian Liu, committed by Jason Gunthorpe

RDMA/hns: Redefine interfaces used in creating cq

Some interfaces are defined with unnecessary input parameters, such as
"nent" and "vector". This patch redefines these interfaces to make the
code simpler and more readable.

Link: https://lore.kernel.org/r/1574044493-46984-2-git-send-email-liweihang@hisilicon.com
Signed-off-by: Yixian Liu <liuyixian@huawei.com>
Signed-off-by: Weihang Li <liweihang@hisilicon.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 666e8ff5
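
The whole change is one mechanical pattern: per-CQ attributes that used to be
threaded through every helper as extra parameters ("nent", "vector") are now
stored once in struct hns_roce_cq (as cq_depth and vector) before the helpers
run, so each helper only needs the CQ object. A minimal standalone C sketch of
that pattern (names and types are illustrative, not the driver's real
definitions):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the fields this patch keeps in the CQ object;
 * illustrative only, not the driver's real struct hns_roce_cq. */
struct cq {
	uint32_t cq_depth;	/* CQE count, a power of two */
	int vector;		/* completion vector chosen at create time */
};

/* Old shape: per-CQ state duplicated into every call. */
static void write_cqc_old(uint32_t nent, int vector)
{
	printf("shift=%d ceqn=%d\n", 31 - __builtin_clz(nent), vector);
}

/* New shape: the helper reads everything from the CQ object itself. */
static void write_cqc_new(const struct cq *cq)
{
	printf("shift=%d ceqn=%d\n", 31 - __builtin_clz(cq->cq_depth),
	       cq->vector);
}

int main(void)
{
	struct cq cq = { .cq_depth = 128, .vector = 3 };

	write_cqc_old(cq.cq_depth, cq.vector);	/* caller repeats state */
	write_cqc_new(&cq);			/* same output, one argument */
	return 0;
}

The hunks below apply exactly this shape change to hns_roce_cq_alloc(),
hns_roce_ib_get_cq_umem(), hns_roce_ib_alloc_cq_buf(), hns_roce_ib_free_cq_buf(),
create_user_cq(), create_kernel_cq() and the ->write_cqc() hook.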
@@ -82,9 +82,8 @@ static int hns_roce_hw_create_cq(struct hns_roce_dev *dev,
                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
 }
 
-static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
-                             struct hns_roce_mtt *hr_mtt,
-                             struct hns_roce_cq *hr_cq, int vector)
+static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev,
+                             struct hns_roce_cq *hr_cq)
 {
         struct hns_roce_cmd_mailbox *mailbox;
         struct hns_roce_hem_table *mtt_table;
@@ -103,18 +102,13 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
         mtt_table = &hr_dev->mr_table.mtt_table;
 
         mtts = hns_roce_table_find(hr_dev, mtt_table,
-                                   hr_mtt->first_seg, &dma_handle);
+                                   hr_cq->hr_buf.hr_mtt.first_seg,
+                                   &dma_handle);
         if (!mtts) {
                 dev_err(dev, "Failed to find mtt for CQ buf.\n");
                 return -EINVAL;
         }
 
-        if (vector >= hr_dev->caps.num_comp_vectors) {
-                dev_err(dev, "Invalid vector(0x%x) for CQ alloc.\n", vector);
-                return -EINVAL;
-        }
-
-        hr_cq->vector = vector;
-
         ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
         if (ret) {
                 dev_err(dev, "Num of CQ out of range.\n");
@@ -143,8 +137,7 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
                 goto err_xa;
         }
 
-        hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle,
-                              nent, vector);
+        hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);
 
         /* Send mailbox to hw */
         ret = hns_roce_hw_create_cq(hr_dev, mailbox, hr_cq->cqn);
@@ -210,15 +203,18 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 }
 
 static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
-                                   struct ib_udata *udata,
-                                   struct hns_roce_cq_buf *buf,
-                                   struct ib_umem **umem, u64 buf_addr, int cqe)
+                                   struct hns_roce_cq *hr_cq,
+                                   struct hns_roce_ib_create_cq ucmd,
+                                   struct ib_udata *udata)
 {
-        int ret;
+        struct hns_roce_cq_buf *buf = &hr_cq->hr_buf;
+        struct ib_umem **umem = &hr_cq->umem;
         u32 page_shift;
         u32 npages;
+        int ret;
 
-        *umem = ib_umem_get(udata, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
+        *umem = ib_umem_get(udata, ucmd.buf_addr,
+                            hr_cq->cq_depth * hr_dev->caps.cq_entry_sz,
                             IB_ACCESS_LOCAL_WRITE);
         if (IS_ERR(*umem))
                 return PTR_ERR(*umem);
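
Note on the ib_umem_get() size argument above: it pins cq_depth * cq_entry_sz
bytes of the user buffer. A tiny standalone check of the arithmetic (the
32-byte CQE size is an assumed illustrative value, not taken from this patch):

#include <stdio.h>

int main(void)
{
	unsigned int cq_depth = 128, cq_entry_sz = 32;

	/* 128 * 32 = 4096 bytes, i.e. one 4 KiB page pinned. */
	printf("%u bytes pinned\n", cq_depth * cq_entry_sz);
	return 0;
}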
@@ -257,10 +253,12 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
 }
 
 static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
-                                    struct hns_roce_cq_buf *buf, u32 nent)
+                                    struct hns_roce_cq *hr_cq)
 {
-        int ret;
+        struct hns_roce_cq_buf *buf = &hr_cq->hr_buf;
         u32 page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
+        u32 nent = hr_cq->cq_depth;
+        int ret;
 
         ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
                                  (1 << page_shift) * 2, &buf->hr_buf,
@@ -295,17 +293,16 @@ static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
 }
 
 static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
-                                    struct hns_roce_cq_buf *buf, int cqe)
+                                    struct hns_roce_cq *hr_cq)
 {
-        hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz,
-                          &buf->hr_buf);
+        hns_roce_buf_free(hr_dev, hr_cq->cq_depth * hr_dev->caps.cq_entry_sz,
+                          &hr_cq->hr_buf.hr_buf);
 }
 
 static int create_user_cq(struct hns_roce_dev *hr_dev,
                           struct hns_roce_cq *hr_cq,
                           struct ib_udata *udata,
-                          struct hns_roce_ib_create_cq_resp *resp,
-                          int cq_entries)
+                          struct hns_roce_ib_create_cq_resp *resp)
 {
         struct hns_roce_ib_create_cq ucmd;
         struct device *dev = hr_dev->dev;
@@ -319,9 +316,7 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
         }
 
         /* Get user space address, write it into mtt table */
-        ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf,
-                                      &hr_cq->umem, ucmd.buf_addr,
-                                      cq_entries);
+        ret = hns_roce_ib_get_cq_umem(hr_dev, hr_cq, ucmd, udata);
         if (ret) {
                 dev_err(dev, "Failed to get_cq_umem.\n");
                 return ret;
@@ -349,7 +344,7 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
 }
 
 static int create_kernel_cq(struct hns_roce_dev *hr_dev,
-                            struct hns_roce_cq *hr_cq, int cq_entries)
+                            struct hns_roce_cq *hr_cq)
 {
         struct device *dev = hr_dev->dev;
         int ret;
@@ -365,7 +360,7 @@ static int create_kernel_cq(struct hns_roce_dev *hr_dev,
         }
 
         /* Init mtt table and write buff address to mtt table */
-        ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, cq_entries);
+        ret = hns_roce_ib_alloc_cq_buf(hr_dev, hr_cq);
         if (ret) {
                 dev_err(dev, "Failed to alloc_cq_buf.\n");
                 goto err_db;
@@ -403,7 +398,7 @@ static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
                               struct hns_roce_cq *hr_cq)
 {
         hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
-        hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, hr_cq->ib_cq.cqe);
+        hns_roce_ib_free_cq_buf(hr_dev, hr_cq);
 
         if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
                 hns_roce_free_db(hr_dev, &hr_cq->db);
@@ -414,11 +409,11 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
                           struct ib_udata *udata)
 {
         struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
-        struct device *dev = hr_dev->dev;
         struct hns_roce_ib_create_cq_resp resp = {};
         struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+        struct device *dev = hr_dev->dev;
         int vector = attr->comp_vector;
-        int cq_entries = attr->cqe;
+        u32 cq_entries = attr->cqe;
         int ret;
 
         if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
@@ -427,21 +422,27 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
                 return -EINVAL;
         }
 
-        if (hr_dev->caps.min_cqes)
-                cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
+        if (vector >= hr_dev->caps.num_comp_vectors) {
+                dev_err(dev, "Create CQ failed, vector=%d, max=%d\n",
+                        vector, hr_dev->caps.num_comp_vectors);
+                return -EINVAL;
+        }
 
-        cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
-        hr_cq->ib_cq.cqe = cq_entries - 1;
+        cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
+        cq_entries = roundup_pow_of_two(cq_entries);
+        hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
+        hr_cq->cq_depth = cq_entries;
+        hr_cq->vector = vector;
         spin_lock_init(&hr_cq->lock);
 
         if (udata) {
-                ret = create_user_cq(hr_dev, hr_cq, udata, &resp, cq_entries);
+                ret = create_user_cq(hr_dev, hr_cq, udata, &resp);
                 if (ret) {
                         dev_err(dev, "Create cq failed in user mode!\n");
                         goto err_cq;
                 }
         } else {
-                ret = create_kernel_cq(hr_dev, hr_cq, cq_entries);
+                ret = create_kernel_cq(hr_dev, hr_cq);
                 if (ret) {
                         dev_err(dev, "Create cq failed in kernel mode!\n");
                         goto err_cq;
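
A worked example of the sizing logic added above, as a standalone program (the
min_cqes value of 64 is an assumed capability, not from this patch): a request
for 100 CQEs is clamped to min_cqes, rounded up to a power of two, then split
into ib_cq.cqe = 127 (the index mask userspace sees) and cq_depth = 128 (the
real ring size).

#include <stdint.h>
#include <stdio.h>

/* Hand-rolled equivalent of the kernel's roundup_pow_of_two(). */
static uint32_t roundup_pow_of_two_u32(uint32_t n)
{
	uint32_t r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	uint32_t cq_entries = 100;	/* attr->cqe requested by the caller */
	uint32_t min_cqes = 64;		/* assumed caps.min_cqes */

	if (cq_entries < min_cqes)	/* max(cq_entries, min_cqes) */
		cq_entries = min_cqes;
	cq_entries = roundup_pow_of_two_u32(cq_entries);

	/* prints: ib_cq.cqe=127 cq_depth=128 */
	printf("ib_cq.cqe=%u cq_depth=%u\n", cq_entries - 1, cq_entries);
	return 0;
}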
@@ -449,8 +450,7 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
         }
 
         /* Allocate cq index, fill cq_context */
-        ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
-                                hr_cq, vector);
+        ret = hns_roce_cq_alloc(hr_dev, hr_cq);
         if (ret) {
                 dev_err(dev, "Alloc CQ failed(%d).\n", ret);
                 goto err_dbmap;
@@ -468,7 +468,6 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
         /* Get created cq handler and carry out event */
         hr_cq->comp = hns_roce_ib_cq_comp;
         hr_cq->event = hns_roce_ib_cq_event;
-        hr_cq->cq_depth = cq_entries;
 
         if (udata) {
                 resp.cqn = hr_cq->cqn;
@@ -515,7 +514,7 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
                                    &hr_cq->db);
         } else {
                 /* Free the buff of stored cq */
-                hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);
+                hns_roce_ib_free_cq_buf(hr_dev, hr_cq);
                 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
                         hns_roce_free_db(hr_dev, &hr_cq->db);
         }
@@ -812,8 +812,8 @@ struct hns_roce_caps {
         int             max_qp_init_rdma;
         int             max_qp_dest_rdma;
         int             num_cqs;
-        int             max_cqes;
-        int             min_cqes;
+        u32             max_cqes;
+        u32             min_cqes;
         u32             min_wqes;
         int             reserved_cqs;
         int             reserved_srqs;
@@ -944,7 +944,7 @@ struct hns_roce_hw {
         int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
         void (*write_cqc)(struct hns_roce_dev *hr_dev,
                           struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
-                          dma_addr_t dma_handle, int nent, u32 vector);
+                          dma_addr_t dma_handle);
         int (*set_hem)(struct hns_roce_dev *hr_dev,
                        struct hns_roce_hem_table *table, int obj, int step_idx);
         int (*clear_hem)(struct hns_roce_dev *hr_dev,
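
Because write_cqc is a hook in the struct hns_roce_hw ops table, narrowing its
signature here forces both backends (the hns_roce_v1_write_cqc and
hns_roce_v2_write_cqc hunks below) and the single caller in hns_roce_cq_alloc()
to change in the same patch. A toy standalone sketch of that coupling (all
names illustrative, not the driver's real types):

#include <stdio.h>

struct cq { unsigned int cq_depth; int vector; };

struct hw_ops {
	void (*write_cqc)(const struct cq *cq);	/* narrowed signature */
};

static void v1_write_cqc(const struct cq *cq)
{
	printf("v1: depth=%u vector=%d\n", cq->cq_depth, cq->vector);
}

static void v2_write_cqc(const struct cq *cq)
{
	printf("v2: depth=%u vector=%d\n", cq->cq_depth, cq->vector);
}

int main(void)
{
	const struct hw_ops v1 = { .write_cqc = v1_write_cqc };
	const struct hw_ops v2 = { .write_cqc = v2_write_cqc };
	struct cq cq = { .cq_depth = 64, .vector = 1 };

	v1.write_cqc(&cq);	/* one call shape works for both backends */
	v2.write_cqc(&cq);
	return 0;
}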
@@ -1990,7 +1990,7 @@ static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
 
         /* Get cqe when Owner bit is Conversely with the MSB of cons_idx */
         return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
-                !!(n & (hr_cq->ib_cq.cqe + 1))) ? hr_cqe : NULL;
+                !!(n & hr_cq->cq_depth)) ? hr_cqe : NULL;
 }
 
 static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
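
The replacement above is behavior-preserving: cq_depth is a power of two and
equals ib_cq.cqe + 1, so (n & hr_cq->cq_depth) still extracts the single bit of
the consumer index that flips on every wrap of the ring, which is what gets
XORed against the CQE owner bit. A standalone sketch of that wrap-parity bit
(depth of 8 chosen only for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cq_depth = 8;	/* power-of-two depth, small for illustration */

	/* The bit (n & cq_depth) flips each time the consumer index wraps:
	 * 0 for n = 0..7, 1 for n = 8..15, 0 for n = 16..23, ... */
	for (uint32_t n = 0; n < 24; n++)
		printf("ci=%2u wrap-parity=%u\n", n, !!(n & cq_depth));
	return 0;
}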
@@ -2073,8 +2073,7 @@ static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
 
 static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
                                   struct hns_roce_cq *hr_cq, void *mb_buf,
-                                  u64 *mtts, dma_addr_t dma_handle, int nent,
-                                  u32 vector)
+                                  u64 *mtts, dma_addr_t dma_handle)
 {
         struct hns_roce_cq_context *cq_context = NULL;
         struct hns_roce_buf_list *tptr_buf;
@@ -2109,9 +2108,9 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
         roce_set_field(cq_context->cqc_byte_12,
                        CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
                        CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
-                       ilog2((unsigned int)nent));
+                       ilog2(hr_cq->cq_depth));
         roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
-                       CQ_CONTEXT_CQC_BYTE_12_CEQN_S, vector);
+                       CQ_CONTEXT_CQC_BYTE_12_CEQN_S, hr_cq->vector);
         cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));
@@ -2457,7 +2457,7 @@ static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
 
         /* Get cqe when Owner bit is Conversely with the MSB of cons_idx */
         return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
-                !!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
+                !!(n & hr_cq->cq_depth)) ? cqe : NULL;
 }
 
 static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
@@ -2550,8 +2550,7 @@ static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
 
 static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
                                   struct hns_roce_cq *hr_cq, void *mb_buf,
-                                  u64 *mtts, dma_addr_t dma_handle, int nent,
-                                  u32 vector)
+                                  u64 *mtts, dma_addr_t dma_handle)
 {
         struct hns_roce_v2_cq_context *cq_context;
 
@@ -2563,9 +2562,10 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
         roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
                        V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
         roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
-                       V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
+                       V2_CQC_BYTE_4_SHIFT_S,
+                       ilog2(hr_cq->cq_depth));
         roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
-                       V2_CQC_BYTE_4_CEQN_S, vector);
+                       V2_CQC_BYTE_4_CEQN_S, hr_cq->vector);
         roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
                        V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
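
Both write_cqc variants now derive the CQC shift field from
ilog2(hr_cq->cq_depth) instead of a passed-in nent; since cq_depth is forced to
a power of two at create time, ilog2() is exact. A standalone equivalent
(hand-rolled ilog2 for illustration):

#include <stdint.h>
#include <stdio.h>

/* Hand-rolled ilog2; exact here because cq_depth is a power of two
 * by construction in hns_roce_ib_create_cq(). */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	printf("depth=128 -> shift=%u\n", ilog2_u32(128)); /* prints 7 */
	return 0;
}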