Commit 13ca970e authored by Wei Hu (Xavier), committed by Doug Ledford

RDMA/hns: Modify device variable assignment to support both PCI and platform devices

In order to support multiple hardware versions, the hardware-independent
code is placed in hns-roce.ko, while the hardware-specific operations are
placed in hns_roce_hw_v1.ko or hns_roce_hw_v2.ko, depending on the chip
series.

The hip08 RoCE engine is a PCI device, while the hip06 RoCE engine is a
platform device. In order to support both platform devices and PCI devices,
we replace &hr_dev->pdev->dev with hr_dev->dev in hns-roce.ko as below:
	Before modification:
		struct device *dev = &hr_dev->pdev->dev;
	After modification:
		struct device *dev = hr_dev->dev;

	The related structure:
	struct hns_roce_dev {
		...
		struct platform_device  *pdev;
		struct pci_dev		*pci_dev;
		struct device		*dev;
		...
	}
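
Each hardware module is then expected to fill in the shared dev field during
probe, next to its bus-specific handle. The hip06 assignment is part of this
patch (see the hns_roce_probe() hunk below); the hip08/PCI half is only a
hypothetical sketch of how hns_roce_hw_v2.ko could do the same:

	/* hip06: platform device probe (this patch) */
	hr_dev->pdev = pdev;
	hr_dev->dev = &pdev->dev;

	/* hip08: PCI device probe (hypothetical, not part of this patch) */
	hr_dev->pci_dev = pci_dev;
	hr_dev->dev = &pci_dev->dev;
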
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Shaobo Xu <xushaobo2@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent dd74282d
@@ -44,7 +44,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
struct ib_gid_attr gid_attr;
struct hns_roce_ah *ah;
u16 vlan_tag = 0xffff;
@@ -161,7 +161,7 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
struct hns_roce_buf *buf)
{
int i;
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
u32 bits_per_long = BITS_PER_LONG;
if (buf->nbufs == 1) {
@@ -172,7 +172,7 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
for (i = 0; i < buf->nbufs; ++i)
if (buf->page_list[i].buf)
-dma_free_coherent(&hr_dev->pdev->dev, PAGE_SIZE,
+dma_free_coherent(dev, PAGE_SIZE,
buf->page_list[i].buf,
buf->page_list[i].map);
kfree(buf->page_list);
@@ -186,7 +186,7 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
int i = 0;
dma_addr_t t;
struct page **pages;
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
u32 bits_per_long = BITS_PER_LONG;
/* SQ/RQ buf lease than one page, SQ + RQ = 8K */
@@ -125,7 +125,7 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
u8 op_modifier, u16 op,
unsigned long timeout)
{
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
u8 __iomem *hcr = hr_dev->cmd.hcr;
unsigned long end = 0;
u32 status = 0;
@@ -196,8 +196,8 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
unsigned long timeout)
{
struct hns_roce_cmdq *cmd = &hr_dev->cmd;
-struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_cmd_context *context;
+struct device *dev = hr_dev->dev;
int ret = 0;
spin_lock(&cmd->context_lock);
@@ -273,7 +273,7 @@ EXPORT_SYMBOL_GPL(hns_roce_cmd_mbox);
int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
{
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
mutex_init(&hr_dev->cmd.hcr_mutex);
sema_init(&hr_dev->cmd.poll_sem, 1);
@@ -58,7 +58,7 @@ static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
-dev_err(&hr_dev->pdev->dev,
+dev_err(hr_dev->dev,
"hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
event_type, hr_cq->cqn);
return;
@@ -87,7 +87,7 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
{
struct hns_roce_cmd_mailbox *mailbox = NULL;
struct hns_roce_cq_table *cq_table = NULL;
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
dma_addr_t dma_handle;
u64 *mtts = NULL;
int ret = 0;
@@ -182,7 +182,7 @@ static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
int ret;
ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
@@ -282,7 +282,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_cq ucmd;
struct hns_roce_cq *hr_cq = NULL;
struct hns_roce_uar *uar = NULL;
@@ -416,7 +416,7 @@ EXPORT_SYMBOL_GPL(hns_roce_ib_destroy_cq);
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
struct hns_roce_cq *cq;
cq = radix_tree_lookup(&hr_dev->cq_table.tree,
@@ -432,7 +432,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
struct hns_roce_cq *cq;
cq = radix_tree_lookup(&cq_table->tree,
@@ -84,7 +84,7 @@ struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
* memory, directly return fail.
*/
mem = &chunk->mem[chunk->npages];
-buf = dma_alloc_coherent(&hr_dev->pdev->dev, PAGE_SIZE << order,
+buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
&sg_dma_address(mem), gfp_mask);
if (!buf)
goto fail;
@@ -115,7 +115,7 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i)
-dma_free_coherent(&hr_dev->pdev->dev,
+dma_free_coherent(hr_dev->dev,
chunk->mem[i].length,
lowmem_page_address(sg_page(&chunk->mem[i])),
sg_dma_address(&chunk->mem[i]));
@@ -128,8 +128,8 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj)
{
-struct device *dev = &hr_dev->pdev->dev;
spinlock_t *lock = &hr_dev->bt_cmd_lock;
+struct device *dev = hr_dev->dev;
unsigned long end = 0;
unsigned long flags;
struct hns_roce_hem_iter iter;
@@ -212,7 +212,7 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj)
{
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
int ret = 0;
unsigned long i;
@@ -251,7 +251,7 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
void hns_roce_table_put(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj)
{
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
unsigned long i;
i = (obj & (table->num_obj - 1)) /
@@ -380,7 +380,7 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table)
{
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
unsigned long i;
for (i = 0; i < table->num_hem; ++i)
@@ -4050,6 +4050,7 @@ static int hns_roce_probe(struct platform_device *pdev)
}
hr_dev->pdev = pdev;
+hr_dev->dev = dev;
platform_set_drvdata(pdev, hr_dev);
if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
@@ -117,7 +117,7 @@ static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
unsigned long event)
{
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
struct net_device *netdev;
netdev = hr_dev->iboe.netdevs[port];
@@ -240,7 +240,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
struct ib_port_attr *props)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
struct net_device *net_dev;
unsigned long flags;
enum ib_mtu mtu;
@@ -428,7 +428,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
int ret;
struct hns_roce_ib_iboe *iboe = NULL;
struct ib_device *ib_dev = NULL;
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
iboe = &hr_dev->iboe;
spin_lock_init(&iboe->lock);
@@ -536,7 +536,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
{
int ret;
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtt_table,
HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz,
@@ -605,7 +605,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
int ret;
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
spin_lock_init(&hr_dev->sm_lock);
spin_lock_init(&hr_dev->bt_cmd_lock);
@@ -668,7 +668,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
int hns_roce_init(struct hns_roce_dev *hr_dev)
{
int ret;
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
if (hr_dev->hw->reset) {
ret = hr_dev->hw->reset(hr_dev, true);
@@ -241,9 +241,9 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
u64 size, u32 access, int npages,
struct hns_roce_mr *mr)
{
+struct device *dev = hr_dev->dev;
unsigned long index = 0;
int ret = 0;
-struct device *dev = &hr_dev->pdev->dev;
/* Allocate a key for mr from mr_table */
ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
@@ -276,7 +276,7 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr)
{
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
int npages = 0;
int ret;
@@ -302,7 +302,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
{
int ret;
unsigned long mtpt_idx = key_to_hw_index(mr->key);
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
@@ -552,7 +552,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
struct hns_roce_mr *mr = NULL;
int ret = 0;
int n = 0;
@@ -60,7 +60,7 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
struct hns_roce_pd *pd;
int ret;
@@ -44,7 +44,7 @@
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
struct hns_roce_qp *qp;
spin_lock(&qp_table->lock);
@@ -154,7 +154,7 @@ static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
spin_unlock_irq(&qp_table->lock);
if (ret) {
-dev_err(&hr_dev->pdev->dev, "QPC radix_tree_insert failed\n");
+dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n");
goto err_put_irrl;
}
@@ -172,7 +172,7 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
struct hns_roce_qp *hr_qp)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
int ret;
if (!qpn)
@@ -261,8 +261,8 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap, int is_user, int has_srq,
struct hns_roce_qp *hr_qp)
{
+struct device *dev = hr_dev->dev;
u32 max_cnt;
-struct device *dev = &hr_dev->pdev->dev;
/* Check the validity of QP support capacity */
if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
@@ -319,7 +319,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
ucmd->log_sq_stride > max_sq_stride ||
ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
-dev_err(&hr_dev->pdev->dev, "check SQ size error!\n");
+dev_err(hr_dev->dev, "check SQ size error!\n");
return -EINVAL;
}
@@ -343,7 +343,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap,
struct hns_roce_qp *hr_qp)
{
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
u32 max_cnt;
if (cap->max_send_wr > hr_dev->caps.max_wqes ||
@@ -395,7 +395,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
struct ib_udata *udata, unsigned long sqpn,
struct hns_roce_qp *hr_qp)
{
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_qp ucmd;
unsigned long qpn = 0;
int ret = 0;
@@ -575,7 +575,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
struct hns_roce_sqp *hr_sqp;
struct hns_roce_qp *hr_qp;
int ret;
@@ -660,7 +660,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
enum ib_qp_state cur_state, new_state;
-struct device *dev = &hr_dev->pdev->dev;
+struct device *dev = hr_dev->dev;
int ret = -EINVAL;
int p;
enum ib_mtu active_mtu;
@@ -835,7 +835,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
hr_dev->caps.num_qps - 1, SQP_NUM,
reserved_from_top);
if (ret) {
-dev_err(&hr_dev->pdev->dev, "qp bitmap init failed!error=%d\n",
+dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
ret);
return ret;
}