Kirill Smelkov / linux · Commits

Commit 82770a2f, authored May 17, 2016 by Vinod Koul

    Merge branch 'topic/qcom' into for-linus

Parents: ba8b6cc0, 42d236f8

Showing 9 changed files with 1302 additions and 43 deletions:

    Documentation/ABI/testing/sysfs-platform-hidma            +9    -0
    Documentation/devicetree/bindings/dma/qcom_bam_dma.txt    +2    -0
    drivers/dma/qcom/Makefile                                 +2    -0
    drivers/dma/qcom/bam_dma.c                                +26   -12
    drivers/dma/qcom/hidma.c                                  +46   -6
    drivers/dma/qcom/hidma.h                                  +19   -21
    drivers/dma/qcom/hidma_dbg.c                              +217  -0
    drivers/dma/qcom/hidma_ll.c                               +872  -0
    drivers/dma/qcom/hidma_mgmt.c                             +109  -4
Documentation/ABI/testing/sysfs-platform-hidma (new file, mode 100644)

What:           /sys/devices/platform/hidma-*/chid
                /sys/devices/platform/QCOM8061:*/chid
Date:           Dec 2015
KernelVersion:  4.4
Contact:        "Sinan Kaya <okaya@cudeaurora.org>"
Description:
                Contains the ID of the channel within the HIDMA instance.
                It is used to associate a given HIDMA channel with the
                priority and weight calls in the management interface.
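
For reference, chid is plain text and reads like any other sysfs attribute. A minimal userspace sketch, with a hypothetical instance path (real nodes enumerate as hidma-* or QCOM8061:* as listed above):

/* Illustrative only: read a HIDMA channel's "chid" sysfs attribute. */
#include <stdio.h>

int main(void)
{
        /* hypothetical node name; real ones look like hidma-0 or QCOM8061:00 */
        const char *path = "/sys/devices/platform/hidma-0/chid";
        FILE *f = fopen(path, "r");
        int chid;

        if (!f) {
                perror(path);
                return 1;
        }
        if (fscanf(f, "%d", &chid) == 1)
                printf("channel id: %d\n", chid);
        fclose(f);
        return 0;
}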
Documentation/devicetree/bindings/dma/qcom_bam_dma.txt

@@ -13,6 +13,8 @@ Required properties:
 - clock-names: must contain "bam_clk" entry
 - qcom,ee : indicates the active Execution Environment identifier (0-7) used in
   the secure world.
+- qcom,controlled-remotely : optional, indicates that the bam is controlled by
+  remote processor i.e. execution environment.

 Example:
 ...
drivers/dma/qcom/Makefile

obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o
obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o
hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o
obj-$(CONFIG_QCOM_HIDMA) += hdma.o
hdma-objs := hidma_ll.o hidma.o hidma_dbg.o
drivers/dma/qcom/bam_dma.c

@@ -342,7 +342,7 @@ static const struct reg_offset_data bam_v1_7_reg_info[] = {
 #define BAM_DESC_FIFO_SIZE      SZ_32K
 #define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
-#define BAM_MAX_DATA_SIZE       (SZ_32K - 8)
+#define BAM_FIFO_SIZE   (SZ_32K - 8)

 struct bam_chan {
        struct virt_dma_chan vc;
@@ -387,6 +387,7 @@ struct bam_device {
        /* execution environment ID, from DT */
        u32 ee;
+       bool controlled_remotely;

        const struct reg_offset_data *layout;
@@ -458,7 +459,7 @@ static void bam_chan_init_hw(struct bam_chan *bchan,
         */
        writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
                        bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
-       writel_relaxed(BAM_DESC_FIFO_SIZE, bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));
+       writel_relaxed(BAM_FIFO_SIZE, bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));

        /* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
@@ -604,7 +605,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
        /* calculate number of required entries */
        for_each_sg(sgl, sg, sg_len, i)
-               num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_MAX_DATA_SIZE);
+               num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE);

        /* allocate enough room to accomodate the number of entries */
        async_desc = kzalloc(sizeof(*async_desc) +
@@ -635,10 +636,10 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
                desc->addr = cpu_to_le32(sg_dma_address(sg) + curr_offset);

-               if (remainder > BAM_MAX_DATA_SIZE) {
-                       desc->size = cpu_to_le16(BAM_MAX_DATA_SIZE);
-                       remainder -= BAM_MAX_DATA_SIZE;
-                       curr_offset += BAM_MAX_DATA_SIZE;
+               if (remainder > BAM_FIFO_SIZE) {
+                       desc->size = cpu_to_le16(BAM_FIFO_SIZE);
+                       remainder -= BAM_FIFO_SIZE;
+                       curr_offset += BAM_FIFO_SIZE;
                } else {
                        desc->size = cpu_to_le16(remainder);
                        remainder = 0;
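An aside on the arithmetic in the two hunks above: each scatterlist entry contributes DIV_ROUND_UP(len, BAM_FIFO_SIZE) descriptors, and the prep loop then peels off full FIFO-sized chunks until only a remainder is left. A standalone sketch with the kernel macro open-coded (the example length is arbitrary):

/* Standalone check of the bam_prep_slave_sg() chunking math. */
#include <stdio.h>

#define SZ_32K          32768
#define BAM_FIFO_SIZE   (SZ_32K - 8)
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int len = 100000;      /* arbitrary stand-in for sg_dma_len(sg) */
        unsigned int remainder = len;

        printf("%u bytes -> %u descriptors\n",
               len, DIV_ROUND_UP(len, BAM_FIFO_SIZE));
        while (remainder) {
                unsigned int size = remainder > BAM_FIFO_SIZE ?
                                        BAM_FIFO_SIZE : remainder;

                printf("  descriptor of %u bytes\n", size);
                remainder -= size;
        }
        return 0;
}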
@@ -801,13 +802,17 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
        if (srcs & P_IRQ)
                tasklet_schedule(&bdev->task);

-       if (srcs & BAM_IRQ)
+       if (srcs & BAM_IRQ) {
                clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));

-       /* don't allow reorder of the various accesses to the BAM registers */
-       mb();
+               /*
+                * don't allow reorder of the various accesses to the BAM
+                * registers
+                */
+               mb();

-       writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
+               writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
+       }

        return IRQ_HANDLED;
 }
@@ -1038,6 +1043,9 @@ static int bam_init(struct bam_device *bdev)
        val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
        bdev->num_channels = val & BAM_NUM_PIPES_MASK;

+       if (bdev->controlled_remotely)
+               return 0;
+
        /* s/w reset bam */
        /* after reset all pipes are disabled and idle */
        val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
@@ -1125,6 +1133,9 @@ static int bam_dma_probe(struct platform_device *pdev)
                return ret;
        }

+       bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
+                                               "qcom,controlled-remotely");
+
        bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
        if (IS_ERR(bdev->bamclk))
                return PTR_ERR(bdev->bamclk);
@@ -1163,7 +1174,7 @@ static int bam_dma_probe(struct platform_device *pdev)
        /* set max dma segment size */
        bdev->common.dev = bdev->dev;
        bdev->common.dev->dma_parms = &bdev->dma_parms;
-       ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE);
+       ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
        if (ret) {
                dev_err(bdev->dev, "cannot set maximum segment size\n");
                goto err_bam_channel_exit;
@@ -1234,6 +1245,9 @@ static int bam_dma_remove(struct platform_device *pdev)
                bam_dma_terminate_all(&bdev->channels[i].vc.chan);
                tasklet_kill(&bdev->channels[i].vc.task);

+               if (!bdev->channels[i].fifo_virt)
+                       continue;
+
                dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
                            bdev->channels[i].fifo_virt,
                            bdev->channels[i].fifo_phys);
drivers/dma/qcom/hidma.c

 /*
  * Qualcomm Technologies HIDMA DMA engine interface
  *
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -404,7 +404,7 @@ static int hidma_terminate_channel(struct dma_chan *chan)
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        /* this suspends the existing transfer */
-       rc = hidma_ll_pause(dmadev->lldev);
+       rc = hidma_ll_disable(dmadev->lldev);
        if (rc) {
                dev_err(dmadev->ddev.dev, "channel did not pause\n");
                goto out;
@@ -427,7 +427,7 @@ static int hidma_terminate_channel(struct dma_chan *chan)
                list_move(&mdesc->node, &mchan->free);
        }

-       rc = hidma_ll_resume(dmadev->lldev);
+       rc = hidma_ll_enable(dmadev->lldev);
 out:
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
@@ -488,7 +488,7 @@ static int hidma_pause(struct dma_chan *chan)
        dmadev = to_hidma_dev(mchan->chan.device);
        if (!mchan->paused) {
                pm_runtime_get_sync(dmadev->ddev.dev);
-               if (hidma_ll_pause(dmadev->lldev))
+               if (hidma_ll_disable(dmadev->lldev))
                        dev_warn(dmadev->ddev.dev, "channel did not stop\n");
                mchan->paused = true;
                pm_runtime_mark_last_busy(dmadev->ddev.dev);
@@ -507,7 +507,7 @@ static int hidma_resume(struct dma_chan *chan)
        dmadev = to_hidma_dev(mchan->chan.device);
        if (mchan->paused) {
                pm_runtime_get_sync(dmadev->ddev.dev);
-               rc = hidma_ll_resume(dmadev->lldev);
+               rc = hidma_ll_enable(dmadev->lldev);
                if (!rc)
                        mchan->paused = false;
                else
@@ -530,6 +530,43 @@ static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
        return hidma_ll_inthandler(chirq, lldev);
 }

+static ssize_t hidma_show_values(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct hidma_dev *mdev = platform_get_drvdata(pdev);
+
+       buf[0] = 0;
+
+       if (strcmp(attr->attr.name, "chid") == 0)
+               sprintf(buf, "%d\n", mdev->chidx);
+
+       return strlen(buf);
+}
+
+static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
+                                   int mode)
+{
+       struct device_attribute *attrs;
+       char *name_copy;
+
+       attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
+                            GFP_KERNEL);
+       if (!attrs)
+               return -ENOMEM;
+
+       name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
+       if (!name_copy)
+               return -ENOMEM;
+
+       attrs->attr.name = name_copy;
+       attrs->attr.mode = mode;
+       attrs->show = hidma_show_values;
+       sysfs_attr_init(&attrs->attr);
+
+       return device_create_file(dev->ddev.dev, attrs);
+}
+
 static int hidma_probe(struct platform_device *pdev)
 {
        struct hidma_dev *dmadev;
@@ -644,6 +681,8 @@ static int hidma_probe(struct platform_device *pdev)
        dmadev->irq = chirq;
        tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
+       hidma_debug_init(dmadev);
+       hidma_create_sysfs_entry(dmadev, "chid", S_IRUGO);
        dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
        platform_set_drvdata(pdev, dmadev);
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
@@ -651,6 +690,7 @@ static int hidma_probe(struct platform_device *pdev)
        return 0;

 uninit:
+       hidma_debug_uninit(dmadev);
        hidma_ll_uninit(dmadev->lldev);
 dmafree:
        if (dmadev)
@@ -668,6 +708,7 @@ static int hidma_remove(struct platform_device *pdev)
        pm_runtime_get_sync(dmadev->ddev.dev);
        dma_async_device_unregister(&dmadev->ddev);
        devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+       hidma_debug_uninit(dmadev);
        hidma_ll_uninit(dmadev->lldev);
        hidma_free(dmadev);
@@ -689,7 +730,6 @@ static const struct of_device_id hidma_match[] = {
        {.compatible = "qcom,hidma-1.0",},
        {},
 };
-
 MODULE_DEVICE_TABLE(of, hidma_match);

 static struct platform_driver hidma_driver = {
drivers/dma/qcom/hidma.h

 /*
  * Qualcomm Technologies HIDMA data structures
  *
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,32 +20,29 @@
 #include <linux/interrupt.h>
 #include <linux/dmaengine.h>

-#define TRE_SIZE                       32 /* each TRE is 32 bytes */
-#define TRE_CFG_IDX                    0
-#define TRE_LEN_IDX                    1
-#define TRE_SRC_LOW_IDX                2
-#define TRE_SRC_HI_IDX                 3
-#define TRE_DEST_LOW_IDX               4
-#define TRE_DEST_HI_IDX                5
-
-struct hidma_tx_status {
-       u8 err_info;                    /* error record in this transfer    */
-       u8 err_code;                    /* completion code                  */
-};
+#define HIDMA_TRE_SIZE                 32 /* each TRE is 32 bytes */
+#define HIDMA_TRE_CFG_IDX              0
+#define HIDMA_TRE_LEN_IDX              1
+#define HIDMA_TRE_SRC_LOW_IDX          2
+#define HIDMA_TRE_SRC_HI_IDX           3
+#define HIDMA_TRE_DEST_LOW_IDX         4
+#define HIDMA_TRE_DEST_HI_IDX          5

 struct hidma_tre {
        atomic_t allocated;             /* if this channel is allocated     */
        bool queued;                    /* flag whether this is pending     */
        u16 status;                     /* status                           */
-       u32 chidx;                      /* index of the tre                 */
+       u32 idx;                        /* index of the tre                 */
        u32 dma_sig;                    /* signature of the tre             */
        const char *dev_name;           /* name of the device               */
        void (*callback)(void *data);   /* requester callback               */
        void *data;                     /* Data associated with this channel*/
        struct hidma_lldev *lldev;      /* lldma device pointer             */
-       u32 tre_local[TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy        */
+       u32 tre_local[HIDMA_TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy  */
        u32 tre_index;                  /* the offset where this was written*/
        u32 int_flags;                  /* interrupt flags                  */
+       u8 err_info;                    /* error record in this transfer    */
+       u8 err_code;                    /* completion code                  */
 };

 struct hidma_lldev {
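The renamed HIDMA_TRE_*_IDX constants are word indices into the 32-byte TRE scratch copy held in each hidma_tre; hidma_ll_set_transfer_params() in the new hidma_ll.c fills the words exactly this way. A standalone sketch of the layout (the addresses are made up):

/* Sketch: HIDMA_TRE_*_IDX address 32-bit words of a 32-byte TRE. */
#include <stdint.h>
#include <stdio.h>

#define HIDMA_TRE_SIZE          32
#define HIDMA_TRE_CFG_IDX       0
#define HIDMA_TRE_LEN_IDX       1
#define HIDMA_TRE_SRC_LOW_IDX   2
#define HIDMA_TRE_SRC_HI_IDX    3
#define HIDMA_TRE_DEST_LOW_IDX  4
#define HIDMA_TRE_DEST_HI_IDX   5

int main(void)
{
        uint32_t tre[HIDMA_TRE_SIZE / sizeof(uint32_t)] = { 0 };
        uint64_t src = 0x123456789ULL;          /* made-up DMA addresses */
        uint64_t dest = 0xabcdef000ULL;

        tre[HIDMA_TRE_LEN_IDX] = 4096;
        tre[HIDMA_TRE_SRC_LOW_IDX] = (uint32_t)src;
        tre[HIDMA_TRE_SRC_HI_IDX] = (uint32_t)(src >> 32);
        tre[HIDMA_TRE_DEST_LOW_IDX] = (uint32_t)dest;
        tre[HIDMA_TRE_DEST_HI_IDX] = (uint32_t)(dest >> 32);

        printf("len=%u src=0x%x%08x\n", (unsigned)tre[HIDMA_TRE_LEN_IDX],
               (unsigned)tre[HIDMA_TRE_SRC_HI_IDX],
               (unsigned)tre[HIDMA_TRE_SRC_LOW_IDX]);
        return 0;
}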
@@ -61,22 +58,21 @@ struct hidma_lldev {
        void __iomem *evca;             /* Event Channel address          */
        struct hidma_tre
                **pending_tre_list;     /* Pointers to pending TREs       */
-       struct hidma_tx_status
-               *tx_status_list;        /* Pointers to pending TREs status*/
        s32 pending_tre_count;          /* Number of TREs pending         */

        void *tre_ring;                 /* TRE ring                       */
-       dma_addr_t tre_ring_handle;     /* TRE ring to be shared with HW  */
+       dma_addr_t tre_dma;             /* TRE ring to be shared with HW  */
        u32 tre_ring_size;              /* Byte size of the ring          */
        u32 tre_processed_off;          /* last processed TRE             */

        void *evre_ring;                /* EVRE ring                      */
-       dma_addr_t evre_ring_handle;    /* EVRE ring to be shared with HW */
+       dma_addr_t evre_dma;            /* EVRE ring to be shared with HW */
        u32 evre_ring_size;             /* Byte size of the ring          */
        u32 evre_processed_off;         /* last processed EVRE            */

        u32 tre_write_offset;           /* TRE write location             */
        struct tasklet_struct task;     /* task delivering notifications  */
+       struct tasklet_struct rst_task; /* task to reset HW               */
        DECLARE_KFIFO_PTR(handoff_fifo,
                          struct hidma_tre *);  /* pending TREs FIFO      */
 };
@@ -145,8 +141,8 @@ enum dma_status hidma_ll_status(struct hidma_lldev *llhndl, u32 tre_ch);
 bool hidma_ll_isenabled(struct hidma_lldev *llhndl);
 void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch);
 void hidma_ll_start(struct hidma_lldev *llhndl);
-int hidma_ll_pause(struct hidma_lldev *llhndl);
-int hidma_ll_resume(struct hidma_lldev *llhndl);
+int hidma_ll_disable(struct hidma_lldev *lldev);
+int hidma_ll_enable(struct hidma_lldev *llhndl);
 void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
        dma_addr_t src, dma_addr_t dest, u32 len, u32 flags);
 int hidma_ll_setup(struct hidma_lldev *lldev);
@@ -157,4 +153,6 @@ int hidma_ll_uninit(struct hidma_lldev *llhndl);
 irqreturn_t hidma_ll_inthandler(int irq, void *arg);
 void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
                                u8 err_code);
+int hidma_debug_init(struct hidma_dev *dmadev);
+void hidma_debug_uninit(struct hidma_dev *dmadev);
 #endif
drivers/dma/qcom/hidma_dbg.c (new file, mode 100644)
/*
* Qualcomm Technologies HIDMA debug file
*
* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/pm_runtime.h>
#include "hidma.h"

static void hidma_ll_chstats(struct seq_file *s, void *llhndl, u32 tre_ch)
{
        struct hidma_lldev *lldev = llhndl;
        struct hidma_tre *tre;
        u32 length;
        dma_addr_t src_start;
        dma_addr_t dest_start;
        u32 *tre_local;

        if (tre_ch >= lldev->nr_tres) {
                dev_err(lldev->dev, "invalid TRE number in chstats:%d", tre_ch);
                return;
        }
        tre = &lldev->trepool[tre_ch];
        seq_printf(s, "------Channel %d -----\n", tre_ch);
        seq_printf(s, "allocated=%d\n", atomic_read(&tre->allocated));
        seq_printf(s, "queued = 0x%x\n", tre->queued);
        seq_printf(s, "err_info = 0x%x\n", tre->err_info);
        seq_printf(s, "err_code = 0x%x\n", tre->err_code);
        seq_printf(s, "status = 0x%x\n", tre->status);
        seq_printf(s, "idx = 0x%x\n", tre->idx);
        seq_printf(s, "dma_sig = 0x%x\n", tre->dma_sig);
        seq_printf(s, "dev_name=%s\n", tre->dev_name);
        seq_printf(s, "callback=%p\n", tre->callback);
        seq_printf(s, "data=%p\n", tre->data);
        seq_printf(s, "tre_index = 0x%x\n", tre->tre_index);

        tre_local = &tre->tre_local[0];
        src_start = tre_local[HIDMA_TRE_SRC_LOW_IDX];
        src_start = ((u64) (tre_local[HIDMA_TRE_SRC_HI_IDX]) << 32) + src_start;
        dest_start = tre_local[HIDMA_TRE_DEST_LOW_IDX];
        dest_start += ((u64) (tre_local[HIDMA_TRE_DEST_HI_IDX]) << 32);
        length = tre_local[HIDMA_TRE_LEN_IDX];

        seq_printf(s, "src=%pap\n", &src_start);
        seq_printf(s, "dest=%pap\n", &dest_start);
        seq_printf(s, "length = 0x%x\n", length);
}

static void hidma_ll_devstats(struct seq_file *s, void *llhndl)
{
        struct hidma_lldev *lldev = llhndl;

        seq_puts(s, "------Device -----\n");
        seq_printf(s, "lldev init = 0x%x\n", lldev->initialized);
        seq_printf(s, "trch_state = 0x%x\n", lldev->trch_state);
        seq_printf(s, "evch_state = 0x%x\n", lldev->evch_state);
        seq_printf(s, "chidx = 0x%x\n", lldev->chidx);
        seq_printf(s, "nr_tres = 0x%x\n", lldev->nr_tres);
        seq_printf(s, "trca=%p\n", lldev->trca);
        seq_printf(s, "tre_ring=%p\n", lldev->tre_ring);
        seq_printf(s, "tre_ring_handle=%pap\n", &lldev->tre_dma);
        seq_printf(s, "tre_ring_size = 0x%x\n", lldev->tre_ring_size);
        seq_printf(s, "tre_processed_off = 0x%x\n", lldev->tre_processed_off);
        seq_printf(s, "pending_tre_count=%d\n", lldev->pending_tre_count);
        seq_printf(s, "evca=%p\n", lldev->evca);
        seq_printf(s, "evre_ring=%p\n", lldev->evre_ring);
        seq_printf(s, "evre_ring_handle=%pap\n", &lldev->evre_dma);
        seq_printf(s, "evre_ring_size = 0x%x\n", lldev->evre_ring_size);
        seq_printf(s, "evre_processed_off = 0x%x\n", lldev->evre_processed_off);
        seq_printf(s, "tre_write_offset = 0x%x\n", lldev->tre_write_offset);
}

/*
 * hidma_chan_stats: display HIDMA channel statistics
 *
 * Display the statistics for the current HIDMA virtual channel device.
 */
static int hidma_chan_stats(struct seq_file *s, void *unused)
{
        struct hidma_chan *mchan = s->private;
        struct hidma_desc *mdesc;
        struct hidma_dev *dmadev = mchan->dmadev;

        pm_runtime_get_sync(dmadev->ddev.dev);
        seq_printf(s, "paused=%u\n", mchan->paused);
        seq_printf(s, "dma_sig=%u\n", mchan->dma_sig);
        seq_puts(s, "prepared\n");
        list_for_each_entry(mdesc, &mchan->prepared, node)
                hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch);

        seq_puts(s, "active\n");
        list_for_each_entry(mdesc, &mchan->active, node)
                hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch);

        seq_puts(s, "completed\n");
        list_for_each_entry(mdesc, &mchan->completed, node)
                hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch);

        hidma_ll_devstats(s, mchan->dmadev->lldev);
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
        return 0;
}

/*
 * hidma_dma_info: display HIDMA device info
 *
 * Display the info for the current HIDMA device.
 */
static int hidma_dma_info(struct seq_file *s, void *unused)
{
        struct hidma_dev *dmadev = s->private;
        resource_size_t sz;

        seq_printf(s, "nr_descriptors=%d\n", dmadev->nr_descriptors);
        seq_printf(s, "dev_trca=%p\n", &dmadev->dev_trca);
        seq_printf(s, "dev_trca_phys=%pa\n", &dmadev->trca_resource->start);
        sz = resource_size(dmadev->trca_resource);
        seq_printf(s, "dev_trca_size=%pa\n", &sz);
        seq_printf(s, "dev_evca=%p\n", &dmadev->dev_evca);
        seq_printf(s, "dev_evca_phys=%pa\n", &dmadev->evca_resource->start);
        sz = resource_size(dmadev->evca_resource);
        seq_printf(s, "dev_evca_size=%pa\n", &sz);
        return 0;
}

static int hidma_chan_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, hidma_chan_stats, inode->i_private);
}

static int hidma_dma_info_open(struct inode *inode, struct file *file)
{
        return single_open(file, hidma_dma_info, inode->i_private);
}

static const struct file_operations hidma_chan_fops = {
        .open = hidma_chan_stats_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static const struct file_operations hidma_dma_fops = {
        .open = hidma_dma_info_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

void hidma_debug_uninit(struct hidma_dev *dmadev)
{
        debugfs_remove_recursive(dmadev->debugfs);
        debugfs_remove_recursive(dmadev->stats);
}

int hidma_debug_init(struct hidma_dev *dmadev)
{
        int rc = 0;
        int chidx = 0;
        struct list_head *position = NULL;

        dmadev->debugfs = debugfs_create_dir(dev_name(dmadev->ddev.dev), NULL);
        if (!dmadev->debugfs) {
                rc = -ENODEV;
                return rc;
        }

        /* walk through the virtual channel list */
        list_for_each(position, &dmadev->ddev.channels) {
                struct hidma_chan *chan;

                chan = list_entry(position, struct hidma_chan,
                                  chan.device_node);
                sprintf(chan->dbg_name, "chan%d", chidx);
                chan->debugfs = debugfs_create_dir(chan->dbg_name,
                                                   dmadev->debugfs);
                if (!chan->debugfs) {
                        rc = -ENOMEM;
                        goto cleanup;
                }
                chan->stats = debugfs_create_file("stats", S_IRUGO,
                                                  chan->debugfs, chan,
                                                  &hidma_chan_fops);
                if (!chan->stats) {
                        rc = -ENOMEM;
                        goto cleanup;
                }
                chidx++;
        }

        dmadev->stats = debugfs_create_file("stats", S_IRUGO,
                                            dmadev->debugfs, dmadev,
                                            &hidma_dma_fops);
        if (!dmadev->stats) {
                rc = -ENOMEM;
                goto cleanup;
        }

        return 0;

cleanup:
        hidma_debug_uninit(dmadev);
        return rc;
}
drivers/dma/qcom/hidma_ll.c (new file, mode 100644)
/*
* Qualcomm Technologies HIDMA DMA engine low level code
*
* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/iopoll.h>
#include <linux/kfifo.h>
#include <linux/bitops.h>
#include "hidma.h"
#define HIDMA_EVRE_SIZE                 16 /* each EVRE is 16 bytes */
#define HIDMA_TRCA_CTRLSTS_REG 0x000
#define HIDMA_TRCA_RING_LOW_REG 0x008
#define HIDMA_TRCA_RING_HIGH_REG 0x00C
#define HIDMA_TRCA_RING_LEN_REG 0x010
#define HIDMA_TRCA_DOORBELL_REG 0x400
#define HIDMA_EVCA_CTRLSTS_REG 0x000
#define HIDMA_EVCA_INTCTRL_REG 0x004
#define HIDMA_EVCA_RING_LOW_REG 0x008
#define HIDMA_EVCA_RING_HIGH_REG 0x00C
#define HIDMA_EVCA_RING_LEN_REG 0x010
#define HIDMA_EVCA_WRITE_PTR_REG 0x020
#define HIDMA_EVCA_DOORBELL_REG 0x400
#define HIDMA_EVCA_IRQ_STAT_REG 0x100
#define HIDMA_EVCA_IRQ_CLR_REG 0x108
#define HIDMA_EVCA_IRQ_EN_REG 0x110
#define HIDMA_EVRE_CFG_IDX 0
#define HIDMA_EVRE_ERRINFO_BIT_POS 24
#define HIDMA_EVRE_CODE_BIT_POS 28
#define HIDMA_EVRE_ERRINFO_MASK GENMASK(3, 0)
#define HIDMA_EVRE_CODE_MASK GENMASK(3, 0)
#define HIDMA_CH_CONTROL_MASK GENMASK(7, 0)
#define HIDMA_CH_STATE_MASK GENMASK(7, 0)
#define HIDMA_CH_STATE_BIT_POS 0x8
#define HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS 0
#define HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS 1
#define HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS 9
#define HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS 10
#define HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS 11
#define HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS 14
#define ENABLE_IRQS (BIT(HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS) | \
BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS) | \
BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) | \
BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS) | \
BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS) | \
BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS))
#define HIDMA_INCREMENT_ITERATOR(iter, size, ring_size) \
do { \
iter += size; \
if (iter >= ring_size) \
iter -= ring_size; \
} while (0)
#define HIDMA_CH_STATE(val) \
((val >> HIDMA_CH_STATE_BIT_POS) & HIDMA_CH_STATE_MASK)
#define HIDMA_ERR_INT_MASK \
(BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS) | \
BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) | \
BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS) | \
BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS) | \
BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS))
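
A note on HIDMA_INCREMENT_ITERATOR above: each step advances by one fixed element size, and the ring size is a whole multiple of that size, so a single conditional subtraction is enough to wrap and no modulo is needed. A standalone demonstration of the idiom:

/* Standalone demo of the conditional-subtract ring wrap. */
#include <stdio.h>

#define INCREMENT_ITERATOR(iter, size, ring_size)       \
        do {                                            \
                iter += size;                           \
                if (iter >= ring_size)                  \
                        iter -= ring_size;              \
        } while (0)

int main(void)
{
        unsigned int iter = 0;
        const unsigned int elem = 16, ring = 64;        /* four 16-byte EVREs */

        for (int i = 0; i < 6; i++) {
                printf("offset %u\n", iter);
                INCREMENT_ITERATOR(iter, elem, ring);
        }
        return 0;       /* prints 0 16 32 48 0 16 */
}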

enum ch_command {
        HIDMA_CH_DISABLE = 0,
        HIDMA_CH_ENABLE = 1,
        HIDMA_CH_SUSPEND = 2,
        HIDMA_CH_RESET = 9,
};

enum ch_state {
        HIDMA_CH_DISABLED = 0,
        HIDMA_CH_ENABLED = 1,
        HIDMA_CH_RUNNING = 2,
        HIDMA_CH_SUSPENDED = 3,
        HIDMA_CH_STOPPED = 4,
};

enum tre_type {
        HIDMA_TRE_MEMCPY = 3,
};

enum err_code {
        HIDMA_EVRE_STATUS_COMPLETE = 1,
        HIDMA_EVRE_STATUS_ERROR = 4,
};

static int hidma_is_chan_enabled(int state)
{
        switch (state) {
        case HIDMA_CH_ENABLED:
        case HIDMA_CH_RUNNING:
                return true;
        default:
                return false;
        }
}

void hidma_ll_free(struct hidma_lldev *lldev, u32 tre_ch)
{
        struct hidma_tre *tre;

        if (tre_ch >= lldev->nr_tres) {
                dev_err(lldev->dev, "invalid TRE number in free:%d", tre_ch);
                return;
        }

        tre = &lldev->trepool[tre_ch];
        if (atomic_read(&tre->allocated) != true) {
                dev_err(lldev->dev, "trying to free an unused TRE:%d", tre_ch);
                return;
        }

        atomic_set(&tre->allocated, 0);
}

int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
                     void (*callback)(void *data), void *data, u32 *tre_ch)
{
        unsigned int i;
        struct hidma_tre *tre;
        u32 *tre_local;

        if (!tre_ch || !lldev)
                return -EINVAL;

        /* need to have at least one empty spot in the queue */
        for (i = 0; i < lldev->nr_tres - 1; i++) {
                if (atomic_add_unless(&lldev->trepool[i].allocated, 1, 1))
                        break;
        }

        if (i == (lldev->nr_tres - 1))
                return -ENOMEM;

        tre = &lldev->trepool[i];
        tre->dma_sig = sig;
        tre->dev_name = dev_name;
        tre->callback = callback;
        tre->data = data;
        tre->idx = i;
        tre->status = 0;
        tre->queued = 0;
        tre->err_code = 0;
        tre->err_info = 0;
        tre->lldev = lldev;
        tre_local = &tre->tre_local[0];
        tre_local[HIDMA_TRE_CFG_IDX] = HIDMA_TRE_MEMCPY;
        tre_local[HIDMA_TRE_CFG_IDX] |= (lldev->chidx & 0xFF) << 8;
        tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16);        /* set IEOB */
        *tre_ch = i;
        if (callback)
                callback(data);

        return 0;
}

/*
 * Multiple TREs may be queued and waiting in the pending queue.
 */
static void hidma_ll_tre_complete(unsigned long arg)
{
        struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
        struct hidma_tre *tre;

        while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) {
                /* call the user if it has been read by the hardware */
                if (tre->callback)
                        tre->callback(tre->data);
        }
}

static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
                                u8 err_info, u8 err_code)
{
        struct hidma_tre *tre;
        unsigned long flags;

        spin_lock_irqsave(&lldev->lock, flags);
        tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
        if (!tre) {
                spin_unlock_irqrestore(&lldev->lock, flags);
                dev_warn(lldev->dev, "tre_index [%d] and tre out of sync\n",
                         tre_iterator / HIDMA_TRE_SIZE);
                return -EINVAL;
        }
        lldev->pending_tre_list[tre->tre_index] = NULL;

        /*
         * Keep track of pending TREs that SW is expecting to receive
         * from HW. We got one now. Decrement our counter.
         */
        lldev->pending_tre_count--;
        if (lldev->pending_tre_count < 0) {
                dev_warn(lldev->dev, "tre count mismatch on completion");
                lldev->pending_tre_count = 0;
        }

        spin_unlock_irqrestore(&lldev->lock, flags);

        tre->err_info = err_info;
        tre->err_code = err_code;
        tre->queued = 0;

        kfifo_put(&lldev->handoff_fifo, tre);
        tasklet_schedule(&lldev->task);

        return 0;
}

/*
 * Called to handle the interrupt for the channel.
 * Return a positive number if TRE or EVRE were consumed on this run.
 * Return a positive number if there are pending TREs or EVREs.
 * Return 0 if there is nothing to consume or no pending TREs/EVREs found.
 */
static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
{
        u32 evre_ring_size = lldev->evre_ring_size;
        u32 tre_ring_size = lldev->tre_ring_size;
        u32 err_info, err_code, evre_write_off;
        u32 tre_iterator, evre_iterator;
        u32 num_completed = 0;

        evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
        tre_iterator = lldev->tre_processed_off;
        evre_iterator = lldev->evre_processed_off;

        if ((evre_write_off > evre_ring_size) ||
            (evre_write_off % HIDMA_EVRE_SIZE)) {
                dev_err(lldev->dev, "HW reports invalid EVRE write offset\n");
                return 0;
        }

        /*
         * By the time control reaches here the number of EVREs and TREs
         * may not match. Only consume the ones that hardware told us.
         */
        while ((evre_iterator != evre_write_off)) {
                u32 *current_evre = lldev->evre_ring + evre_iterator;
                u32 cfg;

                cfg = current_evre[HIDMA_EVRE_CFG_IDX];
                err_info = cfg >> HIDMA_EVRE_ERRINFO_BIT_POS;
                err_info &= HIDMA_EVRE_ERRINFO_MASK;
                err_code =
                    (cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;

                if (hidma_post_completed(lldev, tre_iterator, err_info,
                                         err_code))
                        break;

                HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
                                         tre_ring_size);
                HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
                                         evre_ring_size);

                /*
                 * Read the new event descriptor written by the HW.
                 * As we are processing the delivered events, other events
                 * get queued to the SW for processing.
                 */
                evre_write_off =
                    readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
                num_completed++;
        }

        if (num_completed) {
                u32 evre_read_off = (lldev->evre_processed_off +
                                     HIDMA_EVRE_SIZE * num_completed);
                u32 tre_read_off = (lldev->tre_processed_off +
                                    HIDMA_TRE_SIZE * num_completed);

                evre_read_off = evre_read_off % evre_ring_size;
                tre_read_off = tre_read_off % tre_ring_size;

                writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);

                /* record the last processed tre offset */
                lldev->tre_processed_off = tre_read_off;
                lldev->evre_processed_off = evre_read_off;
        }

        return num_completed;
}

void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
                               u8 err_code)
{
        u32 tre_iterator;
        u32 tre_ring_size = lldev->tre_ring_size;
        int num_completed = 0;
        u32 tre_read_off;

        tre_iterator = lldev->tre_processed_off;
        while (lldev->pending_tre_count) {
                if (hidma_post_completed(lldev, tre_iterator, err_info,
                                         err_code))
                        break;
                HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
                                         tre_ring_size);
                num_completed++;
        }
        tre_read_off = (lldev->tre_processed_off +
                        HIDMA_TRE_SIZE * num_completed);

        tre_read_off = tre_read_off % tre_ring_size;

        /* record the last processed tre offset */
        lldev->tre_processed_off = tre_read_off;
}

static int hidma_ll_reset(struct hidma_lldev *lldev)
{
        u32 val;
        int ret;

        val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
        val &= ~(HIDMA_CH_CONTROL_MASK << 16);
        val |= HIDMA_CH_RESET << 16;
        writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

        /*
         * Delay 10ms after reset to allow DMA logic to quiesce.
         * Do a polled read up to 1ms and 10ms maximum.
         */
        ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
                                 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
                                 1000, 10000);
        if (ret) {
                dev_err(lldev->dev, "transfer channel did not reset\n");
                return ret;
        }

        val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
        val &= ~(HIDMA_CH_CONTROL_MASK << 16);
        val |= HIDMA_CH_RESET << 16;
        writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

        /*
         * Delay 10ms after reset to allow DMA logic to quiesce.
         * Do a polled read up to 1ms and 10ms maximum.
         */
        ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
                                 HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
                                 1000, 10000);
        if (ret)
                return ret;

        lldev->trch_state = HIDMA_CH_DISABLED;
        lldev->evch_state = HIDMA_CH_DISABLED;
        return 0;
}

/*
 * Abort all transactions and perform a reset.
 */
static void hidma_ll_abort(unsigned long arg)
{
        struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
        u8 err_code = HIDMA_EVRE_STATUS_ERROR;
        u8 err_info = 0xFF;
        int rc;

        hidma_cleanup_pending_tre(lldev, err_info, err_code);

        /* reset the channel for recovery */
        rc = hidma_ll_setup(lldev);
        if (rc) {
                dev_err(lldev->dev, "channel reinitialize failed after error\n");
                return;
        }
        writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
}

/*
 * The interrupt handler for HIDMA will try to consume as many pending
 * EVRE from the event queue as possible. Each EVRE has an associated
 * TRE that holds the user interface parameters. EVRE reports the
 * result of the transaction. Hardware guarantees ordering between EVREs
 * and TREs. We use last processed offset to figure out which TRE is
 * associated with which EVRE. If two TREs are consumed by HW, the EVREs
 * are in order in the event ring.
 *
 * This handler will do a one pass for consuming EVREs. Other EVREs may
 * be delivered while we are working. It will try to consume incoming
 * EVREs one more time and return.
 *
 * For unprocessed EVREs, hardware will trigger another interrupt until
 * all the interrupt bits are cleared.
 *
 * Hardware guarantees that by the time interrupt is observed, all data
 * transactions in flight are delivered to their respective places and
 * are visible to the CPU.
 *
 * On demand paging for IOMMU is only supported for PCIe via PRI
 * (Page Request Interface) not for HIDMA. All other hardware instances
 * including HIDMA work on pinned DMA addresses.
 *
 * HIDMA is not aware of IOMMU presence since it follows the DMA API. All
 * IOMMU latency will be built into the data movement time. By the time
 * interrupt happens, IOMMU lookups + data movement has already taken place.
 *
 * While the first read in a typical PCI endpoint ISR flushes all outstanding
 * requests traditionally to the destination, this concept does not apply
 * here for this HW.
 */
irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
{
        struct hidma_lldev *lldev = arg;
        u32 status;
        u32 enable;
        u32 cause;

        /*
         * Fine tuned for this HW...
         *
         * This ISR has been designed for this particular hardware. Relaxed
         * read and write accessors are used for performance reasons due to
         * interrupt delivery guarantees. Do not copy this code blindly and
         * expect that to work.
         */
        status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
        enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
        cause = status & enable;

        while (cause) {
                if (cause & HIDMA_ERR_INT_MASK) {
                        dev_err(lldev->dev, "error 0x%x, resetting...\n",
                                cause);

                        /* Clear out pending interrupts */
                        writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

                        tasklet_schedule(&lldev->rst_task);
                        goto out;
                }

                /*
                 * Try to consume as many EVREs as possible.
                 */
                hidma_handle_tre_completion(lldev);

                /* We consumed TREs or there are pending TREs or EVREs. */
                writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

                /*
                 * Another interrupt might have arrived while we are
                 * processing this one. Read the new cause.
                 */
                status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
                enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
                cause = status & enable;
        }

out:
        return IRQ_HANDLED;
}

int hidma_ll_enable(struct hidma_lldev *lldev)
{
        u32 val;
        int ret;

        val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
        val &= ~(HIDMA_CH_CONTROL_MASK << 16);
        val |= HIDMA_CH_ENABLE << 16;
        writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

        ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
                                 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
                                 1000, 10000);
        if (ret) {
                dev_err(lldev->dev, "event channel did not get enabled\n");
                return ret;
        }

        val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
        val &= ~(HIDMA_CH_CONTROL_MASK << 16);
        val |= HIDMA_CH_ENABLE << 16;
        writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

        ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
                                 hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
                                 1000, 10000);
        if (ret) {
                dev_err(lldev->dev, "transfer channel did not get enabled\n");
                return ret;
        }

        lldev->trch_state = HIDMA_CH_ENABLED;
        lldev->evch_state = HIDMA_CH_ENABLED;

        return 0;
}

void hidma_ll_start(struct hidma_lldev *lldev)
{
        unsigned long irqflags;

        spin_lock_irqsave(&lldev->lock, irqflags);
        writel(lldev->tre_write_offset, lldev->trca + HIDMA_TRCA_DOORBELL_REG);
        spin_unlock_irqrestore(&lldev->lock, irqflags);
}

bool hidma_ll_isenabled(struct hidma_lldev *lldev)
{
        u32 val;

        val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
        lldev->trch_state = HIDMA_CH_STATE(val);
        val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
        lldev->evch_state = HIDMA_CH_STATE(val);

        /* both channels have to be enabled before calling this function */
        if (hidma_is_chan_enabled(lldev->trch_state) &&
            hidma_is_chan_enabled(lldev->evch_state))
                return true;

        return false;
}

void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
{
        struct hidma_tre *tre;
        unsigned long flags;

        tre = &lldev->trepool[tre_ch];

        /* copy the TRE into its location in the TRE ring */
        spin_lock_irqsave(&lldev->lock, flags);
        tre->tre_index = lldev->tre_write_offset / HIDMA_TRE_SIZE;
        lldev->pending_tre_list[tre->tre_index] = tre;
        memcpy(lldev->tre_ring + lldev->tre_write_offset,
               &tre->tre_local[0], HIDMA_TRE_SIZE);
        tre->err_code = 0;
        tre->err_info = 0;
        tre->queued = 1;
        lldev->pending_tre_count++;
        lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
            % lldev->tre_ring_size;
        spin_unlock_irqrestore(&lldev->lock, flags);
}

/*
 * Note that even though we stop this channel if there is a pending transaction
 * in flight it will complete and follow the callback. This request will
 * prevent further requests to be made.
 */
int hidma_ll_disable(struct hidma_lldev *lldev)
{
        u32 val;
        int ret;

        val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
        lldev->evch_state = HIDMA_CH_STATE(val);
        val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
        lldev->trch_state = HIDMA_CH_STATE(val);

        /* already suspended by this OS */
        if ((lldev->trch_state == HIDMA_CH_SUSPENDED) ||
            (lldev->evch_state == HIDMA_CH_SUSPENDED))
                return 0;

        /* already stopped by the manager */
        if ((lldev->trch_state == HIDMA_CH_STOPPED) ||
            (lldev->evch_state == HIDMA_CH_STOPPED))
                return 0;

        val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
        val &= ~(HIDMA_CH_CONTROL_MASK << 16);
        val |= HIDMA_CH_SUSPEND << 16;
        writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);

        /*
         * Start the wait right after the suspend is confirmed.
         * Do a polled read up to 1ms and 10ms maximum.
         */
        ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
                                 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
                                 1000, 10000);
        if (ret)
                return ret;

        val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
        val &= ~(HIDMA_CH_CONTROL_MASK << 16);
        val |= HIDMA_CH_SUSPEND << 16;
        writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);

        /*
         * Start the wait right after the suspend is confirmed
         * Delay up to 10ms after reset to allow DMA logic to quiesce.
         */
        ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
                                 HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
                                 1000, 10000);
        if (ret)
                return ret;

        lldev->trch_state = HIDMA_CH_SUSPENDED;
        lldev->evch_state = HIDMA_CH_SUSPENDED;
        return 0;
}

void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
                                  dma_addr_t src, dma_addr_t dest, u32 len,
                                  u32 flags)
{
        struct hidma_tre *tre;
        u32 *tre_local;

        if (tre_ch >= lldev->nr_tres) {
                dev_err(lldev->dev, "invalid TRE number in transfer params:%d",
                        tre_ch);
                return;
        }

        tre = &lldev->trepool[tre_ch];
        if (atomic_read(&tre->allocated) != true) {
                dev_err(lldev->dev, "trying to set params on an unused TRE:%d",
                        tre_ch);
                return;
        }

        tre_local = &tre->tre_local[0];
        tre_local[HIDMA_TRE_LEN_IDX] = len;
        tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
        tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
        tre_local[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest);
        tre_local[HIDMA_TRE_DEST_HI_IDX] = upper_32_bits(dest);
        tre->int_flags = flags;
}

/*
 * Called during initialization and after an error condition
 * to restore hardware state.
 */
int hidma_ll_setup(struct hidma_lldev *lldev)
{
        int rc;
        u64 addr;
        u32 val;
        u32 nr_tres = lldev->nr_tres;

        lldev->pending_tre_count = 0;
        lldev->tre_processed_off = 0;
        lldev->evre_processed_off = 0;
        lldev->tre_write_offset = 0;

        /* disable interrupts */
        writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

        /* clear all pending interrupts */
        val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
        writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

        rc = hidma_ll_reset(lldev);
        if (rc)
                return rc;

        /*
         * Clear all pending interrupts again.
         * Otherwise, we observe reset complete interrupts.
         */
        val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
        writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);

        /* disable interrupts again after reset */
        writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

        addr = lldev->tre_dma;
        writel(lower_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_LOW_REG);
        writel(upper_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_HIGH_REG);
        writel(lldev->tre_ring_size, lldev->trca + HIDMA_TRCA_RING_LEN_REG);

        addr = lldev->evre_dma;
        writel(lower_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_LOW_REG);
        writel(upper_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_HIGH_REG);
        writel(HIDMA_EVRE_SIZE * nr_tres,
               lldev->evca + HIDMA_EVCA_RING_LEN_REG);

        /* support IRQ only for now */
        val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
        val &= ~0xF;
        val |= 0x1;
        writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);

        /* clear all pending interrupts and enable them */
        writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
        writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);

        return hidma_ll_enable(lldev);
}

struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
                                  void __iomem *trca, void __iomem *evca,
                                  u8 chidx)
{
        u32 required_bytes;
        struct hidma_lldev *lldev;
        int rc;
        size_t sz;

        if (!trca || !evca || !dev || !nr_tres)
                return NULL;

        /* need at least four TREs */
        if (nr_tres < 4)
                return NULL;

        /* need an extra space */
        nr_tres += 1;

        lldev = devm_kzalloc(dev, sizeof(struct hidma_lldev), GFP_KERNEL);
        if (!lldev)
                return NULL;

        lldev->evca = evca;
        lldev->trca = trca;
        lldev->dev = dev;
        sz = sizeof(struct hidma_tre);
        lldev->trepool = devm_kcalloc(lldev->dev, nr_tres, sz, GFP_KERNEL);
        if (!lldev->trepool)
                return NULL;

        required_bytes = sizeof(lldev->pending_tre_list[0]);
        lldev->pending_tre_list = devm_kcalloc(dev, nr_tres, required_bytes,
                                               GFP_KERNEL);
        if (!lldev->pending_tre_list)
                return NULL;

        sz = (HIDMA_TRE_SIZE + 1) * nr_tres;
        lldev->tre_ring = dmam_alloc_coherent(dev, sz, &lldev->tre_dma,
                                              GFP_KERNEL);
        if (!lldev->tre_ring)
                return NULL;

        memset(lldev->tre_ring, 0, (HIDMA_TRE_SIZE + 1) * nr_tres);
        lldev->tre_ring_size = HIDMA_TRE_SIZE * nr_tres;
        lldev->nr_tres = nr_tres;

        /* the TRE ring has to be TRE_SIZE aligned */
        if (!IS_ALIGNED(lldev->tre_dma, HIDMA_TRE_SIZE)) {
                u8 tre_ring_shift;

                tre_ring_shift = lldev->tre_dma % HIDMA_TRE_SIZE;
                tre_ring_shift = HIDMA_TRE_SIZE - tre_ring_shift;
                lldev->tre_dma += tre_ring_shift;
                lldev->tre_ring += tre_ring_shift;
        }

        sz = (HIDMA_EVRE_SIZE + 1) * nr_tres;
        lldev->evre_ring = dmam_alloc_coherent(dev, sz, &lldev->evre_dma,
                                               GFP_KERNEL);
        if (!lldev->evre_ring)
                return NULL;

        memset(lldev->evre_ring, 0, (HIDMA_EVRE_SIZE + 1) * nr_tres);
        lldev->evre_ring_size = HIDMA_EVRE_SIZE * nr_tres;

        /* the EVRE ring has to be EVRE_SIZE aligned */
        if (!IS_ALIGNED(lldev->evre_dma, HIDMA_EVRE_SIZE)) {
                u8 evre_ring_shift;

                evre_ring_shift = lldev->evre_dma % HIDMA_EVRE_SIZE;
                evre_ring_shift = HIDMA_EVRE_SIZE - evre_ring_shift;
                lldev->evre_dma += evre_ring_shift;
                lldev->evre_ring += evre_ring_shift;
        }
        lldev->nr_tres = nr_tres;
        lldev->chidx = chidx;

        sz = nr_tres * sizeof(struct hidma_tre *);
        rc = kfifo_alloc(&lldev->handoff_fifo, sz, GFP_KERNEL);
        if (rc)
                return NULL;

        rc = hidma_ll_setup(lldev);
        if (rc)
                return NULL;

        spin_lock_init(&lldev->lock);
        tasklet_init(&lldev->rst_task, hidma_ll_abort, (unsigned long)lldev);
        tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
        lldev->initialized = 1;
        writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
        return lldev;
}
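
One detail in hidma_ll_init() above is the alignment fix-up: each ring is over-allocated by one element so that, when dmam_alloc_coherent() hands back an address that is not naturally aligned, both the DMA handle and the CPU pointer can be shifted forward by the same amount without running past the buffer. A standalone sketch of the arithmetic (the address is made up):

/* Sketch of the align-up fix-up applied to the TRE/EVRE rings. */
#include <stdint.h>
#include <stdio.h>

#define TRE_SIZE 32

int main(void)
{
        uint64_t dma = 0x80001014;      /* hypothetical unaligned DMA address */

        if (dma % TRE_SIZE) {
                uint64_t shift = TRE_SIZE - dma % TRE_SIZE;

                /* the matching CPU-side pointer must move by the same shift */
                dma += shift;
        }
        printf("aligned: 0x%llx\n", (unsigned long long)dma);   /* 0x80001020 */
        return 0;
}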

int hidma_ll_uninit(struct hidma_lldev *lldev)
{
        u32 required_bytes;
        int rc = 0;
        u32 val;

        if (!lldev)
                return -ENODEV;

        if (!lldev->initialized)
                return 0;

        lldev->initialized = 0;

        required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
        tasklet_kill(&lldev->task);
        memset(lldev->trepool, 0, required_bytes);
        lldev->trepool = NULL;
        lldev->pending_tre_count = 0;
        lldev->tre_write_offset = 0;

        rc = hidma_ll_reset(lldev);

        /*
         * Clear all pending interrupts again.
         * Otherwise, we observe reset complete interrupts.
         */
        val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
        writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
        writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
        return rc;
}

enum dma_status hidma_ll_status(struct hidma_lldev *lldev, u32 tre_ch)
{
        enum dma_status ret = DMA_ERROR;
        struct hidma_tre *tre;
        unsigned long flags;
        u8 err_code;

        spin_lock_irqsave(&lldev->lock, flags);

        tre = &lldev->trepool[tre_ch];
        err_code = tre->err_code;

        if (err_code & HIDMA_EVRE_STATUS_COMPLETE)
                ret = DMA_COMPLETE;
        else if (err_code & HIDMA_EVRE_STATUS_ERROR)
                ret = DMA_ERROR;
        else
                ret = DMA_IN_PROGRESS;
        spin_unlock_irqrestore(&lldev->lock, flags);

        return ret;
}
drivers/dma/qcom/hidma_mgmt.c

 /*
  * Qualcomm Technologies HIDMA DMA engine Management interface
  *
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,13 +17,14 @@
 #include <linux/acpi.h>
 #include <linux/of.h>
 #include <linux/property.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
 #include <linux/bitops.h>
+#include <linux/dma-mapping.h>

 #include "hidma_mgmt.h"
@@ -298,5 +299,109 @@ static struct platform_driver hidma_mgmt_driver = {
        },
 };

-module_platform_driver(hidma_mgmt_driver);
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
+static int object_counter;
+
+static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
+{
+       struct platform_device *pdev_parent = of_find_device_by_node(np);
+       struct platform_device_info pdevinfo;
+       struct of_phandle_args out_irq;
+       struct device_node *child;
+       struct resource *res;
+       const __be32 *cell;
+       int ret = 0, size, i, num;
+       u64 addr, addr_size;
+
+       for_each_available_child_of_node(np, child) {
+               struct resource *res_iter;
+               struct platform_device *new_pdev;
+
+               cell = of_get_property(child, "reg", &size);
+               if (!cell) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               size /= sizeof(*cell);
+               num = size /
+                       (of_n_addr_cells(child) + of_n_size_cells(child)) + 1;
+
+               /* allocate a resource array */
+               res = kcalloc(num, sizeof(*res), GFP_KERNEL);
+               if (!res) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               /* read each reg value */
+               i = 0;
+               res_iter = res;
+               while (i < size) {
+                       addr = of_read_number(&cell[i],
+                                             of_n_addr_cells(child));
+                       i += of_n_addr_cells(child);
+
+                       addr_size = of_read_number(&cell[i],
+                                                  of_n_size_cells(child));
+                       i += of_n_size_cells(child);
+
+                       res_iter->start = addr;
+                       res_iter->end = res_iter->start + addr_size - 1;
+                       res_iter->flags = IORESOURCE_MEM;
+                       res_iter++;
+               }
+
+               ret = of_irq_parse_one(child, 0, &out_irq);
+               if (ret)
+                       goto out;
+
+               res_iter->start = irq_create_of_mapping(&out_irq);
+               res_iter->name = "hidma event irq";
+               res_iter->flags = IORESOURCE_IRQ;
+
+               memset(&pdevinfo, 0, sizeof(pdevinfo));
+               pdevinfo.fwnode = &child->fwnode;
+               pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL;
+               pdevinfo.name = child->name;
+               pdevinfo.id = object_counter++;
+               pdevinfo.res = res;
+               pdevinfo.num_res = num;
+               pdevinfo.data = NULL;
+               pdevinfo.size_data = 0;
+               pdevinfo.dma_mask = DMA_BIT_MASK(64);
+               new_pdev = platform_device_register_full(&pdevinfo);
+               if (!new_pdev) {
+                       ret = -ENODEV;
+                       goto out;
+               }
+               of_dma_configure(&new_pdev->dev, child);
+
+               kfree(res);
+               res = NULL;
+       }
+out:
+       kfree(res);
+       return ret;
+}
+#endif
+
+static int __init hidma_mgmt_init(void)
+{
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
+       struct device_node *child;
+
+       for (child = of_find_matching_node(NULL, hidma_mgmt_match); child;
+            child = of_find_matching_node(child, hidma_mgmt_match)) {
+               /* device tree based firmware here */
+               hidma_mgmt_of_populate_channels(child);
+               of_node_put(child);
+       }
+#endif
+       platform_driver_register(&hidma_mgmt_driver);
+
+       return 0;
+}
+
+module_init(hidma_mgmt_init);
 MODULE_LICENSE("GPL v2");