Commit bfda9020 authored Oct 24, 2018 by Vinod Koul
Merge branch 'topic/owl' into for-linus
parents 4fa7393b a3e40316
Showing 1 changed file with 276 additions and 7 deletions
drivers/dma/owl-dma.c
...
...
@@ -21,6 +21,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
+#include <linux/of_dma.h>
 #include <linux/slab.h>
 #include "virt-dma.h"
...
...
@@ -161,10 +162,12 @@ struct owl_dma_lli {
  * struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
  * @vd: virtual DMA descriptor
  * @lli_list: link list of lli nodes
+ * @cyclic: flag to indicate cyclic transfers
  */
 struct owl_dma_txd {
 	struct virt_dma_desc	vd;
 	struct list_head	lli_list;
+	bool			cyclic;
 };
 
 /**
...
...
@@ -186,11 +189,15 @@ struct owl_dma_pchan {
  * @vc: wrappped virtual channel
  * @pchan: the physical channel utilized by this channel
  * @txd: active transaction on this channel
+ * @cfg: slave configuration for this channel
+ * @drq: physical DMA request ID for this channel
  */
 struct owl_dma_vchan {
 	struct virt_dma_chan	vc;
 	struct owl_dma_pchan	*pchan;
 	struct owl_dma_txd	*txd;
+	struct dma_slave_config cfg;
+	u8			drq;
 };
 
 /**
...
...
@@ -200,6 +207,7 @@ struct owl_dma_vchan {
  * @clk: clock for the DMA controller
  * @lock: a lock to use when change DMA controller global register
  * @lli_pool: a pool for the LLI descriptors
+ * @irq: interrupt ID for the DMA controller
  * @nr_pchans: the number of physical channels
  * @pchans: array of data for the physical channels
  * @nr_vchans: the number of physical channels
...
...
@@ -336,9 +344,11 @@ static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od)
 
 static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
 					   struct owl_dma_lli *prev,
-					   struct owl_dma_lli *next)
+					   struct owl_dma_lli *next,
+					   bool is_cyclic)
 {
-	list_add_tail(&next->node, &txd->lli_list);
+	if (!is_cyclic)
+		list_add_tail(&next->node, &txd->lli_list);
 
 	if (prev) {
 		prev->hw.next_lli = next->phys;
...
...
@@ -351,7 +361,9 @@ static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
 static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
 				  struct owl_dma_lli *lli,
 				  dma_addr_t src, dma_addr_t dst,
-				  u32 len, enum dma_transfer_direction dir)
+				  u32 len, enum dma_transfer_direction dir,
+				  struct dma_slave_config *sconfig,
+				  bool is_cyclic)
 {
 	struct owl_dma_lli_hw *hw = &lli->hw;
 	u32 mode;
...
...
@@ -364,6 +376,32 @@ static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
 			OWL_DMA_MODE_DT_DCU | OWL_DMA_MODE_SAM_INC |
 			OWL_DMA_MODE_DAM_INC;
 
 		break;
+	case DMA_MEM_TO_DEV:
+		mode |= OWL_DMA_MODE_TS(vchan->drq)
+			| OWL_DMA_MODE_ST_DCU | OWL_DMA_MODE_DT_DEV
+			| OWL_DMA_MODE_SAM_INC | OWL_DMA_MODE_DAM_CONST;
+
+		/*
+		 * Hardware only supports 32bit and 8bit buswidth. Since the
+		 * default is 32bit, select 8bit only when requested.
+		 */
+		if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
+			mode |= OWL_DMA_MODE_NDDBW_8BIT;
+
+		break;
+	case DMA_DEV_TO_MEM:
+		mode |= OWL_DMA_MODE_TS(vchan->drq)
+			| OWL_DMA_MODE_ST_DEV | OWL_DMA_MODE_DT_DCU
+			| OWL_DMA_MODE_SAM_CONST | OWL_DMA_MODE_DAM_INC;
+
+		/*
+		 * Hardware only supports 32bit and 8bit buswidth. Since the
+		 * default is 32bit, select 8bit only when requested.
+		 */
+		if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
+			mode |= OWL_DMA_MODE_NDDBW_8BIT;
+
+		break;
 	default:
 		return -EINVAL;
...
...
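The new sconfig parameter lets the slave paths honour the bus width requested by the DMA client: the controller defaults to 32-bit accesses and switches to 8-bit only when asked. As a purely illustrative sketch of the consumer side (not part of this commit; the function name and FIFO address below are hypothetical), a client would request the narrow width through the standard dmaengine_slave_config() call before preparing transfers:

#include <linux/dmaengine.h>

/*
 * Hypothetical client-side configuration (illustration only): ask for
 * 8-bit accesses on the device end of a MEM_TO_DEV transfer. owl-dma
 * then sets OWL_DMA_MODE_NDDBW_8BIT when building the LLIs for this
 * channel.
 */
static int example_cfg_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,	/* device FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
	};

	/* Stored by owl_dma_config() into vchan->cfg */
	return dmaengine_slave_config(chan, &cfg);
}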
@@ -381,7 +419,10 @@ static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
 				 OWL_DMA_LLC_SAV_LOAD_NEXT |
 				 OWL_DMA_LLC_DAV_LOAD_NEXT);
 
-	hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);
+	if (is_cyclic)
+		hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_BLOCK);
+	else
+		hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);
 
 	return 0;
 }
...
...
@@ -443,6 +484,16 @@ static void owl_dma_terminate_pchan(struct owl_dma *od,
 	spin_unlock_irqrestore(&od->lock, flags);
 }
 
+static void owl_dma_pause_pchan(struct owl_dma_pchan *pchan)
+{
+	pchan_writel(pchan, 1, OWL_DMAX_PAUSE);
+}
+
+static void owl_dma_resume_pchan(struct owl_dma_pchan *pchan)
+{
+	pchan_writel(pchan, 0, OWL_DMAX_PAUSE);
+}
+
 static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
 {
 	struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
...
...
@@ -464,7 +515,10 @@ static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
 	lli = list_first_entry(&txd->lli_list,
 			       struct owl_dma_lli, node);
 
-	int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;
+	if (txd->cyclic)
+		int_ctl = OWL_DMA_INTCTL_BLOCK;
+	else
+		int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;
 
 	pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME);
 	pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL,
...
...
@@ -627,6 +681,54 @@ static int owl_dma_terminate_all(struct dma_chan *chan)
 	return 0;
 }
 
+static int owl_dma_config(struct dma_chan *chan,
+			  struct dma_slave_config *config)
+{
+	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+
+	/* Reject definitely invalid configurations */
+	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+		return -EINVAL;
+
+	memcpy(&vchan->cfg, config, sizeof(struct dma_slave_config));
+
+	return 0;
+}
+
+static int owl_dma_pause(struct dma_chan *chan)
+{
+	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&vchan->vc.lock, flags);
+
+	owl_dma_pause_pchan(vchan->pchan);
+
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+	return 0;
+}
+
+static int owl_dma_resume(struct dma_chan *chan)
+{
+	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+	unsigned long flags;
+
+	if (!vchan->pchan && !vchan->txd)
+		return 0;
+
+	dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);
+
+	spin_lock_irqsave(&vchan->vc.lock, flags);
+
+	owl_dma_resume_pchan(vchan->pchan);
+
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+	return 0;
+}
+
 static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan)
 {
 	struct owl_dma_pchan *pchan;
...
...
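With device_config, device_pause and device_resume now provided, a consumer can park and restart a slave channel through the generic dmaengine wrappers. A minimal, hypothetical usage sketch (not part of this diff; error handling omitted):

#include <linux/dmaengine.h>

/*
 * Hypothetical consumer snippet (illustration only): pausing writes
 * OWL_DMAX_PAUSE on the physical channel, resuming clears it again.
 */
static void example_pause_resume(struct dma_chan *chan)
{
	dmaengine_pause(chan);		/* reaches owl_dma_pause() */

	/* ... service the peripheral while the channel is held ... */

	dmaengine_resume(chan);		/* reaches owl_dma_resume() */
}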
@@ -754,13 +856,14 @@ static struct dma_async_tx_descriptor
 		bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH);
 
 		ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset,
-				      bytes, DMA_MEM_TO_MEM);
+				      bytes, DMA_MEM_TO_MEM,
+				      &vchan->cfg, txd->cyclic);
 		if (ret) {
 			dev_warn(chan2dev(chan), "failed to config lli\n");
 			goto err_txd_free;
 		}
 
-		prev = owl_dma_add_lli(txd, prev, lli);
+		prev = owl_dma_add_lli(txd, prev, lli, false);
 	}
 
 	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
...
...
@@ -770,6 +873,133 @@ static struct dma_async_tx_descriptor
 	return NULL;
 }
 
+static struct dma_async_tx_descriptor
+		*owl_dma_prep_slave_sg(struct dma_chan *chan,
+				       struct scatterlist *sgl,
+				       unsigned int sg_len,
+				       enum dma_transfer_direction dir,
+				       unsigned long flags, void *context)
+{
+	struct owl_dma *od = to_owl_dma(chan->device);
+	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+	struct dma_slave_config *sconfig = &vchan->cfg;
+	struct owl_dma_txd *txd;
+	struct owl_dma_lli *lli, *prev = NULL;
+	struct scatterlist *sg;
+	dma_addr_t addr, src = 0, dst = 0;
+	size_t len;
+	int ret, i;
+
+	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+	if (!txd)
+		return NULL;
+
+	INIT_LIST_HEAD(&txd->lli_list);
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		addr = sg_dma_address(sg);
+		len = sg_dma_len(sg);
+
+		if (len > OWL_DMA_FRAME_MAX_LENGTH) {
+			dev_err(od->dma.dev,
+				"frame length exceeds max supported length");
+			goto err_txd_free;
+		}
+
+		lli = owl_dma_alloc_lli(od);
+		if (!lli) {
+			dev_err(chan2dev(chan), "failed to allocate lli");
+			goto err_txd_free;
+		}
+
+		if (dir == DMA_MEM_TO_DEV) {
+			src = addr;
+			dst = sconfig->dst_addr;
+		} else {
+			src = sconfig->src_addr;
+			dst = addr;
+		}
+
+		ret = owl_dma_cfg_lli(vchan, lli, src, dst, len, dir, sconfig,
+				      txd->cyclic);
+		if (ret) {
+			dev_warn(chan2dev(chan), "failed to config lli");
+			goto err_txd_free;
+		}
+
+		prev = owl_dma_add_lli(txd, prev, lli, false);
+	}
+
+	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
+
+err_txd_free:
+	owl_dma_free_txd(od, txd);
+
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor
+		*owl_prep_dma_cyclic(struct dma_chan *chan,
+				     dma_addr_t buf_addr, size_t buf_len,
+				     size_t period_len,
+				     enum dma_transfer_direction dir,
+				     unsigned long flags)
+{
+	struct owl_dma *od = to_owl_dma(chan->device);
+	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+	struct dma_slave_config *sconfig = &vchan->cfg;
+	struct owl_dma_txd *txd;
+	struct owl_dma_lli *lli, *prev = NULL, *first = NULL;
+	dma_addr_t src = 0, dst = 0;
+	unsigned int periods = buf_len / period_len;
+	int ret, i;
+
+	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+	if (!txd)
+		return NULL;
+
+	INIT_LIST_HEAD(&txd->lli_list);
+	txd->cyclic = true;
+
+	for (i = 0; i < periods; i++) {
+		lli = owl_dma_alloc_lli(od);
+		if (!lli) {
+			dev_warn(chan2dev(chan), "failed to allocate lli");
+			goto err_txd_free;
+		}
+
+		if (dir == DMA_MEM_TO_DEV) {
+			src = buf_addr + (period_len * i);
+			dst = sconfig->dst_addr;
+		} else if (dir == DMA_DEV_TO_MEM) {
+			src = sconfig->src_addr;
+			dst = buf_addr + (period_len * i);
+		}
+
+		ret = owl_dma_cfg_lli(vchan, lli, src, dst, period_len,
+				      dir, sconfig, txd->cyclic);
+		if (ret) {
+			dev_warn(chan2dev(chan), "failed to config lli");
+			goto err_txd_free;
+		}
+
+		if (!first)
+			first = lli;
+
+		prev = owl_dma_add_lli(txd, prev, lli, false);
+	}
+
+	/* close the cyclic list */
+	owl_dma_add_lli(txd, prev, first, true);
+
+	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
+
+err_txd_free:
+	owl_dma_free_txd(od, txd);
+
+	return NULL;
+}
+
 static void owl_dma_free_chan_resources(struct dma_chan *chan)
 {
 	struct owl_dma_vchan *vchan = to_owl_vchan(chan);
...
...
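The two new prep callbacks are reached through the standard dmaengine consumer API. Below is a hedged sketch of how a client driver might drive them, assuming a channel has already been obtained and configured with dmaengine_slave_config(); the function name and buffer handling are illustrative, not from this commit:

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical consumer sketch (illustration only): queue a
 * scatter-gather transfer towards the device, then a cyclic buffer for
 * streaming-style reception. "chan" is assumed to be an owl-dma slave
 * channel that was configured beforehand.
 */
static int example_submit(struct dma_chan *chan,
			  struct scatterlist *sgl, unsigned int sg_len,
			  dma_addr_t buf, size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *desc;

	/* Routed to owl_dma_prep_slave_sg() */
	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;
	dmaengine_submit(desc);

	/* Routed to owl_prep_dma_cyclic(); one interrupt per period */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;
	dmaengine_submit(desc);

	dma_async_issue_pending(chan);

	return 0;
}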
@@ -790,6 +1020,27 @@ static inline void owl_dma_free(struct owl_dma *od)
 	}
 }
 
+static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
+					 struct of_dma *ofdma)
+{
+	struct owl_dma *od = ofdma->of_dma_data;
+	struct owl_dma_vchan *vchan;
+	struct dma_chan *chan;
+	u8 drq = dma_spec->args[0];
+
+	if (drq > od->nr_vchans)
+		return NULL;
+
+	chan = dma_get_any_slave_channel(&od->dma);
+	if (!chan)
+		return NULL;
+
+	vchan = to_owl_vchan(chan);
+	vchan->drq = drq;
+
+	return chan;
+}
+
 static int owl_dma_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
...
...
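owl_dma_of_xlate() interprets the single cell of the device-tree dma-spec as the DRQ (request line) number and stores it in the chosen virtual channel. On the consumer side this resolves through the usual OF channel lookup; a hypothetical sketch (the client property values below are illustrative, not taken from this commit):

#include <linux/device.h>
#include <linux/dmaengine.h>

/*
 * Hypothetical consumer lookup (illustration only): with a client node
 * carrying something like "dmas = <&dma 2>; dma-names = "tx";", this
 * call goes through owl_dma_of_xlate(), which picks a free channel and
 * sets vchan->drq = 2.
 */
static struct dma_chan *example_get_tx_chan(struct device *dev)
{
	return dma_request_chan(dev, "tx");	/* may return ERR_PTR() */
}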
@@ -833,12 +1084,19 @@ static int owl_dma_probe(struct platform_device *pdev)
 	spin_lock_init(&od->lock);
 
 	dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
+	dma_cap_set(DMA_SLAVE, od->dma.cap_mask);
+	dma_cap_set(DMA_CYCLIC, od->dma.cap_mask);
 
 	od->dma.dev = &pdev->dev;
 	od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
 	od->dma.device_tx_status = owl_dma_tx_status;
 	od->dma.device_issue_pending = owl_dma_issue_pending;
 	od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
+	od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg;
+	od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic;
+	od->dma.device_config = owl_dma_config;
+	od->dma.device_pause = owl_dma_pause;
+	od->dma.device_resume = owl_dma_resume;
 	od->dma.device_terminate_all = owl_dma_terminate_all;
 	od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
 	od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
...
...
@@ -910,8 +1168,18 @@ static int owl_dma_probe(struct platform_device *pdev)
 		goto err_pool_free;
 	}
 
+	/* Device-tree DMA controller registration */
+	ret = of_dma_controller_register(pdev->dev.of_node,
+					 owl_dma_of_xlate, od);
+	if (ret) {
+		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
+		goto err_dma_unregister;
+	}
+
 	return 0;
 
+err_dma_unregister:
+	dma_async_device_unregister(&od->dma);
 err_pool_free:
 	clk_disable_unprepare(od->clk);
 	dma_pool_destroy(od->lli_pool);
...
...
@@ -923,6 +1191,7 @@ static int owl_dma_remove(struct platform_device *pdev)
 {
 	struct owl_dma *od = platform_get_drvdata(pdev);
 
+	of_dma_controller_free(pdev->dev.of_node);
 	dma_async_device_unregister(&od->dma);
 
 	/* Mask all interrupts for this execution environment */
...
...