nexedi / linux / Commits

Commit 0e52d987, authored Jul 31, 2012 by Russell King
Merge branches 'dma-omap', 'dma-pl08x' and 'dma-sa11x0' into dmaengine
Parents: 89269ef1, a068682c, d9444325
Showing 11 changed files with 614 additions and 785 deletions (+614, -785)
Files changed:

  arch/arm/mach-spear3xx/spear300.c            +0    -26
  arch/arm/mach-spear3xx/spear310.c            +0    -26
  arch/arm/mach-spear3xx/spear320.c            +0    -26
  arch/arm/mach-spear3xx/spear3xx.c            +2     -1
  arch/arm/mach-spear6xx/spear6xx.c            +2    -49
  arch/arm/plat-spear/include/plat/pl080.h     +3     -3
  arch/arm/plat-spear/pl080.c                  +4     -6
  drivers/dma/Kconfig                          +1     -0
  drivers/dma/amba-pl08x.c                   +473   -468
  drivers/dma/sa11x0-dma.c                   +121    -32
  include/linux/amba/pl08x.h                   +8   -148
arch/arm/mach-spear3xx/spear300.c  (view file @ 0e52d987)

@@ -120,182 +120,156 @@ struct pl08x_channel_data spear300_dma_info[] = {
		.min_signal = 2, .max_signal = 2, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "uart0_tx",  .min_signal = 3,  .max_signal = 3,  .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ssp0_rx",   .min_signal = 8,  .max_signal = 8,  .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ssp0_tx",   .min_signal = 9,  .max_signal = 9,  .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "i2c_rx",    .min_signal = 10, .max_signal = 10, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "i2c_tx",    .min_signal = 11, .max_signal = 11, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "irda",      .min_signal = 12, .max_signal = 12, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "adc",       .min_signal = 13, .max_signal = 13, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "to_jpeg",   .min_signal = 14, .max_signal = 14, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras0_rx",   .min_signal = 0,  .max_signal = 0,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras0_tx",   .min_signal = 1,  .max_signal = 1,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras1_rx",   .min_signal = 2,  .max_signal = 2,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras1_tx",   .min_signal = 3,  .max_signal = 3,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras2_rx",   .min_signal = 4,  .max_signal = 4,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras2_tx",   .min_signal = 5,  .max_signal = 5,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras3_rx",   .min_signal = 6,  .max_signal = 6,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras3_tx",   .min_signal = 7,  .max_signal = 7,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras4_rx",   .min_signal = 8,  .max_signal = 8,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras4_tx",   .min_signal = 9,  .max_signal = 9,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras5_rx",   .min_signal = 10, .max_signal = 10, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras5_tx",   .min_signal = 11, .max_signal = 11, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras6_rx",   .min_signal = 12, .max_signal = 12, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras6_tx",   .min_signal = 13, .max_signal = 13, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras7_rx",   .min_signal = 14, .max_signal = 14, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras7_tx",   .min_signal = 15, .max_signal = 15, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	},
};
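For context only (not part of this commit): a peripheral driver normally picks up one of the channels named in the table above by its .bus_id string through the standard dmaengine filter interface. A minimal consumer-side sketch, assuming the pl08x driver's pl08x_filter_id() filter (not shown in this diff) and a hypothetical "uart0_tx" user; error handling is trimmed:

	/* Hypothetical consumer sketch - matches a channel by its bus_id. */
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/* "uart0_tx" is the .bus_id from spear300_dma_info[] above */
	chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
	if (!chan)
		return -ENODEV;	/* assumption: caller propagates an errno */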
arch/arm/mach-spear3xx/spear310.c  (view file @ 0e52d987)

@@ -205,182 +205,156 @@ struct pl08x_channel_data spear310_dma_info[] = {
		.min_signal = 2, .max_signal = 2, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "uart0_tx",  .min_signal = 3,  .max_signal = 3,  .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ssp0_rx",   .min_signal = 8,  .max_signal = 8,  .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ssp0_tx",   .min_signal = 9,  .max_signal = 9,  .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "i2c_rx",    .min_signal = 10, .max_signal = 10, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "i2c_tx",    .min_signal = 11, .max_signal = 11, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "irda",      .min_signal = 12, .max_signal = 12, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "adc",       .min_signal = 13, .max_signal = 13, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "to_jpeg",   .min_signal = 14, .max_signal = 14, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "uart1_rx",  .min_signal = 0,  .max_signal = 0,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "uart1_tx",  .min_signal = 1,  .max_signal = 1,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "uart2_rx",  .min_signal = 2,  .max_signal = 2,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "uart2_tx",  .min_signal = 3,  .max_signal = 3,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "uart3_rx",  .min_signal = 4,  .max_signal = 4,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "uart3_tx",  .min_signal = 5,  .max_signal = 5,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "uart4_rx",  .min_signal = 6,  .max_signal = 6,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "uart4_tx",  .min_signal = 7,  .max_signal = 7,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "uart5_rx",  .min_signal = 8,  .max_signal = 8,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "uart5_tx",  .min_signal = 9,  .max_signal = 9,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras5_rx",   .min_signal = 10, .max_signal = 10, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras5_tx",   .min_signal = 11, .max_signal = 11, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras6_rx",   .min_signal = 12, .max_signal = 12, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras6_tx",   .min_signal = 13, .max_signal = 13, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras7_rx",   .min_signal = 14, .max_signal = 14, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras7_tx",   .min_signal = 15, .max_signal = 15, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	},
};
arch/arm/mach-spear3xx/spear320.c  (view file @ 0e52d987)

@@ -213,182 +213,156 @@ struct pl08x_channel_data spear320_dma_info[] = {
		.min_signal = 2, .max_signal = 2, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "uart0_tx",  .min_signal = 3,  .max_signal = 3,  .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ssp0_rx",   .min_signal = 8,  .max_signal = 8,  .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ssp0_tx",   .min_signal = 9,  .max_signal = 9,  .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "i2c0_rx",   .min_signal = 10, .max_signal = 10, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "i2c0_tx",   .min_signal = 11, .max_signal = 11, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "irda",      .min_signal = 12, .max_signal = 12, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "adc",       .min_signal = 13, .max_signal = 13, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "to_jpeg",   .min_signal = 14, .max_signal = 14, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ssp1_rx",   .min_signal = 0,  .max_signal = 0,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "ssp1_tx",   .min_signal = 1,  .max_signal = 1,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "ssp2_rx",   .min_signal = 2,  .max_signal = 2,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "ssp2_tx",   .min_signal = 3,  .max_signal = 3,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "uart1_rx",  .min_signal = 4,  .max_signal = 4,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "uart1_tx",  .min_signal = 5,  .max_signal = 5,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "uart2_rx",  .min_signal = 6,  .max_signal = 6,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "uart2_tx",  .min_signal = 7,  .max_signal = 7,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "i2c1_rx",   .min_signal = 8,  .max_signal = 8,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "i2c1_tx",   .min_signal = 9,  .max_signal = 9,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "i2c2_rx",   .min_signal = 10, .max_signal = 10, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "i2c2_tx",   .min_signal = 11, .max_signal = 11, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "i2s_rx",    .min_signal = 12, .max_signal = 12, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "i2s_tx",    .min_signal = 13, .max_signal = 13, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "rs485_rx",  .min_signal = 14, .max_signal = 14, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "rs485_tx",  .min_signal = 15, .max_signal = 15, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB2,
	},
};
arch/arm/mach-spear3xx/spear3xx.c  (view file @ 0e52d987)

@@ -46,7 +46,8 @@ struct pl022_ssp_controller pl022_plat_data = {
struct pl08x_platform_data pl080_plat_data = {
	.memcpy_channel = {
		.bus_id = "memcpy",
		.cctl = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
		.cctl_memcpy = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
			PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \
			PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \
			PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \
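The hunk above renames the memcpy channel's control word from .cctl to .cctl_memcpy. A minimal sketch of what a board file's platform data looks like after this change; it mirrors only the four control-word terms visible in the hunk (the real SPEAr setup continues with further flags that are cut off here), and the variable name is illustrative:

	/* Sketch only - shows the renamed field, not the complete SPEAr data. */
	static struct pl08x_platform_data example_pl080_pdata = {
		.memcpy_channel = {
			.bus_id = "memcpy",
			.cctl_memcpy = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
				       (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT) |
				       (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
				       (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT),
		},
	};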
arch/arm/mach-spear6xx/spear6xx.c  (view file @ 0e52d987)

@@ -36,336 +36,288 @@ static struct pl08x_channel_data spear600_dma_info[] = {
		.min_signal = 0, .max_signal = 0, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ssp1_tx",   .min_signal = 1,  .max_signal = 1,  .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "uart0_rx",  .min_signal = 2,  .max_signal = 2,  .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "uart0_tx",  .min_signal = 3,  .max_signal = 3,  .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "uart1_rx",  .min_signal = 4,  .max_signal = 4,  .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "uart1_tx",  .min_signal = 5,  .max_signal = 5,  .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ssp2_rx",   .min_signal = 6,  .max_signal = 6,  .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "ssp2_tx",   .min_signal = 7,  .max_signal = 7,  .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "ssp0_rx",   .min_signal = 8,  .max_signal = 8,  .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ssp0_tx",   .min_signal = 9,  .max_signal = 9,  .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "i2c_rx",    .min_signal = 10, .max_signal = 10, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "i2c_tx",    .min_signal = 11, .max_signal = 11, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "irda",      .min_signal = 12, .max_signal = 12, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "adc",       .min_signal = 13, .max_signal = 13, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "to_jpeg",   .min_signal = 14, .max_signal = 14, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "from_jpeg", .min_signal = 15, .max_signal = 15, .muxval = 0, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras0_rx",   .min_signal = 0,  .max_signal = 0,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras0_tx",   .min_signal = 1,  .max_signal = 1,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras1_rx",   .min_signal = 2,  .max_signal = 2,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras1_tx",   .min_signal = 3,  .max_signal = 3,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras2_rx",   .min_signal = 4,  .max_signal = 4,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras2_tx",   .min_signal = 5,  .max_signal = 5,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras3_rx",   .min_signal = 6,  .max_signal = 6,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras3_tx",   .min_signal = 7,  .max_signal = 7,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras4_rx",   .min_signal = 8,  .max_signal = 8,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras4_tx",   .min_signal = 9,  .max_signal = 9,  .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras5_rx",   .min_signal = 10, .max_signal = 10, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras5_tx",   .min_signal = 11, .max_signal = 11, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras6_rx",   .min_signal = 12, .max_signal = 12, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras6_tx",   .min_signal = 13, .max_signal = 13, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras7_rx",   .min_signal = 14, .max_signal = 14, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ras7_tx",   .min_signal = 15, .max_signal = 15, .muxval = 1, .cctl = 0, .periph_buses = PL08X_AHB1,
	}, { .bus_id = "ext0_rx",   .min_signal = 0,  .max_signal = 0,  .muxval = 2, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "ext0_tx",   .min_signal = 1,  .max_signal = 1,  .muxval = 2, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "ext1_rx",   .min_signal = 2,  .max_signal = 2,  .muxval = 2, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "ext1_tx",   .min_signal = 3,  .max_signal = 3,  .muxval = 2, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "ext2_rx",   .min_signal = 4,  .max_signal = 4,  .muxval = 2, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "ext2_tx",   .min_signal = 5,  .max_signal = 5,  .muxval = 2, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "ext3_rx",   .min_signal = 6,  .max_signal = 6,  .muxval = 2, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "ext3_tx",   .min_signal = 7,  .max_signal = 7,  .muxval = 2, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "ext4_rx",   .min_signal = 8,  .max_signal = 8,  .muxval = 2, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "ext4_tx",   .min_signal = 9,  .max_signal = 9,  .muxval = 2, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "ext5_rx",   .min_signal = 10, .max_signal = 10, .muxval = 2, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "ext5_tx",   .min_signal = 11, .max_signal = 11, .muxval = 2, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "ext6_rx",   .min_signal = 12, .max_signal = 12, .muxval = 2, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "ext6_tx",   .min_signal = 13, .max_signal = 13, .muxval = 2, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "ext7_rx",   .min_signal = 14, .max_signal = 14, .muxval = 2, .cctl = 0, .periph_buses = PL08X_AHB2,
	}, { .bus_id = "ext7_tx",   .min_signal = 15, .max_signal = 15, .muxval = 2, .cctl = 0, .periph_buses = PL08X_AHB2,
	},
};

@@ -373,7 +325,8 @@ static struct pl08x_channel_data spear600_dma_info[] = {
struct pl08x_platform_data pl080_plat_data = {
	.memcpy_channel = {
		.bus_id = "memcpy",
		.cctl = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
		.cctl_memcpy = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
			PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \
			PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \
			PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \
arch/arm/plat-spear/include/plat/pl080.h  (view file @ 0e52d987)

@@ -14,8 +14,8 @@
#ifndef __PLAT_PL080_H
#define __PLAT_PL080_H

struct pl08x_dma_chan;
int pl080_get_signal(struct pl08x_dma_chan *ch);
void pl080_put_signal(struct pl08x_dma_chan *ch);

struct pl08x_channel_data;
int pl080_get_signal(const struct pl08x_channel_data *cd);
void pl080_put_signal(const struct pl08x_channel_data *cd, int signal);

#endif /* __PLAT_PL080_H */
arch/arm/plat-spear/pl080.c  (view file @ 0e52d987)

@@ -27,9 +27,8 @@ struct {
	unsigned char val;
} signals[16] = {{0, 0}, };

int pl080_get_signal(struct pl08x_dma_chan *ch)
int pl080_get_signal(const struct pl08x_channel_data *cd)
{
	const struct pl08x_channel_data *cd = ch->cd;
	unsigned int signal = cd->min_signal, val;
	unsigned long flags;

@@ -63,18 +62,17 @@ int pl080_get_signal(struct pl08x_dma_chan *ch)
	return signal;
}

void pl080_put_signal(struct pl08x_dma_chan *ch)
void pl080_put_signal(const struct pl08x_channel_data *cd, int signal)
{
	const struct pl08x_channel_data *cd = ch->cd;
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);

	/* if signal is not used */
	if (!signals[cd->min_signal].busy)
	if (!signals[signal].busy)
		BUG();

	signals[cd->min_signal].busy--;
	signals[signal].busy--;

	spin_unlock_irqrestore(&lock, flags);
}
drivers/dma/Kconfig  (view file @ 0e52d987)

@@ -53,6 +53,7 @@ config AMBA_PL08X
	bool "ARM PrimeCell PL080 or PL081 support"
	depends on ARM_AMBA && EXPERIMENTAL
	select DMA_ENGINE
	select DMA_VIRTUAL_CHANNELS
	help
	  Platform has a PL08x DMAC device
	  which can provide DMA engine support
drivers/dma/amba-pl08x.c  (view file @ 0e52d987)
@@ -86,10 +86,12 @@
#include <asm/hardware/pl080.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define DRIVER_NAME	"pl08xdmac"

static struct amba_driver pl08x_amba_driver;
struct pl08x_driver_data;

/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives

@@ -118,6 +120,123 @@ struct pl08x_lli {
	u32 cctl;
};

/**
 * struct pl08x_bus_data - information of source or destination
 * busses for a transfer
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
 */
struct pl08x_bus_data {
	dma_addr_t addr;
	u8 maxwidth;
	u8 buswidth;
};

/**
 * struct pl08x_phy_chan - holder for the physical channels
 * @id: physical index to this channel
 * @lock: a lock to use when altering an instance of this struct
 * @serving: the virtual channel currently being served by this physical
 * channel
 * @locked: channel unavailable for the system, e.g. dedicated to secure
 * world
 */
struct pl08x_phy_chan {
	unsigned int id;
	void __iomem *base;
	spinlock_t lock;
	struct pl08x_dma_chan *serving;
	bool locked;
};

/**
 * struct pl08x_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct pl08x_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};

/**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @dsg_list: list of children sg's
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
 * @cctl: control reg values for current txd
 * @ccfg: config reg values for current txd
 * @done: this marks completed descriptors, which should not have their
 * mux released.
 */
struct pl08x_txd {
	struct virt_dma_desc vd;
	struct list_head dsg_list;
	dma_addr_t llis_bus;
	struct pl08x_lli *llis_va;
	/* Default cctl value for LLIs */
	u32 cctl;
	/*
	 * Settings to be put into the physical channel when we
	 * trigger this txd.  Other registers are in llis_va[0].
	 */
	u32 ccfg;
	bool done;
};

/**
 * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
 * states
 * @PL08X_CHAN_IDLE: the channel is idle
 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
 * channel, but the transfer is currently paused
 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum pl08x_dma_chan_state {
	PL08X_CHAN_IDLE,
	PL08X_CHAN_RUNNING,
	PL08X_CHAN_PAUSED,
	PL08X_CHAN_WAITING,
};

/**
 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
 * @vc: wrappped virtual channel
 * @phychan: the physical channel utilized by this channel, if there is one
 * @name: name of channel
 * @cd: channel platform data
 * @runtime_addr: address for RX/TX according to the runtime config
 * @at: active transaction on this channel
 * @lock: a lock for this channel data
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, paused, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 * @signal: the physical DMA request signal which this channel is using
 * @mux_use: count of descriptors using this DMA request signal setting
 */
struct pl08x_dma_chan {
	struct virt_dma_chan vc;
	struct pl08x_phy_chan *phychan;
	const char *name;
	const struct pl08x_channel_data *cd;
	struct dma_slave_config cfg;
	struct pl08x_txd *at;
	struct pl08x_driver_data *host;
	enum pl08x_dma_chan_state state;
	bool slave;
	int signal;
	unsigned mux_use;
};

/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance

@@ -128,7 +247,6 @@ struct pl08x_lli {
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @pool_ctr: counter of LLIs in the pool
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 * fetches
 * @mem_buses: set to indicate memory transfers on AHB2.

@@ -143,10 +261,8 @@ struct pl08x_driver_data {
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	int pool_ctr;
	u8 lli_buses;
	u8 mem_buses;
	spinlock_t lock;
};

/*
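The channel and descriptor structures above now embed the generic virt-dma objects instead of open-coded pending lists. A minimal sketch of that wrapping pattern, using the same container_of() accessor the conversion introduces in the next hunk (the sketch is illustrative, not the complete driver state):

	/* The driver's channel wraps virt_dma_chan by value, not by pointer... */
	struct pl08x_dma_chan {
		struct virt_dma_chan vc;
		/* ...driver-private state... */
	};

	/* ...so the dmaengine core's struct dma_chan maps straight back to it: */
	static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
	{
		return container_of(chan, struct pl08x_dma_chan, vc.chan);
	}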
@@ -162,12 +278,51 @@ struct pl08x_driver_data {
static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, chan);
	return container_of(chan, struct pl08x_dma_chan, vc.chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, tx);
	return container_of(tx, struct pl08x_txd, vd.tx);
}

/*
 * Mux handling.
 *
 * This gives us the DMA request input to the PL08x primecell which the
 * peripheral described by the channel data will be routed to, possibly
 * via a board/SoC specific external MUX.  One important point to note
 * here is that this does not depend on the physical channel.
 */
static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;
	int ret;

	if (plchan->mux_use++ == 0 && pd->get_signal) {
		ret = pd->get_signal(plchan->cd);
		if (ret < 0) {
			plchan->mux_use = 0;
			return ret;
		}

		plchan->signal = ret;
	}
	return 0;
}

static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;

	if (plchan->signal >= 0) {
		WARN_ON(plchan->mux_use == 0);

		if (--plchan->mux_use == 0 && pd->put_signal) {
			pd->put_signal(plchan->cd, plchan->signal);
			plchan->signal = -1;
		}
	}
}

/*

@@ -189,20 +344,25 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
 * been set when the LLIs were constructed.  Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_txd(struct pl08x_dma_chan *plchan, struct pl08x_txd *txd)
static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct pl08x_lli *lli = &txd->llis_va[0];
	struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
	struct pl08x_lli *lli;
	u32 val;

	list_del(&txd->vd.node);

	plchan->at = txd;

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(phychan))
		cpu_relax();

	lli = &txd->llis_va[0];

	dev_vdbg(&pl08x->adev->dev,
		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
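pl08x_request_mux()/pl08x_release_mux() above are a refcount-guarded acquire/release of the DMA request signal: only the first user programs the mux, only the last user returns it. A standalone sketch of that pattern in plain C (hypothetical names, locking omitted) that compiles and runs on its own:

	#include <stdio.h>

	/* Hypothetical stand-ins for pd->get_signal()/pd->put_signal(). */
	static int hw_get_signal(void) { return 5; }	/* pretend mux slot 5 */
	static void hw_put_signal(int sig) { (void)sig; }

	struct chan { unsigned mux_use; int signal; };

	static int request_mux(struct chan *c)
	{
		if (c->mux_use++ == 0) {		/* first user programs the mux */
			int ret = hw_get_signal();
			if (ret < 0) {
				c->mux_use = 0;
				return ret;
			}
			c->signal = ret;
		}
		return 0;
	}

	static void release_mux(struct chan *c)
	{
		if (c->signal >= 0 && --c->mux_use == 0) {	/* last user frees it */
			hw_put_signal(c->signal);
			c->signal = -1;
		}
	}

	int main(void)
	{
		struct chan c = { .mux_use = 0, .signal = -1 };
		request_mux(&c);	/* signal becomes 5 */
		request_mux(&c);	/* second descriptor reuses it */
		release_mux(&c);	/* still held */
		release_mux(&c);	/* now handed back */
		printf("signal after release: %d\n", c.signal);	/* prints -1 */
		return 0;
	}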
@@ -311,10 +471,8 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_phy_chan *ch;
	struct pl08x_txd *txd;
	unsigned long flags;
	size_t bytes = 0;

	spin_lock_irqsave(&plchan->lock, flags);
	ch = plchan->phychan;
	txd = plchan->at;

@@ -354,18 +512,6 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
		}
	}

	/* Sum up all queued transactions */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *txdi;
		list_for_each_entry(txdi, &plchan->pend_list, node) {
			struct pl08x_sg *dsg;
			list_for_each_entry(dsg, &txd->dsg_list, node)
				bytes += dsg->len;
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return bytes;
}

@@ -391,7 +537,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
		if (!ch->locked && !ch->serving) {
			ch->serving = virt_chan;
			ch->signal = -1;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}
@@ -404,25 +549,114 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
		return NULL;
	}

	pm_runtime_get_sync(&pl08x->adev->dev);
	return ch;
}

/* Mark the physical channel as free.  Note, this write is atomic. */
static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
					 struct pl08x_phy_chan *ch)
{
	unsigned long flags;

	ch->serving = NULL;
}

/*
 * Try to allocate a physical channel.  When successful, assign it to
 * this virtual channel, and initiate the next descriptor.  The
 * virtual channel lock must be held at this point.
 */
static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;

	spin_lock_irqsave(&ch->lock, flags);
	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		plchan->state = PL08X_CHAN_WAITING;
		return;
	}

	/* Stop the channel and clear its interrupts */
	pl08x_terminate_phy_chan(pl08x, ch);

	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
		ch->id, plchan->name);

	pm_runtime_put(&pl08x->adev->dev);
	plchan->phychan = ch;
	plchan->state = PL08X_CHAN_RUNNING;
	pl08x_start_next_txd(plchan);
}

	/* Mark it as free */
	ch->serving = NULL;
	spin_unlock_irqrestore(&ch->lock, flags);

static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
	struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
		ch->id, plchan->name);

	/*
	 * We do this without taking the lock; we're really only concerned
	 * about whether this pointer is NULL or not, and we're guaranteed
	 * that this will only be called when it _already_ is non-NULL.
	 */
	ch->serving = plchan;
	plchan->phychan = ch;
	plchan->state = PL08X_CHAN_RUNNING;
	pl08x_start_next_txd(plchan);
}

/*
 * Free a physical DMA channel, potentially reallocating it to another
 * virtual channel if we have any pending.
 */
static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_dma_chan *p, *next;

 retry:
	next = NULL;

	/* Find a waiting virtual channel for the next transfer. */
	list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
		if (p->state == PL08X_CHAN_WAITING) {
			next = p;
			break;
		}

	if (!next) {
		list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
			if (p->state == PL08X_CHAN_WAITING) {
				next = p;
				break;
			}
	}

	/* Ensure that the physical channel is stopped */
	pl08x_terminate_phy_chan(pl08x, plchan->phychan);

	if (next) {
		bool success;

		/*
		 * Eww.  We know this isn't going to deadlock
		 * but lockdep probably doesn't.
		 */
		spin_lock(&next->vc.lock);
		/* Re-check the state now that we have the lock */
		success = next->state == PL08X_CHAN_WAITING;
		if (success)
			pl08x_phy_reassign_start(plchan->phychan, next);
		spin_unlock(&next->vc.lock);

		/* If the state changed, try to find another channel */
		if (!success)
			goto retry;
	} else {
		/* No more jobs, so free up the physical channel */
		pl08x_put_phy_channel(pl08x, plchan->phychan);
	}

	plchan->phychan = NULL;
	plchan->state = PL08X_CHAN_IDLE;
}

/*
@@ -585,8 +819,6 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
		return 0;
	}

	pl08x->pool_ctr++;

	bd.txd = txd;
	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
	cctl = txd->cctl;

@@ -802,18 +1034,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
	return num_llis;
}

/* You should call this with the struct pl08x lock held */
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
			   struct pl08x_txd *txd)
{
	struct pl08x_sg *dsg, *_dsg;

	/* Free the LLI */
	if (txd->llis_va)
		dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

	pl08x->pool_ctr--;

	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
		list_del(&dsg->node);
		kfree(dsg);
@@ -822,133 +1050,75 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
	kfree(txd);
}

static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
				struct pl08x_dma_chan *plchan)
static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
	struct pl08x_txd *txdi = NULL;
	struct pl08x_txd *next;

	if (!list_empty(&plchan->pend_list)) {
		list_for_each_entry_safe(txdi, next, &plchan->pend_list, node) {
			list_del(&txdi->node);
			pl08x_free_txd(pl08x, txdi);
	struct device *dev = txd->vd.tx.chan->device->dev;
	struct pl08x_sg *dsg;

	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->src_addr, dsg->len, DMA_TO_DEVICE);
		else {
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->src_addr, dsg->len, DMA_TO_DEVICE);
		}
	}
	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->dst_addr, dsg->len, DMA_FROM_DEVICE);
		else
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->dst_addr, dsg->len, DMA_FROM_DEVICE);
	}
}

/*
 * The DMA ENGINE API
 */
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
static void pl08x_desc_free(struct virt_dma_desc *vd)
{
	return 0;
}
	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
	struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);

static void pl08x_free_chan_resources(struct dma_chan *chan)
{
	if (!plchan->slave)
		pl08x_unmap_buffers(txd);

	if (!txd->done)
		pl08x_release_mux(plchan);

	pl08x_free_txd(plchan->host, txd);
}

/*
 * This should be called with the channel plchan->lock held
 */
static int prep_phy_channel(struct pl08x_dma_chan *plchan,
			    struct pl08x_txd *txd)
static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
				struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;
	int ret;

	/* Check if we already have a channel */
	if (plchan->phychan) {
		ch = plchan->phychan;
		goto got_channel;
	}
	LIST_HEAD(head);
	struct pl08x_txd *txd;

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		/* No physical channel available, cope with it */
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		return -EBUSY;
	}
	vchan_get_all_descriptors(&plchan->vc, &head);

	/*
	 * OK we have a physical channel: for memcpy() this is all we
	 * need, but for slaves the physical signals may be muxed!
	 * Can the platform allow us to use this channel?
	 */
	if (plchan->slave && pl08x->pd->get_signal) {
		ret = pl08x->pd->get_signal(plchan);
		if (ret < 0) {
			dev_dbg(&pl08x->adev->dev,
				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
				ch->id, plchan->name);
			/* Release physical channel & return */
			pl08x_put_phy_channel(pl08x, ch);
			return -EBUSY;
		}
		ch->signal = ret;
	while (!list_empty(&head)) {
		txd = list_first_entry(&head, struct pl08x_txd, vd.node);
		list_del(&txd->vd.node);
		pl08x_desc_free(&txd->vd);
	}

	plchan->phychan = ch;
	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
		 ch->id, ch->signal, plchan->name);

got_channel:
	/* Assign the flow control signal to this channel */
	if (txd->direction == DMA_MEM_TO_DEV)
		txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
	else if (txd->direction == DMA_DEV_TO_MEM)
		txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;

	plchan->phychan_hold++;

	return 0;
}

static void release_phy_channel(struct pl08x_dma_chan *plchan)
/*
 * The DMA ENGINE API
 */
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
		pl08x->pd->put_signal(plchan);
		plchan->phychan->signal = -1;
	}
	pl08x_put_phy_channel(pl08x, plchan->phychan);
	plchan->phychan = NULL;
	return 0;
}

static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
static void pl08x_free_chan_resources(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
	struct pl08x_txd *txd = to_pl08x_txd(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&plchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	/* Put this onto the pending list */
	list_add_tail(&txd->node, &plchan->pend_list);

	/*
	 * If there was no physical channel available for this memcpy,
	 * stack the request up and indicate that the channel is waiting
	 * for a free physical channel.
	 */
	if (!plchan->slave && !plchan->phychan) {
		/* Do this memcpy whenever there is a channel ready */
		plchan->state = PL08X_CHAN_WAITING;
		plchan->waiting = txd;
	} else {
		plchan->phychan_hold--;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return cookie;
	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(to_virt_chan(chan));
}

static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
@@ -968,23 +1138,53 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS)
		return ret;

	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate) {
		if (plchan->state == PL08X_CHAN_PAUSED)
			ret = DMA_PAUSED;
		return ret;
	}

	spin_lock_irqsave(&plchan->vc.lock, flags);
	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		vd = vchan_find_desc(&plchan->vc, cookie);
		if (vd) {
			/* On the issued list, so hasn't been processed yet */
			struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
			struct pl08x_sg *dsg;

			list_for_each_entry(dsg, &txd->dsg_list, node)
				bytes += dsg->len;
		} else {
			bytes = pl08x_getbytes_chan(plchan);
		}
	}
	spin_unlock_irqrestore(&plchan->vc.lock, flags);

	/*
	 * This cookie not complete yet
	 * Get number of bytes left in the active transactions and queue
	 */
	dma_set_residue(txstate, pl08x_getbytes_chan(plchan));
	dma_set_residue(txstate, bytes);

	if (plchan->state == PL08X_CHAN_PAUSED)
		return DMA_PAUSED;
	if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
		ret = DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return DMA_IN_PROGRESS;
	return ret;
}

/* PrimeCell DMA extension */
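For reference, the residue computed above is what a dmaengine client sees when it polls a submitted cookie. A minimal client-side sketch using the standard dmaengine status call (the consumer variables are assumed):

	/* Sketch: querying transfer status/residue for a previously submitted cookie. */
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
		pr_info("transfer pending, %u bytes left\n", state.residue);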
@@ -1080,38 +1280,14 @@ static u32 pl08x_burst(u32 maxburst)
	return burst_sizes[i].reg;
}

static int dma_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
			  enum dma_slave_buswidth addr_width, u32 maxburst)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	enum dma_slave_buswidth addr_width;
	u32 width, burst, maxburst;
	u32 cctl = 0;

	if (!plchan->slave)
		return -EINVAL;

	/* Transfer direction */
	plchan->runtime_direction = config->direction;
	if (config->direction == DMA_MEM_TO_DEV) {
		addr_width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else if (config->direction == DMA_DEV_TO_MEM) {
		addr_width = config->src_addr_width;
		maxburst = config->src_maxburst;
	} else {
		dev_err(&pl08x->adev->dev, "bad runtime_config: alien transfer direction\n");
		return -EINVAL;
	}
	u32 width, burst, cctl = 0;

	width = pl08x_width(addr_width);
	if (width == ~0) {
		dev_err(&pl08x->adev->dev, "bad runtime_config: alien address width\n");
		return -EINVAL;
	}
	if (width == ~0)
		return ~0;

	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;

@@ -1128,28 +1304,23 @@ static int dma_set_runtime_config(struct dma_chan *chan,
	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;

	plchan->device_fc = config->device_fc;
	return pl08x_cctl(cctl);
}

	if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
		plchan->src_addr = config->src_addr;
		plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
			pl08x_select_bus(plchan->cd->periph_buses, pl08x->mem_buses);
	} else {
		plchan->dst_addr = config->dst_addr;
		plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
			pl08x_select_bus(pl08x->mem_buses, plchan->cd->periph_buses);
	}
static int dma_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);

	dev_dbg(&pl08x->adev->dev,
		"configured channel %s (%s) for %s, data width %d, "
		"maxburst %d words, LE, CCTL=0x%08x\n",
		dma_chan_name(chan), plchan->name,
		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
		addr_width, maxburst, cctl);
	if (!plchan->slave)
		return -EINVAL;

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	plchan->cfg = *config;

	return 0;
}
@@ -1163,95 +1334,19 @@ static void pl08x_issue_pending(struct dma_chan *chan)
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);
	/* Something is already active, or we're waiting for a channel... */
	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return;
	}

	/* Take the first element in the queue and execute it */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->pend_list, struct pl08x_txd, node);
		list_del(&next->node);

		plchan->state = PL08X_CHAN_RUNNING;
		pl08x_start_txd(plchan, next);
	spin_lock_irqsave(&plchan->vc.lock, flags);
	if (vchan_issue_pending(&plchan->vc)) {
		if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
			pl08x_phy_alloc_and_start(plchan);
	}

	spin_unlock_irqrestore(&plchan->lock, flags);
}

static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
					struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int num_llis, ret;

	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
	if (!num_llis) {
		spin_lock_irqsave(&plchan->lock, flags);
		pl08x_free_txd(pl08x, txd);
		spin_unlock_irqrestore(&plchan->lock, flags);
		return -EINVAL;
	}

	spin_lock_irqsave(&plchan->lock, flags);

	/*
	 * See if we already have a physical channel allocated,
	 * else this is the time to try to get one.
	 */
	ret = prep_phy_channel(plchan, txd);
	if (ret) {
		/*
		 * No physical channel was available.
		 *
		 * memcpy transfers can be sorted out at submission time.
		 *
		 * Slave transfers may have been denied due to platform
		 * channel muxing restrictions.  Since there is no guarantee
		 * that this will ever be resolved, and the signal must be
		 * acquired AFTER acquiring the physical channel, we will let
		 * them be NACK:ed with -EBUSY here.  The drivers can retry
		 * the prep() call if they are eager on doing this using DMA.
		 */
		if (plchan->slave) {
			pl08x_free_txd_list(pl08x, plchan);
			pl08x_free_txd(pl08x, txd);
			spin_unlock_irqrestore(&plchan->lock, flags);
			return -EBUSY;
		}
	} else
		/*
		 * Else we're all set, paused and ready to roll, status
		 * will switch to PL08X_CHAN_RUNNING when we call
		 * issue_pending().  If there is something running on the
		 * channel already we don't change its state.
		 */
		if (plchan->state == PL08X_CHAN_IDLE)
			plchan->state = PL08X_CHAN_PAUSED;

	spin_unlock_irqrestore(&plchan->lock, flags);

	return 0;
	spin_unlock_irqrestore(&plchan->vc.lock, flags);
}

static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
	unsigned long flags)
static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

	if (txd) {
		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
		txd->tx.flags = flags;
		txd->tx.tx_submit = pl08x_tx_submit;
		INIT_LIST_HEAD(&txd->node);
		INIT_LIST_HEAD(&txd->dsg_list);

		/* Always enable error and terminal interrupts */
@@ -1274,7 +1369,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
	struct pl08x_sg *dsg;
	int ret;

	txd = pl08x_get_txd(plchan, flags);
	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no memory for descriptor\n", __func__);

@@ -1290,14 +1385,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
	}
	list_add_tail(&dsg->node, &txd->dsg_list);

	txd->direction = DMA_NONE;
	dsg->src_addr = src;
	dsg->dst_addr = dest;
	dsg->len = len;

	/* Set platform data for m2m */
	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
	txd->cctl = pl08x->pd->memcpy_channel.cctl &
	txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy &
			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);

	/* Both to be incremented or the code will break */

@@ -1307,11 +1401,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
		txd->cctl |= pl08x_select_bus(pl08x->mem_buses, pl08x->mem_buses);

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return &txd->tx;
	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}

static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
@@ -1324,36 +1420,40 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	struct scatterlist *sg;
	enum dma_slave_buswidth addr_width;
	dma_addr_t slave_addr;
	int ret, tmp;
	u8 src_buses, dst_buses;
	u32 maxburst, cctl;

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
			__func__, sg_dma_len(sgl), plchan->name);

	txd = pl08x_get_txd(plchan, flags);
	txd = pl08x_get_txd(plchan);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	if (direction != plchan->runtime_direction)
		dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
			"the direction configured for the PrimeCell\n",
			__func__);

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	txd->direction = direction;

	if (direction == DMA_MEM_TO_DEV) {
		txd->cctl = plchan->dst_cctl;
		slave_addr = plchan->dst_addr;
		cctl = PL080_CONTROL_SRC_INCR;
		slave_addr = plchan->cfg.dst_addr;
		addr_width = plchan->cfg.dst_addr_width;
		maxburst = plchan->cfg.dst_maxburst;
		src_buses = pl08x->mem_buses;
		dst_buses = plchan->cd->periph_buses;
	} else if (direction == DMA_DEV_TO_MEM) {
		txd->cctl = plchan->src_cctl;
		slave_addr = plchan->src_addr;
		cctl = PL080_CONTROL_DST_INCR;
		slave_addr = plchan->cfg.src_addr;
		addr_width = plchan->cfg.src_addr_width;
		maxburst = plchan->cfg.src_maxburst;
		src_buses = plchan->cd->periph_buses;
		dst_buses = pl08x->mem_buses;
	} else {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,

@@ -1361,7 +1461,17 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		return NULL;
	}

	if (plchan->device_fc)
	cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
	if (cctl == ~0) {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev, "DMA slave configuration botched?\n");
		return NULL;
	}

	txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);

	if (plchan->cfg.device_fc)
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
			PL080_FLOW_PER2MEM_PER;
	else
@@ -1370,9 +1480,28 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;

	ret = pl08x_request_mux(plchan);
	if (ret < 0) {
		pl08x_free_txd(pl08x, txd);
		dev_dbg(&pl08x->adev->dev,
			"unable to mux for transfer on %s due to platform restrictions\n",
			plchan->name);
		return NULL;
	}

	dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n",
		 plchan->signal, plchan->name);

	/* Assign the flow control signal to this channel */
	if (direction == DMA_MEM_TO_DEV)
		txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
	else
		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;

	for_each_sg(sgl, sg, sg_len, tmp) {
		dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
		if (!dsg) {
			pl08x_release_mux(plchan);
			pl08x_free_txd(pl08x, txd);
			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n", __func__);

@@ -1390,11 +1519,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		}
	}

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
	ret = pl08x_fill_llis_for_desc(plchan->host, txd);
	if (!ret) {
		pl08x_release_mux(plchan);
		pl08x_free_txd(pl08x, txd);
		return NULL;
	}

	return &txd->tx;
	return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}

static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* Anything succeeds on channels with no physical allocation and
* no queued transfers.
*/
spin_lock_irqsave
(
&
plchan
->
lock
,
flags
);
spin_lock_irqsave
(
&
plchan
->
vc
.
lock
,
flags
);
if
(
!
plchan
->
phychan
&&
!
plchan
->
at
)
{
spin_unlock_irqrestore
(
&
plchan
->
lock
,
flags
);
spin_unlock_irqrestore
(
&
plchan
->
vc
.
lock
,
flags
);
return
0
;
}
...
...
@@ -1426,18 +1558,15 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
plchan
->
state
=
PL08X_CHAN_IDLE
;
if
(
plchan
->
phychan
)
{
pl08x_terminate_phy_chan
(
pl08x
,
plchan
->
phychan
);
/*
* Mark physical channel as free and free any slave
* signal
*/
release_phy_channel
(
plchan
);
plchan
->
phychan_hold
=
0
;
pl08x_phy_free
(
plchan
);
}
/* Dequeue jobs and free LLIs */
if
(
plchan
->
at
)
{
pl08x_
free_txd
(
pl08x
,
plchan
->
at
);
pl08x_
desc_free
(
&
plchan
->
at
->
vd
);
plchan
->
at
=
NULL
;
}
/* Dequeue jobs not yet fired as well */
...
...
@@ -1457,7 +1586,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
break
;
}
spin_unlock_irqrestore
(
&
plchan
->
lock
,
flags
);
spin_unlock_irqrestore
(
&
plchan
->
vc
.
lock
,
flags
);
return
ret
;
}
...
...
@@ -1494,123 +1623,6 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
}

static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
	struct device *dev = txd->tx.chan->device->dev;
	struct pl08x_sg *dsg;

	if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->src_addr, dsg->len, DMA_TO_DEVICE);
		else {
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->src_addr, dsg->len, DMA_TO_DEVICE);
		}
	}
	if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->dst_addr, dsg->len, DMA_FROM_DEVICE);
		else
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->dst_addr, dsg->len, DMA_FROM_DEVICE);
	}
}

static void pl08x_tasklet(unsigned long data)
{
	struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);

	txd = plchan->at;
	plchan->at = NULL;

	if (txd) {
		/* Update last completed */
		dma_cookie_complete(&txd->tx);
	}

	/* If a new descriptor is queued, set it up plchan->at is NULL here */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->pend_list, struct pl08x_txd, node);
		list_del(&next->node);

		pl08x_start_txd(plchan, next);
	} else if (plchan->phychan_hold) {
		/*
		 * This channel is still in use - we have a new txd being
		 * prepared and will soon be queued.  Don't give up the
		 * physical channel.
		 */
	} else {
		struct pl08x_dma_chan *waiting = NULL;

		/*
		 * No more jobs, so free up the physical channel
		 * Free any allocated signal on slave transfers too
		 */
		release_phy_channel(plchan);
		plchan->state = PL08X_CHAN_IDLE;

		/*
		 * And NOW before anyone else can grab that free:d up
		 * physical channel, see if there is some memcpy pending
		 * that seriously needs to start because of being stacked
		 * up while we were choking the physical channels with data.
		 */
		list_for_each_entry(waiting, &pl08x->memcpy.channels, chan.device_node) {
			if (waiting->state == PL08X_CHAN_WAITING &&
				waiting->waiting != NULL) {
				int ret;

				/* This should REALLY not fail now */
				ret = prep_phy_channel(waiting, waiting->waiting);
				BUG_ON(ret);
				waiting->phychan_hold--;
				waiting->state = PL08X_CHAN_RUNNING;
				waiting->waiting = NULL;
				pl08x_issue_pending(&waiting->chan);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	if (txd) {
		dma_async_tx_callback callback = txd->tx.callback;
		void *callback_param = txd->tx.callback_param;

		/* Don't try to unmap buffers on slave channels */
		if (!plchan->slave)
			pl08x_unmap_buffers(txd);

		/* Free the descriptor */
		spin_lock_irqsave(&plchan->lock, flags);
		pl08x_free_txd(pl08x, txd);
		spin_unlock_irqrestore(&plchan->lock, flags);

		/* Callback to signal completion */
		if (callback)
			callback(callback_param);
	}
}

static irqreturn_t pl08x_irq(int irq, void *dev)
{
	struct pl08x_driver_data *pl08x = dev;
@@ -1635,6 +1647,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
		/* Locate physical channel */
		struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
		struct pl08x_dma_chan *plchan = phychan->serving;
		struct pl08x_txd *tx;

		if (!plchan) {
			dev_err(&pl08x->adev->dev,

@@ -1643,8 +1656,29 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
			continue;
		}

		/* Schedule tasklet on this channel */
		tasklet_schedule(&plchan->tasklet);
		spin_lock(&plchan->vc.lock);
		tx = plchan->at;
		if (tx) {
			plchan->at = NULL;
			/*
			 * This descriptor is done, release its mux
			 * reservation.
			 */
			pl08x_release_mux(plchan);
			tx->done = true;
			vchan_cookie_complete(&tx->vd);

			/*
			 * And start the next descriptor (if any),
			 * otherwise free this channel.
			 */
			if (vchan_next_desc(&plchan->vc))
				pl08x_start_next_txd(plchan);
			else
				pl08x_phy_free(plchan);
		}
		spin_unlock(&plchan->vc.lock);

		mask |= (1 << i);
	}
}
@@ -1654,16 +1688,10 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
{
	u32 cctl = pl08x_cctl(chan->cd->cctl);

	chan->slave = true;
	chan->name = chan->cd->bus_id;
	chan->src_addr = chan->cd->addr;
	chan->dst_addr = chan->cd->addr;
	chan->src_cctl = cctl | PL080_CONTROL_DST_INCR |
		pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses);
	chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR |
		pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses);
	chan->cfg.src_addr = chan->cd->addr;
	chan->cfg.dst_addr = chan->cd->addr;
}
/*
...
...
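The defaults set up in pl08x_dma_slave_init() above come from platform data; a peripheral driver can still override the FIFO address, width and burst size at runtime through the standard dmaengine call. A hedged sketch, assuming the channel pointer and FIFO address are obtained elsewhere:

#include <linux/dmaengine.h>

static int foo_config_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 4,
	};

	/* Hands the runtime configuration down to the DMA driver */
	return dmaengine_slave_config(chan, &cfg);
}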
@@ -1693,6 +1721,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
		chan->host = pl08x;
		chan->state = PL08X_CHAN_IDLE;
		chan->signal = -1;

		if (slave) {
			chan->cd = &pl08x->pd->slave_channels[i];
...
...
@@ -1705,26 +1734,12 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
				return -ENOMEM;
			}
		}
		if (chan->cd->circular_buffer) {
			dev_err(&pl08x->adev->dev,
				"channel %s: circular buffers not supported\n",
				chan->name);
			kfree(chan);
			continue;
		}
		dev_dbg(&pl08x->adev->dev,
			 "initialize virtual channel \"%s\"\n",
			 chan->name);

		chan->chan.device = dmadev;
		dma_cookie_init(&chan->chan);

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->pend_list);

		tasklet_init(&chan->tasklet, pl08x_tasklet,
			     (unsigned long) chan);

		list_add_tail(&chan->chan.device_node, &dmadev->channels);
		chan->vc.desc_free = pl08x_desc_free;
		vchan_init(&chan->vc, dmadev);
	}
	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
...
...
@@ -1737,8 +1752,8 @@ static void pl08x_free_virtual_channels(struct dma_device *dmadev)
	struct pl08x_dma_chan *next;

	list_for_each_entry_safe(chan, next, &dmadev->channels,
				 chan.device_node) {
		list_del(&chan->chan.device_node);
				 next, &dmadev->channels, vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		kfree(chan);
	}
}
...
...
@@ -1791,7 +1806,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
	seq_printf(s, "\nPL08x virtual memcpy channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
	list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}
...
...
@@ -1799,7 +1814,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
	seq_printf(s, "\nPL08x virtual slave channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
	list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}
...
...
@@ -1851,9 +1866,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
		goto out_no_pl08x;
	}

	pm_runtime_set_active(&adev->dev);
	pm_runtime_enable(&adev->dev);

	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
	pl08x->memcpy.dev = &adev->dev;
...
...
@@ -1903,8 +1915,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
		goto out_no_lli_pool;
	}

	spin_lock_init(&pl08x->lock);

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
...
...
@@ -1942,7 +1952,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		spin_lock_init(&ch->lock);
		ch->signal = -1;

		/*
		 * Nomadik variants can have channels that are locked
...
...
@@ -2007,7 +2016,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
		 amba_part(adev), amba_rev(adev),
		 (unsigned long long)adev->res.start, adev->irq[0]);

	pm_runtime_put(&adev->dev);

	return 0;

out_no_slave_reg:
...
...
@@ -2026,9 +2034,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
	dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
	pm_runtime_put(&adev->dev);
	pm_runtime_disable(&adev->dev);

	kfree(pl08x);
out_no_pl08x:
	amba_release_regions(adev);
...
...
drivers/dma/sa11x0-dma.c
...
...
@@ -78,6 +78,8 @@ struct sa11x0_dma_desc {
	u32			ddar;
	size_t			size;
	unsigned		period;
	bool			cyclic;

	unsigned		sglen;
	struct sa11x0_dma_sg	sg[0];
...
...
@@ -178,19 +180,24 @@ static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
		return;

	if (p->sg_load == txd->sglen) {
		struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
		if (!txd->cyclic) {
			struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);

		/*
		 * We have reached the end of the current descriptor.
		 * Peek at the next descriptor, and if compatible with
		 * the current, start processing it.
		 */
		if (txn && txn->ddar == txd->ddar) {
			txd = txn;
			sa11x0_dma_start_desc(p, txn);
			/*
			 * We have reached the end of the current descriptor.
			 * Peek at the next descriptor, and if compatible with
			 * the current, start processing it.
			 */
			if (txn && txn->ddar == txd->ddar) {
				txd = txn;
				sa11x0_dma_start_desc(p, txn);
			} else {
				p->txd_load = NULL;
				return;
			}
		} else {
			p->txd_load = NULL;
			return;
			/* Cyclic: reset back to beginning */
			p->sg_load = 0;
		}
	}
...
...
@@ -224,13 +231,21 @@ static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_desc *txd = p->txd_done;

	if (++p->sg_done == txd->sglen) {
		vchan_cookie_complete(&txd->vd);
		if (!txd->cyclic) {
			vchan_cookie_complete(&txd->vd);

		p->sg_done = 0;
		p->txd_done = p->txd_load;
			p->sg_done = 0;
			p->txd_done = p->txd_load;

		if (!p->txd_done)
			tasklet_schedule(&p->dev->task);
			if (!p->txd_done)
				tasklet_schedule(&p->dev->task);
		} else {
			if ((p->sg_done % txd->period) == 0)
				vchan_cyclic_callback(&txd->vd);

			/* Cyclic: reset back to beginning */
			p->sg_done = 0;
		}
	}

	sa11x0_dma_start_sg(p, c);
...
...
@@ -416,27 +431,47 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	struct sa11x0_dma_desc *txd;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_SUCCESS)
		return ret;

	if (!state)
		return c->status;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;
	ret = c->status;
	if (p) {
		dma_addr_t addr = sa11x0_dma_pos(p);

		dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
	} else if (!p) {
		state->residue = 0;
	} else {
		struct sa11x0_dma_desc *txd;
		size_t bytes = 0;

		txd = p->txd_done;
		if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
			txd = p->txd_done;
		else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
			txd = p->txd_load;
		else
			txd = NULL;

		ret = c->status;
		if (txd) {
			dma_addr_t addr = sa11x0_dma_pos(p);
			unsigned i;

			dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);

			for (i = 0; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
					 i, txd->sg[i].addr, txd->sg[i].len);
...
...
@@ -459,18 +494,11 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
				bytes += txd->sg[i].len;
			}
		}
		if (txd != p->txd_load && p->txd_load)
			bytes += p->txd_load->size;
	}
	list_for_each_entry(txd, &c->vc.desc_issued, vd.node) {
		bytes += txd->size;
		state->residue = bytes;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	if (state)
		state->residue = bytes;
	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes);
	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue);

	return ret;
}
...
...
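The rework above makes sa11x0_dma_tx_status() report a residue for completed, in-flight and still-queued descriptors. A hedged client-side sketch of reading that residue back, assuming the channel and cookie come from an earlier prep/submit; in this kernel generation the "done" status is DMA_SUCCESS (later renamed DMA_COMPLETE):

#include <linux/dmaengine.h>

static size_t foo_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_SUCCESS)
		return 0;		/* transfer finished, nothing left */

	/* For in-flight or queued transfers the driver fills in the residue */
	return state.residue;
}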
@@ -584,6 +612,65 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
	return vchan_tx_prep(&c->vc, &txd->vd, flags);
}

static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
	enum dma_transfer_direction dir, void *context)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	unsigned i, j, k, sglen, sgperiod;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
	sglen = size * sgperiod / period;

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	for (i = k = 0; i < size / period; i++) {
		size_t tlen, len = period;

		for (j = 0; j < sgperiod; j++, k++) {
			tlen = len;

			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[k].addr = addr;
			txd->sg[k].len = tlen;
			addr += tlen;
			len -= tlen;
		}

		WARN_ON(len != 0);
	}

	WARN_ON(k != sglen);

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = sglen;
	txd->cyclic = 1;
	txd->period = sgperiod;

	return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}

static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c,
	struct dma_slave_config *cfg)
{
	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
...
...
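The new sa11x0_dma_prep_dma_cyclic() above splits each period into sgperiod chunks that each fit the hardware limit while preserving alignment. A small standalone illustration of that arithmetic follows; it is not part of the commit, and DMA_MAX_SIZE/DMA_ALIGN below are illustrative stand-ins for the driver's real constants in drivers/dma/sa11x0-dma.c.

#include <stdio.h>
#include <stddef.h>

#define DMA_MAX_SIZE	0x1fffu			/* assumed for illustration */
#define DMA_ALIGN	0x3u			/* assumed for illustration */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	size_t size = 65536, period = 16384;
	unsigned sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
	unsigned sglen = size * sgperiod / period;
	unsigned i, j, k = 0;

	printf("sgperiod=%u sglen=%u\n", sgperiod, sglen);

	for (i = 0; i < size / period; i++) {
		size_t tlen, len = period;

		for (j = 0; j < sgperiod; j++, k++) {
			tlen = len;
			if (tlen > DMA_MAX_SIZE) {
				/* Split into roughly equal, aligned chunks */
				unsigned mult = DIV_ROUND_UP(tlen,
						DMA_MAX_SIZE & ~DMA_ALIGN);
				tlen = (tlen / mult) & ~(size_t)DMA_ALIGN;
			}
			printf("period %u sg[%u]: len %zu\n", i, k, tlen);
			len -= tlen;
		}
		/* Mirrors the driver's WARN_ON(len != 0) sanity check */
		if (len != 0)
			printf("period %u left %zu bytes unsplit!\n", i, len);
	}
	return 0;
}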
@@ -854,7 +941,9 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
	}

	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
	if (ret) {
		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
...
...
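With DMA_CYCLIC advertised in the probe hunk above, audio-style clients can request a circular transfer through the generic dmaengine API. A hedged sketch follows, assuming the channel, DMA address and per-period callback are set up elsewhere; the dmaengine_prep_dma_cyclic() wrapper's signature has changed across kernel versions (a flags argument was added later), so the exact call may differ.

#include <linux/dmaengine.h>

static int foo_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
			    size_t buf_len, size_t period_len,
			    dma_async_tx_callback period_cb, void *cb_arg)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	/* vchan_cyclic_callback() in the driver invokes this once per period */
	desc->callback = period_cb;
	desc->callback_param = cb_arg;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}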
include/linux/amba/pl08x.h
...
...
@@ -21,8 +21,9 @@
#include <linux/dmaengine.h>
#include <linux/interrupt.h>

struct pl08x_lli;
struct pl08x_driver_data;
struct pl08x_phy_chan;
struct pl08x_txd;

/* Bitmasks for selecting AHB ports for DMA transfers */
enum {
...
...
@@ -46,169 +47,28 @@ enum {
 * devices with static assignments
 * @muxval: a number usually used to poke into some mux regiser to
 * mux in the signal to this channel
 * @cctl_opt: default options for the channel control register
 * @cctl_memcpy: options for the channel control register for memcpy
 *  *** not used for slave channels ***
 * @addr: source/target address in physical memory for this DMA channel,
 * can be the address of a FIFO register for burst requests for example.
 * This can be left undefined if the PrimeCell API is used for configuring
 * this.
 * @circular_buffer: whether the buffer passed in is circular and
 * shall simply be looped round round (like a record baby round
 * round round round)
 * @single: the device connected to this channel will request single DMA
 * transfers, not bursts. (Bursts are default.)
 * @periph_buses: the device connected to this channel is accessible via
 * these buses (use PL08X_AHB1 | PL08X_AHB2).
 */
struct pl08x_channel_data {
	char *bus_id;
	const char *bus_id;
	int min_signal;
	int max_signal;
	u32 muxval;
	u32 cctl;
	u32 cctl_memcpy;
	dma_addr_t addr;
	bool circular_buffer;
	bool single;
	u8 periph_buses;
};

/**
 * Struct pl08x_bus_data - information of source or destination
 * busses for a transfer
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
 */
struct pl08x_bus_data {
	dma_addr_t addr;
	u8 maxwidth;
	u8 buswidth;
};

/**
 * struct pl08x_phy_chan - holder for the physical channels
 * @id: physical index to this channel
 * @lock: a lock to use when altering an instance of this struct
 * @signal: the physical signal (aka channel) serving this physical channel
 * right now
 * @serving: the virtual channel currently being served by this physical
 * channel
 * @locked: channel unavailable for the system, e.g. dedicated to secure
 * world
 */
struct pl08x_phy_chan {
	unsigned int id;
	void __iomem *base;
	spinlock_t lock;
	int signal;
	struct pl08x_dma_chan *serving;
	bool locked;
};

/**
 * struct pl08x_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct pl08x_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};

/**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
 * @tx: async tx descriptor
 * @node: node for txd list for channels
 * @dsg_list: list of children sg's
 * @direction: direction of transfer
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
 * @cctl: control reg values for current txd
 * @ccfg: config reg values for current txd
 */
struct pl08x_txd {
	struct dma_async_tx_descriptor tx;
	struct list_head node;
	struct list_head dsg_list;
	enum dma_transfer_direction direction;
	dma_addr_t llis_bus;
	struct pl08x_lli *llis_va;
	/* Default cctl value for LLIs */
	u32 cctl;
	/*
	 * Settings to be put into the physical channel when we
	 * trigger this txd.  Other registers are in llis_va[0].
	 */
	u32 ccfg;
};

/**
 * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
 * states
 * @PL08X_CHAN_IDLE: the channel is idle
 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
 * channel, but the transfer is currently paused
 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum pl08x_dma_chan_state {
	PL08X_CHAN_IDLE,
	PL08X_CHAN_RUNNING,
	PL08X_CHAN_PAUSED,
	PL08X_CHAN_WAITING,
};

/**
 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
 * @chan: wrappped abstract channel
 * @phychan: the physical channel utilized by this channel, if there is one
 * @phychan_hold: if non-zero, hold on to the physical channel even if we
 * have no pending entries
 * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
 * @name: name of channel
 * @cd: channel platform data
 * @runtime_addr: address for RX/TX according to the runtime config
 * @runtime_direction: current direction of this channel according to
 * runtime config
 * @pend_list: queued transactions pending on this channel
 * @at: active transaction on this channel
 * @lock: a lock for this channel data
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, paused, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave
 * channels. Fill with 'true' if peripheral should be flow controller. Direction
 * will be selected at Runtime.
 * @waiting: a TX descriptor on this channel which is waiting for a physical
 * channel to become available
 */
struct pl08x_dma_chan {
	struct dma_chan chan;
	struct pl08x_phy_chan *phychan;
	int phychan_hold;
	struct tasklet_struct tasklet;
	char *name;
	const struct pl08x_channel_data *cd;
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	u32 src_cctl;
	u32 dst_cctl;
	enum dma_transfer_direction runtime_direction;
	struct list_head pend_list;
	struct pl08x_txd *at;
	spinlock_t lock;
	struct pl08x_driver_data *host;
	enum pl08x_dma_chan_state state;
	bool slave;
	bool device_fc;
	struct pl08x_txd *waiting;
};

/**
 * struct pl08x_platform_data - the platform configuration for the PL08x
 * PrimeCells.
...
...
@@ -229,8 +89,8 @@ struct pl08x_platform_data {
	const struct pl08x_channel_data *slave_channels;
	unsigned int num_slave_channels;
	struct pl08x_channel_data memcpy_channel;
	int (*get_signal)(struct pl08x_dma_chan *);
	void (*put_signal)(struct pl08x_dma_chan *);
	int (*get_signal)(const struct pl08x_channel_data *);
	void (*put_signal)(const struct pl08x_channel_data *, int);
	u8 lli_buses;
	u8 mem_buses;
};
...
...
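The platform-data hunk above changes the get_signal()/put_signal() prototypes to take the channel data (and, on release, the signal number) rather than the DMA channel. A hedged sketch of what a board file might provide against the new prototypes; the board_* names and mux handling are made up for illustration, and a real platform also fills slave_channels, num_slave_channels and memcpy_channel.

#include <linux/amba/pl08x.h>

static int board_get_signal(const struct pl08x_channel_data *cd)
{
	/* e.g. program a request mux from cd->muxval, then hand back a signal */
	return cd->min_signal;
}

static void board_put_signal(const struct pl08x_channel_data *cd, int signal)
{
	/* undo whatever board_get_signal() set up for this signal */
}

static struct pl08x_platform_data board_pl08x_pd = {
	.get_signal	= board_get_signal,
	.put_signal	= board_put_signal,
	.lli_buses	= PL08X_AHB1,
	.mem_buses	= PL08X_AHB1,
};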