Kirill Smelkov / linux / Commits

Commit 2ffb850e, authored Apr 10, 2018 by Vinod Koul
Parents: 3a7b850d, 9df3bd55

    Merge branch 'topic/stm' into for-linus

Showing 2 changed files, with 243 additions and 50 deletions:

    Documentation/devicetree/bindings/dma/stm32-dma.txt   +3   -3
    drivers/dma/stm32-dma.c                                +240 -47

Documentation/devicetree/bindings/dma/stm32-dma.txt @ 2ffb850e
...
...
@@ -62,14 +62,14 @@ channel: a phandle to the DMA controller plus the following four integer cells:
         0x1: medium
         0x2: high
         0x3: very high
-4. A 32bit mask specifying the DMA FIFO threshold configuration which are device
-   dependent:
- -bit 0-1: Fifo threshold
+4. A 32bit bitfield value specifying DMA features which are device dependent:
+ -bit 0-1: DMA FIFO threshold selection
         0x0: 1/4 full FIFO
         0x1: 1/2 full FIFO
         0x2: 3/4 full FIFO
         0x3: full FIFO

 Example:

 usart1: serial@40011000 {
...
...
drivers/dma/stm32-dma.c @ 2ffb850e
...
...
@@ -5,6 +5,7 @@
  *
  * Copyright (C) M'boumba Cedric Madianga 2015
  * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
+ *         Pierre-Yves Mordret <pierre-yves.mordret@st.com>
  *
  * License terms: GNU General Public License (GPL), version 2
  */
...
...
@@ -33,9 +34,14 @@
#define STM32_DMA_LIFCR 0x0008
/* DMA Low Int Flag Clear Reg */
#define STM32_DMA_HIFCR 0x000c
/* DMA High Int Flag Clear Reg */
#define STM32_DMA_TCI BIT(5)
/* Transfer Complete Interrupt */
#define STM32_DMA_HTI BIT(4)
/* Half Transfer Interrupt */
#define STM32_DMA_TEI BIT(3)
/* Transfer Error Interrupt */
#define STM32_DMA_DMEI BIT(2)
/* Direct Mode Error Interrupt */
#define STM32_DMA_FEI BIT(0)
/* FIFO Error Interrupt */
#define STM32_DMA_MASKI (STM32_DMA_TCI \
| STM32_DMA_TEI \
| STM32_DMA_DMEI \
| STM32_DMA_FEI)
/* DMA Stream x Configuration Register */
#define STM32_DMA_SCR(x) (0x0010 + 0x18 * (x))
/* x = 0..7 */
...
...
@@ -60,7 +66,8 @@
 #define STM32_DMA_SCR_PINC              BIT(9) /* Peripheral increment mode */
 #define STM32_DMA_SCR_CIRC              BIT(8) /* Circular mode */
 #define STM32_DMA_SCR_PFCTRL            BIT(5) /* Peripheral Flow Controller */
-#define STM32_DMA_SCR_TCIE              BIT(4) /* Transfer Cplete Int Enable*/
+#define STM32_DMA_SCR_TCIE              BIT(4) /* Transfer Complete Int Enable
+                                                */
 #define STM32_DMA_SCR_TEIE              BIT(2) /* Transfer Error Int Enable */
 #define STM32_DMA_SCR_DMEIE             BIT(1) /* Direct Mode Err Int Enable */
 #define STM32_DMA_SCR_EN                BIT(0) /* Stream Enable */
...
...
@@ -111,11 +118,24 @@
 #define STM32_DMA_FIFO_THRESHOLD_FULL           0x03

 #define STM32_DMA_MAX_DATA_ITEMS        0xffff
+/*
+ * Valid transfer starts from @0 to @0xFFFE leading to unaligned scatter
+ * gather at boundary. Thus it's safer to round down this value on FIFO
+ * size (16 Bytes)
+ */
+#define STM32_DMA_ALIGNED_MAX_DATA_ITEMS        \
+        ALIGN_DOWN(STM32_DMA_MAX_DATA_ITEMS, 16)
 #define STM32_DMA_MAX_CHANNELS          0x08
 #define STM32_DMA_MAX_REQUEST_ID        0x08
 #define STM32_DMA_MAX_DATA_PARAM        0x03
+#define STM32_DMA_FIFO_SIZE             16      /* FIFO is 16 bytes */
+#define STM32_DMA_MIN_BURST             4
 #define STM32_DMA_MAX_BURST             16

+/* DMA Features */
+#define STM32_DMA_THRESHOLD_FTR_MASK    GENMASK(1, 0)
+#define STM32_DMA_THRESHOLD_FTR_GET(n)  ((n) & STM32_DMA_THRESHOLD_FTR_MASK)
+
 enum stm32_dma_width {
         STM32_DMA_BYTE,
         STM32_DMA_HALF_WORD,
...
...
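Note on the new STM32_DMA_THRESHOLD_FTR_* macros: they are what ties the binding change above to the driver, since bits 0-1 of the fourth DMA cell now carry the FIFO threshold selection. Below is a minimal standalone sketch of that decoding; the GENMASK stand-in and the example features value are local assumptions for illustration, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel macros used by the driver. */
#define GENMASK(h, l)                   (((~0u) << (l)) & (~0u >> (31 - (h))))
#define STM32_DMA_THRESHOLD_FTR_MASK    GENMASK(1, 0)
#define STM32_DMA_THRESHOLD_FTR_GET(n)  ((n) & STM32_DMA_THRESHOLD_FTR_MASK)

int main(void)
{
        /* Hypothetical fourth DT cell: bits 0-1 = 0x2 -> 3/4 full FIFO. */
        uint32_t features = 0x2;

        printf("threshold = 0x%x\n", STM32_DMA_THRESHOLD_FTR_GET(features));
        return 0;
}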
@@ -129,11 +149,18 @@ enum stm32_dma_burst_size {
         STM32_DMA_BURST_INCR16,
 };

+/**
+ * struct stm32_dma_cfg - STM32 DMA custom configuration
+ * @channel_id: channel ID
+ * @request_line: DMA request
+ * @stream_config: 32bit mask specifying the DMA channel configuration
+ * @features: 32bit mask specifying the DMA Feature list
+ */
 struct stm32_dma_cfg {
         u32 channel_id;
         u32 request_line;
         u32 stream_config;
-        u32 threshold;
+        u32 features;
 };

 struct stm32_dma_chan_reg {
...
...
@@ -171,6 +198,9 @@ struct stm32_dma_chan {
         u32 next_sg;
         struct dma_slave_config dma_sconfig;
         struct stm32_dma_chan_reg chan_reg;
+        u32 threshold;
+        u32 mem_burst;
+        u32 mem_width;
 };

 struct stm32_dma_device {
...
...
@@ -235,6 +265,85 @@ static int stm32_dma_get_width(struct stm32_dma_chan *chan,
         }
 }

+static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
+                                                       u32 threshold)
+{
+        enum dma_slave_buswidth max_width;
+
+        if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL)
+                max_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+        else
+                max_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+
+        while ((buf_len < max_width || buf_len % max_width) &&
+               max_width > DMA_SLAVE_BUSWIDTH_1_BYTE)
+                max_width = max_width >> 1;
+
+        return max_width;
+}
+
+static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
+                                                enum dma_slave_buswidth width)
+{
+        u32 remaining;
+
+        if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) {
+                if (burst != 0) {
+                        /*
+                         * If number of beats fit in several whole bursts
+                         * this configuration is allowed.
+                         */
+                        remaining = ((STM32_DMA_FIFO_SIZE / width) *
+                                     (threshold + 1) / 4) % burst;
+
+                        if (remaining == 0)
+                                return true;
+                } else {
+                        return true;
+                }
+        }
+
+        return false;
+}
+
+static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
+{
+        switch (threshold) {
+        case STM32_DMA_FIFO_THRESHOLD_FULL:
+                if (buf_len >= STM32_DMA_MAX_BURST)
+                        return true;
+                else
+                        return false;
+        case STM32_DMA_FIFO_THRESHOLD_HALFFULL:
+                if (buf_len >= STM32_DMA_MAX_BURST / 2)
+                        return true;
+                else
+                        return false;
+        default:
+                return false;
+        }
+}
+
+static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold,
+                                    enum dma_slave_buswidth width)
+{
+        u32 best_burst = max_burst;
+
+        if (best_burst == 1 || !stm32_dma_is_burst_possible(buf_len, threshold))
+                return 0;
+
+        while ((buf_len < best_burst * width && best_burst > 1) ||
+               !stm32_dma_fifo_threshold_is_allowed(best_burst, threshold,
+                                                    width)) {
+                if (best_burst > STM32_DMA_MIN_BURST)
+                        best_burst = best_burst >> 1;
+                else
+                        best_burst = 0;
+        }
+
+        return best_burst;
+}
+
 static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst)
 {
         switch (maxburst) {
...
...
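To make the burst selection above concrete, here is a hedged, standalone re-implementation of the stm32_dma_fifo_threshold_is_allowed() arithmetic with fixed example numbers (16-byte FIFO, half-full threshold, 4-byte bus width). Plain C integers replace the kernel types, so treat it as an illustration of the formula rather than driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FIFO_SIZE       16      /* STM32_DMA_FIFO_SIZE */
#define MAX_BURST       16      /* STM32_DMA_MAX_BURST */

/* Mirrors stm32_dma_fifo_threshold_is_allowed(): the number of FIFO beats
 * available at the programmed threshold must split into whole bursts. */
static bool threshold_is_allowed(uint32_t burst, uint32_t threshold,
                                 uint32_t width)
{
        if (burst == 0)
                return true;
        return (((FIFO_SIZE / width) * (threshold + 1) / 4) % burst) == 0;
}

int main(void)
{
        uint32_t threshold = 1; /* STM32_DMA_FIFO_THRESHOLD_HALFFULL */
        uint32_t width = 4;     /* DMA_SLAVE_BUSWIDTH_4_BYTES */

        /* Half-full FIFO = 8 bytes = 2 beats of 4 bytes:
         * a burst of 2 fits exactly, bursts of 4, 8 or 16 do not. */
        for (uint32_t burst = MAX_BURST; burst >= 1; burst /= 2)
                printf("burst %2u -> %s\n", burst,
                       threshold_is_allowed(burst, threshold, width) ?
                       "allowed" : "rejected");
        return 0;
}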
@@ -254,12 +363,12 @@ static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst)
 }

 static void stm32_dma_set_fifo_config(struct stm32_dma_chan *chan,
-                                      u32 src_maxburst, u32 dst_maxburst)
+                                      u32 src_burst, u32 dst_burst)
 {
         chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_MASK;
         chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_DMEIE;

-        if ((!src_maxburst) && (!dst_maxburst)) {
+        if (!src_burst && !dst_burst) {
                 /* Using direct mode */
                 chan->chan_reg.dma_scr |= STM32_DMA_SCR_DMEIE;
         } else {
...
...
@@ -300,7 +409,7 @@ static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan)
         flags = dma_isr >> (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

-        return flags;
+        return flags & STM32_DMA_MASKI;
 }

 static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
...
...
@@ -315,6 +424,7 @@ static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
          * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
          * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
          */
+        flags &= STM32_DMA_MASKI;
         dma_ifcr = flags << (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

         if (chan->id & 4)
...
...
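The shift expression shared by stm32_dma_irq_status() and stm32_dma_irq_clear() maps a channel id to the bit offset of its flags inside LISR/HISR and LIFCR/HIFCR (channels 4-7 select the high register via the chan->id & 4 test above). A small standalone sketch that only prints the offsets produced by that expression, as an aid for reading the hunks above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Same expression as the driver: ((id & 2) << 3) | ((id & 1) * 6).
         * Channels 4..7 use the high register, so only id % 4 matters here. */
        for (uint32_t id = 0; id < 8; id++) {
                uint32_t shift = ((id & 2) << 3) | ((id & 1) * 6);

                printf("channel %u -> %s, bit offset %2u\n",
                       id, (id & 4) ? "HISR/HIFCR" : "LISR/LIFCR", shift);
        }
        return 0;
}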
@@ -429,6 +539,8 @@ static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
         dev_dbg(chan2dev(chan), "SFCR:  0x%08x\n", sfcr);
 }

+static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan);
+
 static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
 {
         struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
...
...
@@ -471,6 +583,9 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
         if (status)
                 stm32_dma_irq_clear(chan, status);

+        if (chan->desc->cyclic)
+                stm32_dma_configure_next_sg(chan);
+
         stm32_dma_dump_reg(chan);

         /* Start DMA */
...
...
@@ -541,13 +656,29 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
status
=
stm32_dma_irq_status
(
chan
);
scr
=
stm32_dma_read
(
dmadev
,
STM32_DMA_SCR
(
chan
->
id
));
if
(
(
status
&
STM32_DMA_TCI
)
&&
(
scr
&
STM32_DMA_SCR_TCIE
)
)
{
if
(
status
&
STM32_DMA_TCI
)
{
stm32_dma_irq_clear
(
chan
,
STM32_DMA_TCI
);
stm32_dma_handle_chan_done
(
chan
);
}
else
{
if
(
scr
&
STM32_DMA_SCR_TCIE
)
stm32_dma_handle_chan_done
(
chan
);
status
&=
~
STM32_DMA_TCI
;
}
if
(
status
&
STM32_DMA_HTI
)
{
stm32_dma_irq_clear
(
chan
,
STM32_DMA_HTI
);
status
&=
~
STM32_DMA_HTI
;
}
if
(
status
&
STM32_DMA_FEI
)
{
stm32_dma_irq_clear
(
chan
,
STM32_DMA_FEI
);
status
&=
~
STM32_DMA_FEI
;
if
(
!
(
scr
&
STM32_DMA_SCR_EN
))
dev_err
(
chan2dev
(
chan
),
"FIFO Error
\n
"
);
else
dev_dbg
(
chan2dev
(
chan
),
"FIFO over/underrun
\n
"
);
}
if
(
status
)
{
stm32_dma_irq_clear
(
chan
,
status
);
dev_err
(
chan2dev
(
chan
),
"DMA error: status=0x%08x
\n
"
,
status
);
if
(
!
(
scr
&
STM32_DMA_SCR_EN
))
dev_err
(
chan2dev
(
chan
),
"chan disabled by HW
\n
"
);
}
spin_unlock
(
&
chan
->
vchan
.
lock
);
...
...
@@ -564,45 +695,59 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
if
(
vchan_issue_pending
(
&
chan
->
vchan
)
&&
!
chan
->
desc
&&
!
chan
->
busy
)
{
dev_dbg
(
chan2dev
(
chan
),
"vchan %p: issued
\n
"
,
&
chan
->
vchan
);
stm32_dma_start_transfer
(
chan
);
if
(
chan
->
desc
->
cyclic
)
stm32_dma_configure_next_sg
(
chan
);
}
spin_unlock_irqrestore
(
&
chan
->
vchan
.
lock
,
flags
);
}
static
int
stm32_dma_set_xfer_param
(
struct
stm32_dma_chan
*
chan
,
enum
dma_transfer_direction
direction
,
enum
dma_slave_buswidth
*
buswidth
)
enum
dma_slave_buswidth
*
buswidth
,
u32
buf_len
)
{
enum
dma_slave_buswidth
src_addr_width
,
dst_addr_width
;
int
src_bus_width
,
dst_bus_width
;
int
src_burst_size
,
dst_burst_size
;
u32
src_maxburst
,
dst_maxburst
;
u32
dma_scr
=
0
;
u32
src_maxburst
,
dst_maxburst
,
src_best_burst
,
dst_best_burst
;
u32
dma_scr
,
threshold
;
src_addr_width
=
chan
->
dma_sconfig
.
src_addr_width
;
dst_addr_width
=
chan
->
dma_sconfig
.
dst_addr_width
;
src_maxburst
=
chan
->
dma_sconfig
.
src_maxburst
;
dst_maxburst
=
chan
->
dma_sconfig
.
dst_maxburst
;
threshold
=
chan
->
threshold
;
switch
(
direction
)
{
case
DMA_MEM_TO_DEV
:
/* Set device data size */
dst_bus_width
=
stm32_dma_get_width
(
chan
,
dst_addr_width
);
if
(
dst_bus_width
<
0
)
return
dst_bus_width
;
dst_burst_size
=
stm32_dma_get_burst
(
chan
,
dst_maxburst
);
/* Set device burst size */
dst_best_burst
=
stm32_dma_get_best_burst
(
buf_len
,
dst_maxburst
,
threshold
,
dst_addr_width
);
dst_burst_size
=
stm32_dma_get_burst
(
chan
,
dst_best_burst
);
if
(
dst_burst_size
<
0
)
return
dst_burst_size
;
if
(
!
src_addr_width
)
src_addr_width
=
dst_addr_width
;
/* Set memory data size */
src_addr_width
=
stm32_dma_get_max_width
(
buf_len
,
threshold
)
;
chan
->
mem_width
=
src_addr_width
;
src_bus_width
=
stm32_dma_get_width
(
chan
,
src_addr_width
);
if
(
src_bus_width
<
0
)
return
src_bus_width
;
src_burst_size
=
stm32_dma_get_burst
(
chan
,
src_maxburst
);
/* Set memory burst size */
src_maxburst
=
STM32_DMA_MAX_BURST
;
src_best_burst
=
stm32_dma_get_best_burst
(
buf_len
,
src_maxburst
,
threshold
,
src_addr_width
);
src_burst_size
=
stm32_dma_get_burst
(
chan
,
src_best_burst
);
if
(
src_burst_size
<
0
)
return
src_burst_size
;
...
...
@@ -612,27 +757,46 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
                         STM32_DMA_SCR_PBURST(dst_burst_size) |
                         STM32_DMA_SCR_MBURST(src_burst_size);

+                /* Set FIFO threshold */
+                chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
+                chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);
+
+                /* Set peripheral address */
                 chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
                 *buswidth = dst_addr_width;
                 break;

         case DMA_DEV_TO_MEM:
+                /* Set device data size */
                 src_bus_width = stm32_dma_get_width(chan, src_addr_width);
                 if (src_bus_width < 0)
                         return src_bus_width;

-                src_burst_size = stm32_dma_get_burst(chan, src_maxburst);
+                /* Set device burst size */
+                src_best_burst = stm32_dma_get_best_burst(buf_len,
+                                                          src_maxburst,
+                                                          threshold,
+                                                          src_addr_width);
+                chan->mem_burst = src_best_burst;
+                src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
                 if (src_burst_size < 0)
                         return src_burst_size;

-                if (!dst_addr_width)
-                        dst_addr_width = src_addr_width;
-
+                /* Set memory data size */
+                dst_addr_width = stm32_dma_get_max_width(buf_len, threshold);
+                chan->mem_width = dst_addr_width;
+
                 dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
                 if (dst_bus_width < 0)
                         return dst_bus_width;

-                dst_burst_size = stm32_dma_get_burst(chan, dst_maxburst);
+                /* Set memory burst size */
+                dst_maxburst = STM32_DMA_MAX_BURST;
+                dst_best_burst = stm32_dma_get_best_burst(buf_len,
+                                                          dst_maxburst,
+                                                          threshold,
+                                                          dst_addr_width);
+                chan->mem_burst = dst_best_burst;
+                dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
                 if (dst_burst_size < 0)
                         return dst_burst_size;
...
...
@@ -642,6 +806,11 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
                         STM32_DMA_SCR_PBURST(src_burst_size) |
                         STM32_DMA_SCR_MBURST(dst_burst_size);

+                /* Set FIFO threshold */
+                chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
+                chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);
+
+                /* Set peripheral address */
                 chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
                 *buswidth = chan->dma_sconfig.src_addr_width;
                 break;
...
...
@@ -651,8 +820,9 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
return
-
EINVAL
;
}
stm32_dma_set_fifo_config
(
chan
,
src_
maxburst
,
dst_max
burst
);
stm32_dma_set_fifo_config
(
chan
,
src_
best_burst
,
dst_best_
burst
);
/* Set DMA control register */
chan
->
chan_reg
.
dma_scr
&=
~
(
STM32_DMA_SCR_DIR_MASK
|
STM32_DMA_SCR_PSIZE_MASK
|
STM32_DMA_SCR_MSIZE_MASK
|
STM32_DMA_SCR_PBURST_MASK
|
STM32_DMA_SCR_MBURST_MASK
);
...
...
@@ -692,10 +862,6 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
if
(
!
desc
)
return
NULL
;
ret
=
stm32_dma_set_xfer_param
(
chan
,
direction
,
&
buswidth
);
if
(
ret
<
0
)
goto
err
;
/* Set peripheral flow controller */
if
(
chan
->
dma_sconfig
.
device_fc
)
chan
->
chan_reg
.
dma_scr
|=
STM32_DMA_SCR_PFCTRL
;
...
...
@@ -703,10 +869,15 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
chan
->
chan_reg
.
dma_scr
&=
~
STM32_DMA_SCR_PFCTRL
;
for_each_sg
(
sgl
,
sg
,
sg_len
,
i
)
{
ret
=
stm32_dma_set_xfer_param
(
chan
,
direction
,
&
buswidth
,
sg_dma_len
(
sg
));
if
(
ret
<
0
)
goto
err
;
desc
->
sg_req
[
i
].
len
=
sg_dma_len
(
sg
);
nb_data_items
=
desc
->
sg_req
[
i
].
len
/
buswidth
;
if
(
nb_data_items
>
STM32_DMA_MAX_DATA_ITEMS
)
{
if
(
nb_data_items
>
STM32_DMA_
ALIGNED_
MAX_DATA_ITEMS
)
{
dev_err
(
chan2dev
(
chan
),
"nb items not supported
\n
"
);
goto
err
;
}
...
...
@@ -767,12 +938,12 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
return
NULL
;
}
ret
=
stm32_dma_set_xfer_param
(
chan
,
direction
,
&
buswidth
);
ret
=
stm32_dma_set_xfer_param
(
chan
,
direction
,
&
buswidth
,
period_len
);
if
(
ret
<
0
)
return
NULL
;
nb_data_items
=
period_len
/
buswidth
;
if
(
nb_data_items
>
STM32_DMA_MAX_DATA_ITEMS
)
{
if
(
nb_data_items
>
STM32_DMA_
ALIGNED_
MAX_DATA_ITEMS
)
{
dev_err
(
chan2dev
(
chan
),
"number of items not supported
\n
"
);
return
NULL
;
}
...
...
@@ -816,35 +987,45 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
dma_addr_t
src
,
size_t
len
,
unsigned
long
flags
)
{
struct
stm32_dma_chan
*
chan
=
to_stm32_dma_chan
(
c
);
u32
num_sgs
;
enum
dma_slave_buswidth
max_width
;
struct
stm32_dma_desc
*
desc
;
size_t
xfer_count
,
offset
;
u32
num_sgs
,
best_burst
,
dma_burst
,
threshold
;
int
i
;
num_sgs
=
DIV_ROUND_UP
(
len
,
STM32_DMA_MAX_DATA_ITEMS
);
num_sgs
=
DIV_ROUND_UP
(
len
,
STM32_DMA_
ALIGNED_
MAX_DATA_ITEMS
);
desc
=
stm32_dma_alloc_desc
(
num_sgs
);
if
(
!
desc
)
return
NULL
;
threshold
=
chan
->
threshold
;
for
(
offset
=
0
,
i
=
0
;
offset
<
len
;
offset
+=
xfer_count
,
i
++
)
{
xfer_count
=
min_t
(
size_t
,
len
-
offset
,
STM32_DMA_MAX_DATA_ITEMS
);
STM32_DMA_
ALIGNED_
MAX_DATA_ITEMS
);
desc
->
sg_req
[
i
].
len
=
xfer_count
;
/* Compute best burst size */
max_width
=
DMA_SLAVE_BUSWIDTH_1_BYTE
;
best_burst
=
stm32_dma_get_best_burst
(
len
,
STM32_DMA_MAX_BURST
,
threshold
,
max_width
);
dma_burst
=
stm32_dma_get_burst
(
chan
,
best_burst
);
stm32_dma_clear_reg
(
&
desc
->
sg_req
[
i
].
chan_reg
);
desc
->
sg_req
[
i
].
chan_reg
.
dma_scr
=
STM32_DMA_SCR_DIR
(
STM32_DMA_MEM_TO_MEM
)
|
STM32_DMA_SCR_PBURST
(
dma_burst
)
|
STM32_DMA_SCR_MBURST
(
dma_burst
)
|
STM32_DMA_SCR_MINC
|
STM32_DMA_SCR_PINC
|
STM32_DMA_SCR_TCIE
|
STM32_DMA_SCR_TEIE
;
desc
->
sg_req
[
i
].
chan_reg
.
dma_sfcr
=
STM32_DMA_SFCR_DMDIS
|
STM32_DMA_SFCR_FTH
(
STM32_DMA_FIFO_THRESHOLD_FULL
)
|
STM32_DMA_SFCR_F
EIE
;
desc
->
sg_req
[
i
].
chan_reg
.
dma_sfcr
|=
STM32_DMA_SFCR_MASK
;
desc
->
sg_req
[
i
].
chan_reg
.
dma_sfcr
|=
STM32_DMA_SFCR_F
TH
(
threshold
)
;
desc
->
sg_req
[
i
].
chan_reg
.
dma_spar
=
src
+
offset
;
desc
->
sg_req
[
i
].
chan_reg
.
dma_sm0ar
=
dest
+
offset
;
desc
->
sg_req
[
i
].
chan_reg
.
dma_sndtr
=
xfer_count
;
desc
->
sg_req
[
i
].
len
=
xfer_count
;
}
desc
->
num_sgs
=
num_sgs
;
...
...
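The switch from STM32_DMA_MAX_DATA_ITEMS (0xffff) to STM32_DMA_ALIGNED_MAX_DATA_ITEMS (rounded down to 0xfff0) changes how a memcpy is split into scatter-gather chunks. Below is a quick standalone check of the resulting chunk sizes, assuming a hypothetical 128 KiB copy and simplified local stand-ins for ALIGN_DOWN and DIV_ROUND_UP:

#include <stddef.h>
#include <stdio.h>

#define ALIGN_DOWN(x, a)        ((x) & ~((a) - 1))
#define MAX_DATA_ITEMS          0xffff
#define ALIGNED_MAX_DATA_ITEMS  ALIGN_DOWN(MAX_DATA_ITEMS, 16)  /* 0xfff0 */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        size_t len = 128 * 1024;        /* hypothetical memcpy length */
        size_t num_sgs = DIV_ROUND_UP(len, ALIGNED_MAX_DATA_ITEMS);
        size_t offset, xfer;

        printf("num_sgs = %zu\n", num_sgs);     /* 3 chunks for 128 KiB */
        for (offset = 0; offset < len; offset += xfer) {
                xfer = len - offset < ALIGNED_MAX_DATA_ITEMS ?
                       len - offset : ALIGNED_MAX_DATA_ITEMS;
                printf("chunk at 0x%zx, %zu bytes\n", offset, xfer);
        }
        return 0;
}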
@@ -869,6 +1050,7 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
struct
stm32_dma_desc
*
desc
,
u32
next_sg
)
{
u32
modulo
,
burst_size
;
u32
residue
=
0
;
int
i
;
...
...
@@ -876,8 +1058,10 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
          * In cyclic mode, for the last period, residue = remaining bytes from
          * NDTR
          */
-        if (chan->desc->cyclic && next_sg == 0)
-                return stm32_dma_get_remaining_bytes(chan);
+        if (chan->desc->cyclic && next_sg == 0) {
+                residue = stm32_dma_get_remaining_bytes(chan);
+                goto end;
+        }

         /*
          * For all other periods in cyclic mode, and in sg mode,
...
...
@@ -888,6 +1072,15 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
residue
+=
desc
->
sg_req
[
i
].
len
;
residue
+=
stm32_dma_get_remaining_bytes
(
chan
);
end:
if
(
!
chan
->
mem_burst
)
return
residue
;
burst_size
=
chan
->
mem_burst
*
chan
->
mem_width
;
modulo
=
residue
%
burst_size
;
if
(
modulo
)
residue
=
residue
-
modulo
+
burst_size
;
return
residue
;
}
...
...
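Rounding the residue up to a whole number of memory bursts, as the new end: path above does, is easiest to see with made-up numbers; this standalone sketch repeats only the final rounding step, not the per-segment accounting, and the channel setup values are invented for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t mem_burst = 8, mem_width = 4;  /* hypothetical channel setup */
        uint32_t burst_size = mem_burst * mem_width;    /* 32 bytes per burst */
        uint32_t residue = 100;                 /* raw residue from NDTR */
        uint32_t modulo = residue % burst_size;

        /* Same rounding as stm32_dma_desc_residue(): report a residue that is
         * a whole number of bursts, never less than what is actually left. */
        if (modulo)
                residue = residue - modulo + burst_size;

        printf("rounded residue = %u\n", residue);      /* 128 */
        return 0;
}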
@@ -902,7 +1095,7 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
u32
residue
=
0
;
status
=
dma_cookie_status
(
c
,
cookie
,
state
);
if
(
(
status
==
DMA_COMPLETE
)
||
(
!
state
)
)
if
(
status
==
DMA_COMPLETE
||
!
state
)
return
status
;
spin_lock_irqsave
(
&
chan
->
vchan
.
lock
,
flags
);
...
...
@@ -966,7 +1159,7 @@ static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
}
static
void
stm32_dma_set_config
(
struct
stm32_dma_chan
*
chan
,
struct
stm32_dma_cfg
*
cfg
)
struct
stm32_dma_cfg
*
cfg
)
{
stm32_dma_clear_reg
(
&
chan
->
chan_reg
);
...
...
@@ -976,7 +1169,7 @@ static void stm32_dma_set_config(struct stm32_dma_chan *chan,
/* Enable Interrupts */
chan
->
chan_reg
.
dma_scr
|=
STM32_DMA_SCR_TEIE
|
STM32_DMA_SCR_TCIE
;
chan
->
chan_reg
.
dma_sfcr
=
cfg
->
threshold
&
STM32_DMA_SFCR_FTH_MASK
;
chan
->
threshold
=
STM32_DMA_THRESHOLD_FTR_GET
(
cfg
->
features
)
;
}
static
struct
dma_chan
*
stm32_dma_of_xlate
(
struct
of_phandle_args
*
dma_spec
,
...
...
@@ -996,10 +1189,10 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
cfg
.
channel_id
=
dma_spec
->
args
[
0
];
cfg
.
request_line
=
dma_spec
->
args
[
1
];
cfg
.
stream_config
=
dma_spec
->
args
[
2
];
cfg
.
threshold
=
dma_spec
->
args
[
3
];
cfg
.
features
=
dma_spec
->
args
[
3
];
if
(
(
cfg
.
channel_id
>=
STM32_DMA_MAX_CHANNELS
)
||
(
cfg
.
request_line
>=
STM32_DMA_MAX_REQUEST_ID
)
)
{
if
(
cfg
.
channel_id
>=
STM32_DMA_MAX_CHANNELS
||
cfg
.
request_line
>=
STM32_DMA_MAX_REQUEST_ID
)
{
dev_err
(
dev
,
"Bad channel and/or request id
\n
"
);
return
NULL
;
}
...
...