Commit a7838984 authored Mar 30, 2014 by Mark Brown
Merge remote-tracking branch 'spi/topic/dma' into spi-next
Parents: 5d0eb26c 51327353
Showing 3 changed files with 310 additions and 108 deletions:

  drivers/spi/spi-s3c64xx.c   +99  -108
  drivers/spi/spi.c           +180 -0
  include/linux/spi/spi.h     +31  -0
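As context for the diffs below: this merge teaches the SPI core to build and DMA-map scatterlists on the controller driver's behalf, and converts spi-s3c64xx to use it. The sketch below shows roughly how a controller driver opts in; everything prefixed myspi_, the 64-byte threshold, and the "tx"/"rx" channel names are invented for illustration and are not part of this commit.

/* Hypothetical controller-driver sketch of the new core DMA hooks.
 * Error handling and the rest of the driver are elided. */
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

/* The core calls this per transfer; returning true makes it build and
 * DMA-map xfer->tx_sg / xfer->rx_sg before transfer_one() runs. */
static bool myspi_can_dma(struct spi_master *master,
			  struct spi_device *spi,
			  struct spi_transfer *xfer)
{
	return xfer->len > 64;	/* e.g. PIO below an invented FIFO threshold */
}

static int myspi_probe(struct platform_device *pdev)
{
	struct spi_master *master = spi_alloc_master(&pdev->dev, 0);

	if (!master)
		return -ENOMEM;

	/* The channels must be set while the device is prepared: the
	 * core maps against dma_tx->dev->device / dma_rx->dev->device. */
	master->dma_tx = dma_request_slave_channel(&pdev->dev, "tx");
	master->dma_rx = dma_request_slave_channel(&pdev->dev, "rx");
	master->can_dma = myspi_can_dma;

	/* transfer_one() may now hand xfer->tx_sg / xfer->rx_sg to
	 * dmaengine_prep_slave_sg(), as spi-s3c64xx does below. */
	return devm_spi_register_master(&pdev->dev, master);
}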
drivers/spi/spi-s3c64xx.c
@@ -381,7 +381,7 @@ static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
 #else
 static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
-			unsigned len, dma_addr_t buf)
+			struct sg_table *sgt)
 {
 	struct s3c64xx_spi_driver_data *sdd;
 	struct dma_slave_config config;
@@ -407,8 +407,8 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
 		dmaengine_slave_config(dma->ch, &config);
 	}

-	desc = dmaengine_prep_slave_single(dma->ch, buf, len,
-					   dma->direction, DMA_PREP_INTERRUPT);
+	desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
+				       dma->direction, DMA_PREP_INTERRUPT);

 	desc->callback = s3c64xx_spi_dmacb;
 	desc->callback_param = dma;
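The hunk above only shows the descriptor being prepared and its callback wired up; a dmaengine transfer additionally has to be submitted and the channel kicked, which the driver does a few lines further down (not shown in this hunk). For reference, the canonical dmaengine slave sequence looks roughly like this generic sketch (submit_slave_sg and its parameters are invented for illustration):

#include <linux/dmaengine.h>

/* Generic dmaengine slave-submit sketch, not code from this diff. */
static int submit_slave_sg(struct dma_chan *chan,
			   struct dma_slave_config *config,
			   struct sg_table *sgt,
			   enum dma_transfer_direction dir,
			   dma_async_tx_callback done, void *ctx)
{
	struct dma_async_tx_descriptor *desc;

	dmaengine_slave_config(chan, config);	/* FIFO address, burst, width */
	desc = dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents, dir,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;
	desc->callback = done;			/* invoked on completion */
	desc->callback_param = ctx;
	dmaengine_submit(desc);			/* queue the descriptor */
	dma_async_issue_pending(chan);		/* actually start the engine */
	return 0;
}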
@@ -515,7 +515,11 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
 		if (dma_mode) {
 			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
+#ifndef CONFIG_S3C_DMA
+			prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
+#else
 			prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
+#endif
 		} else {
 			switch (sdd->cur_bpw) {
 			case 32:
@@ -547,7 +551,11 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
 					| S3C64XX_SPI_PACKET_CNT_EN,
 					regs + S3C64XX_SPI_PACKET_CNT);
+#ifndef CONFIG_S3C_DMA
+			prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
+#else
 			prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
+#endif
 		}
 	}
@@ -555,23 +563,6 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
 }

-static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
-			     struct spi_device *spi)
-{
-	if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
-		if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
-			/* Deselect the last toggled device */
-			if (spi->cs_gpio >= 0)
-				gpio_set_value(spi->cs_gpio,
-					spi->mode & SPI_CS_HIGH ? 0 : 1);
-		}
-		sdd->tgl_spi = NULL;
-	}
-
-	if (spi->cs_gpio >= 0)
-		gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 1 : 0);
-}
-
 static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
 					int timeout_ms)
 {
@@ -593,112 +584,111 @@ static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
 	return RX_FIFO_LVL(status, sdd);
 }

-static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
-			 struct spi_transfer *xfer, int dma_mode)
-{
-	void __iomem *regs = sdd->regs;
-	unsigned long val;
-	int ms;
-
-	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
-	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
-	ms += 10; /* some tolerance */
-
-	if (dma_mode) {
-		val = msecs_to_jiffies(ms) + 10;
-		val = wait_for_completion_timeout(&sdd->xfer_completion, val);
-	} else {
-		u32 status;
-		val = msecs_to_loops(ms);
-		do {
-			status = readl(regs + S3C64XX_SPI_STATUS);
-		} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
-	}
-
-	if (dma_mode) {
-		u32 status;
-
-		/*
-		 * If the previous xfer was completed within timeout, then
-		 * proceed further else return -EIO.
-		 * DmaTx returns after simply writing data in the FIFO,
-		 * w/o waiting for real transmission on the bus to finish.
-		 * DmaRx returns only after Dma read data from FIFO which
-		 * needs bus transmission to finish, so we don't worry if
-		 * Xfer involved Rx(with or without Tx).
-		 */
-		if (val && !xfer->rx_buf) {
-			val = msecs_to_loops(10);
-			status = readl(regs + S3C64XX_SPI_STATUS);
-			while ((TX_FIFO_LVL(status, sdd)
-				|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
-			       && --val) {
-				cpu_relax();
-				status = readl(regs + S3C64XX_SPI_STATUS);
-			}
-
-			/* If timed out while checking rx/tx status return error */
-			if (!val)
-				return -EIO;
-		}
-	} else {
-		int loops;
-		u32 cpy_len;
-		u8 *buf;
-
-		/* If it was only Tx */
-		if (!xfer->rx_buf) {
-			sdd->state &= ~TXBUSY;
-			return 0;
-		}
-
-		/*
-		 * If the receive length is bigger than the controller fifo
-		 * size, calculate the loops and read the fifo as many times.
-		 * loops = length / max fifo size (calculated by using the
-		 * fifo mask).
-		 * For any size less than the fifo size the below code is
-		 * executed atleast once.
-		 */
-		loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
-		buf = xfer->rx_buf;
-		do {
-			/* wait for data to be received in the fifo */
-			cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
-						(loops ? ms : 0));
-
-			switch (sdd->cur_bpw) {
-			case 32:
-				ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
-					buf, cpy_len / 4);
-				break;
-			case 16:
-				ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
-					buf, cpy_len / 2);
-				break;
-			default:
-				ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
-					buf, cpy_len);
-				break;
-			}
-
-			buf = buf + cpy_len;
-		} while (loops--);
-		sdd->state &= ~RXBUSY;
-	}
-
-	return 0;
-}
-
-static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
-			      struct spi_device *spi)
-{
-	if (sdd->tgl_spi == spi)
-		sdd->tgl_spi = NULL;
-
-	if (spi->cs_gpio >= 0)
-		gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
-}
-
+static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
+			struct spi_transfer *xfer)
+{
+	void __iomem *regs = sdd->regs;
+	unsigned long val;
+	u32 status;
+	int ms;
+
+	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
+	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+	ms += 10; /* some tolerance */
+
+	val = msecs_to_jiffies(ms) + 10;
+	val = wait_for_completion_timeout(&sdd->xfer_completion, val);
+
+	/*
+	 * If the previous xfer was completed within timeout, then
+	 * proceed further else return -EIO.
+	 * DmaTx returns after simply writing data in the FIFO,
+	 * w/o waiting for real transmission on the bus to finish.
+	 * DmaRx returns only after Dma read data from FIFO which
+	 * needs bus transmission to finish, so we don't worry if
+	 * Xfer involved Rx(with or without Tx).
+	 */
+	if (val && !xfer->rx_buf) {
+		val = msecs_to_loops(10);
+		status = readl(regs + S3C64XX_SPI_STATUS);
+		while ((TX_FIFO_LVL(status, sdd)
+			|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
+		       && --val) {
+			cpu_relax();
+			status = readl(regs + S3C64XX_SPI_STATUS);
+		}
+	}
+
+	/* If timed out while checking rx/tx status return error */
+	if (!val)
+		return -EIO;
+
+	return 0;
+}
+
+static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
+			struct spi_transfer *xfer)
+{
+	void __iomem *regs = sdd->regs;
+	unsigned long val;
+	u32 status;
+	int loops;
+	u32 cpy_len;
+	u8 *buf;
+	int ms;
+
+	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
+	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+	ms += 10; /* some tolerance */
+
+	val = msecs_to_loops(ms);
+	do {
+		status = readl(regs + S3C64XX_SPI_STATUS);
+	} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
+
+	/* If it was only Tx */
+	if (!xfer->rx_buf) {
+		sdd->state &= ~TXBUSY;
+		return 0;
+	}
+
+	/*
+	 * If the receive length is bigger than the controller fifo
+	 * size, calculate the loops and read the fifo as many times.
+	 * loops = length / max fifo size (calculated by using the
+	 * fifo mask).
+	 * For any size less than the fifo size the below code is
+	 * executed atleast once.
+	 */
+	loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
+	buf = xfer->rx_buf;
+	do {
+		/* wait for data to be received in the fifo */
+		cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
+						       (loops ? ms : 0));
+
+		switch (sdd->cur_bpw) {
+		case 32:
+			ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
+				     buf, cpy_len / 4);
+			break;
+		case 16:
+			ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
+				     buf, cpy_len / 2);
+			break;
+		default:
+			ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
+				    buf, cpy_len);
+			break;
+		}
+
+		buf = buf + cpy_len;
+	} while (loops--);
+	sdd->state &= ~RXBUSY;
+
+	return 0;
+}
+
 static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
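The timeout arithmetic shared by both new helpers is worth a worked example: ms = xfer->len * 8 * 1000 / sdd->cur_speed is bits-to-transfer divided by bits-per-millisecond, so a 1024-byte transfer at a cur_speed of 1 MHz gives 1024 * 8 * 1000 / 1000000 = 8 ms, padded by the 10 ms tolerance to 18 ms. wait_for_dma() converts that (plus 10 extra jiffies of slack) into a completion timeout and returns -EIO on expiry, while wait_for_pio() converts the same budget via msecs_to_loops() into a bound on its RX_FIFO_LVL polling loop.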
@@ -929,7 +919,10 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
 	spin_unlock_irqrestore(&sdd->lock, flags);

-	status = wait_for_xfer(sdd, xfer, use_dma);
+	if (use_dma)
+		status = wait_for_dma(sdd, xfer);
+	else
+		status = wait_for_pio(sdd, xfer);

 	if (status) {
 		dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
@@ -1092,14 +1085,12 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
 	pm_runtime_put(&sdd->pdev->dev);
 	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
-	disable_cs(sdd, spi);
 	return 0;

 setup_exit:
 	pm_runtime_put(&sdd->pdev->dev);
 	/* setup() returns with device de-selected */
 	writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
-	disable_cs(sdd, spi);

 	gpio_free(cs->line);
 	spi_set_ctldata(spi, NULL);
drivers/spi/spi.c
@@ -24,6 +24,8 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/cache.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/mutex.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
@@ -578,6 +580,169 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
 		spi->master->set_cs(spi, !enable);
 }

+static int spi_map_buf(struct spi_master *master, struct device *dev,
+		       struct sg_table *sgt, void *buf, size_t len,
+		       enum dma_data_direction dir)
+{
+	const bool vmalloced_buf = is_vmalloc_addr(buf);
+	const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
+	const int sgs = DIV_ROUND_UP(len, desc_len);
+	struct page *vm_page;
+	void *sg_buf;
+	size_t min;
+	int i, ret;
+
+	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
+	if (ret != 0)
+		return ret;
+
+	for (i = 0; i < sgs; i++) {
+		min = min_t(size_t, len, desc_len);
+
+		if (vmalloced_buf) {
+			vm_page = vmalloc_to_page(buf);
+			if (!vm_page) {
+				sg_free_table(sgt);
+				return -ENOMEM;
+			}
+			sg_buf = page_address(vm_page) +
+				((size_t)buf & ~PAGE_MASK);
+		} else {
+			sg_buf = buf;
+		}
+
+		sg_set_buf(&sgt->sgl[i], sg_buf, min);
+
+		buf += min;
+		len -= min;
+	}
+
+	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
+	if (ret < 0) {
+		sg_free_table(sgt);
+		return ret;
+	}
+
+	sgt->nents = ret;
+
+	return 0;
+}
+
+static void spi_unmap_buf(struct spi_master *master, struct device *dev,
+			  struct sg_table *sgt, enum dma_data_direction dir)
+{
+	if (sgt->orig_nents) {
+		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+		sg_free_table(sgt);
+	}
+}
+
+static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+	struct device *tx_dev, *rx_dev;
+	struct spi_transfer *xfer;
+	void *tmp;
+	unsigned int max_tx, max_rx;
+	int ret;
+
+	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
+		max_tx = 0;
+		max_rx = 0;
+
+		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+			if ((master->flags & SPI_MASTER_MUST_TX) &&
+			    !xfer->tx_buf)
+				max_tx = max(xfer->len, max_tx);
+			if ((master->flags & SPI_MASTER_MUST_RX) &&
+			    !xfer->rx_buf)
+				max_rx = max(xfer->len, max_rx);
+		}
+
+		if (max_tx) {
+			tmp = krealloc(master->dummy_tx, max_tx,
+				       GFP_KERNEL | GFP_DMA);
+			if (!tmp)
+				return -ENOMEM;
+			master->dummy_tx = tmp;
+			memset(tmp, 0, max_tx);
+		}
+
+		if (max_rx) {
+			tmp = krealloc(master->dummy_rx, max_rx,
+				       GFP_KERNEL | GFP_DMA);
+			if (!tmp)
+				return -ENOMEM;
+			master->dummy_rx = tmp;
+		}
+
+		if (max_tx || max_rx) {
+			list_for_each_entry(xfer, &msg->transfers,
+					    transfer_list) {
+				if (!xfer->tx_buf)
+					xfer->tx_buf = master->dummy_tx;
+				if (!xfer->rx_buf)
+					xfer->rx_buf = master->dummy_rx;
+			}
+		}
+	}
+
+	if (!master->can_dma)
+		return 0;
+
+	tx_dev = &master->dma_tx->dev->device;
+	rx_dev = &master->dma_rx->dev->device;
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		if (!master->can_dma(master, msg->spi, xfer))
+			continue;
+
+		if (xfer->tx_buf != NULL) {
+			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
+					  (void *)xfer->tx_buf, xfer->len,
+					  DMA_TO_DEVICE);
+			if (ret != 0)
+				return ret;
+		}
+
+		if (xfer->rx_buf != NULL) {
+			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
+					  xfer->rx_buf, xfer->len,
+					  DMA_FROM_DEVICE);
+			if (ret != 0) {
+				spi_unmap_buf(master, tx_dev,
+					      &xfer->tx_sg, DMA_TO_DEVICE);
+				return ret;
+			}
+		}
+	}
+
+	master->cur_msg_mapped = true;
+
+	return 0;
+}
+
+static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
+{
+	struct spi_transfer *xfer;
+	struct device *tx_dev, *rx_dev;
+
+	if (!master->cur_msg_mapped || !master->can_dma)
+		return 0;
+
+	tx_dev = &master->dma_tx->dev->device;
+	rx_dev = &master->dma_rx->dev->device;
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		if (!master->can_dma(master, msg->spi, xfer))
+			continue;
+
+		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+	}
+
+	return 0;
+}
+
 /*
  * spi_transfer_one_message - Default implementation of transfer_one_message()
  *
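A worked example for spi_map_buf() above: a vmalloc'd buffer is only virtually contiguous, so it is split into at most page-sized segments resolved one page at a time via vmalloc_to_page(). With 4 KiB pages, a page-aligned 10000-byte vmalloc buffer gives sgs = DIV_ROUND_UP(10000, 4096) = 3 scatterlist entries of 4096, 4096 and 1808 bytes, while a kmalloc'd buffer of the same length stays a single segment because desc_len is then master->max_dma_len (defaulted to INT_MAX by spi_register_master() in a later hunk). Note also that dma_map_sg() may coalesce entries, which is why sgt->nents is overwritten with its return value while spi_unmap_buf() unmaps using sgt->orig_nents.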
@@ -684,6 +849,10 @@ static void spi_pump_messages(struct kthread_work *work)
 		}
 		master->busy = false;
 		spin_unlock_irqrestore(&master->queue_lock, flags);
+		kfree(master->dummy_rx);
+		master->dummy_rx = NULL;
+		kfree(master->dummy_tx);
+		master->dummy_tx = NULL;
 		if (master->unprepare_transfer_hardware &&
 		    master->unprepare_transfer_hardware(master))
 			dev_err(&master->dev,
@@ -750,6 +919,13 @@ static void spi_pump_messages(struct kthread_work *work)
 		master->cur_msg_prepared = true;
 	}

+	ret = spi_map_msg(master, master->cur_msg);
+	if (ret) {
+		master->cur_msg->status = ret;
+		spi_finalize_current_message(master);
+		return;
+	}
+
 	ret = master->transfer_one_message(master, master->cur_msg);
 	if (ret) {
 		dev_err(&master->dev,
@@ -837,6 +1013,8 @@ void spi_finalize_current_message(struct spi_master *master)
 	queue_kthread_work(&master->kworker, &master->pump_messages);
 	spin_unlock_irqrestore(&master->queue_lock, flags);

+	spi_unmap_msg(master, mesg);
+
 	if (master->cur_msg_prepared && master->unprepare_message) {
 		ret = master->unprepare_message(master, mesg);
 		if (ret) {
@@ -1370,6 +1548,8 @@ int spi_register_master(struct spi_master *master)
 	mutex_init(&master->bus_lock_mutex);
 	master->bus_lock_flag = 0;
 	init_completion(&master->xfer_completion);
+	if (!master->max_dma_len)
+		master->max_dma_len = INT_MAX;

 	/* register the device, then userspace will see it.
 	 * registration fails if the bus ID is in use.
include/linux/spi/spi.h
@@ -24,6 +24,9 @@
 #include <linux/slab.h>
 #include <linux/kthread.h>
 #include <linux/completion.h>
+#include <linux/scatterlist.h>
+
+struct dma_chan;

 /*
  * INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -266,6 +269,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * @auto_runtime_pm: the core should ensure a runtime PM reference is held
  *                   while the hardware is prepared, using the parent
  *                   device for the spidev
+ * @max_dma_len: Maximum length of a DMA transfer for the device.
  * @prepare_transfer_hardware: a message will soon arrive from the queue
  *	so the subsystem requests the driver to prepare the transfer hardware
  *	by issuing this call
@@ -348,6 +352,8 @@ struct spi_master {
 #define SPI_MASTER_HALF_DUPLEX	BIT(0)		/* can't do full duplex */
 #define SPI_MASTER_NO_RX	BIT(1)		/* can't do buffer read */
 #define SPI_MASTER_NO_TX	BIT(2)		/* can't do buffer write */
+#define SPI_MASTER_MUST_RX	BIT(3)		/* requires rx */
+#define SPI_MASTER_MUST_TX	BIT(4)		/* requires tx */

 	/* lock and mutex for SPI bus locking */
 	spinlock_t		bus_lock_spinlock;
@@ -389,6 +395,17 @@ struct spi_master {
 	/* called on release() to free memory provided by spi_master */
 	void			(*cleanup)(struct spi_device *spi);

+	/*
+	 * Used to enable core support for DMA handling, if can_dma()
+	 * exists and returns true then the transfer will be mapped
+	 * prior to transfer_one() being called.  The driver should
+	 * not modify or store xfer and dma_tx and dma_rx must be set
+	 * while the device is prepared.
+	 */
+	bool			(*can_dma)(struct spi_master *master,
+					   struct spi_device *spi,
+					   struct spi_transfer *xfer);
+
 	/*
 	 * These hooks are for drivers that want to use the generic
 	 * master transfer queueing mechanism. If these are used, the
@@ -407,7 +424,9 @@ struct spi_master {
 	bool				rt;
 	bool				auto_runtime_pm;
 	bool				cur_msg_prepared;
+	bool				cur_msg_mapped;
 	struct completion		xfer_completion;
+	size_t				max_dma_len;

 	int (*prepare_transfer_hardware)(struct spi_master *master);
 	int (*transfer_one_message)(struct spi_master *master,
@@ -428,6 +447,14 @@ struct spi_master {
 	/* gpio chip select */
 	int			*cs_gpios;

+	/* DMA channels for use with core dmaengine helpers */
+	struct dma_chan		*dma_tx;
+	struct dma_chan		*dma_rx;
+
+	/* dummy data for full duplex devices */
+	void			*dummy_rx;
+	void			*dummy_tx;
 };

 static inline void *spi_master_get_devdata(struct spi_master *master)
@@ -512,6 +539,8 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
  *      (optionally) changing the chipselect status, then starting
  *      the next transfer or completing this @spi_message.
  * @transfer_list: transfers are sequenced through @spi_message.transfers
+ * @tx_sg: Scatterlist for transmit, currently not for client use
+ * @rx_sg: Scatterlist for receive, currently not for client use
  *
  * SPI transfers always write the same number of bytes as they read.
  * Protocol drivers should always provide @rx_buf and/or @tx_buf.
@@ -579,6 +608,8 @@ struct spi_transfer {
 	dma_addr_t	tx_dma;
 	dma_addr_t	rx_dma;
+	struct sg_table tx_sg;
+	struct sg_table rx_sg;

 	unsigned	cs_change:1;
 	unsigned	tx_nbits:3;
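The new @tx_sg/@rx_sg fields are explicitly "currently not for client use": protocol drivers keep filling tx_buf/rx_buf exactly as before, and the core populates the scatterlists itself whenever the controller's can_dma() accepts a transfer. For illustration, a hypothetical client helper (myproto_xfer is invented here) that is entirely unaffected by this merge:

#include <linux/spi/spi.h>

/* Hypothetical protocol-driver helper; tx/rx must be DMA-safe
 * (e.g. kmalloc'd), as spi.h has always required. */
static int myproto_xfer(struct spi_device *spi, const void *tx, void *rx,
			size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf	= tx,
		.rx_buf	= rx,
		.len	= len,
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	return spi_sync(spi, &msg);	/* core maps tx_sg/rx_sg if DMA'd */
}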