Commit 3bcfca61 authored by Mark Brown

Merge remote-tracking branches 'spi/topic/atmel', 'spi/topic/cadence', 'spi/topic/dw' and 'spi/topic/fsl-cpm' into spi-next
@@ -26,6 +26,7 @@
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
/* SPI register offsets */
#define SPI_CR 0x0000
@@ -191,6 +192,8 @@
#define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
#define AUTOSUSPEND_TIMEOUT 2000
struct atmel_spi_dma {
struct dma_chan *chan_rx;
struct dma_chan *chan_tx;
@@ -414,23 +417,6 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as,
return err;
}
static bool filter(struct dma_chan *chan, void *pdata)
{
struct atmel_spi_dma *sl_pdata = pdata;
struct at_dma_slave *sl;
if (!sl_pdata)
return false;
sl = &sl_pdata->dma_slave;
if (sl->dma_dev == chan->device->dev) {
chan->private = sl;
return true;
} else {
return false;
}
}
static int atmel_spi_configure_dma(struct atmel_spi *as)
{
struct dma_slave_config slave_config;
@@ -441,19 +427,24 @@ static int atmel_spi_configure_dma(struct atmel_spi *as)
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
as->dma.chan_tx = dma_request_slave_channel_compat(mask, filter,
&as->dma,
dev, "tx");
if (!as->dma.chan_tx) {
as->dma.chan_tx = dma_request_slave_channel_reason(dev, "tx");
if (IS_ERR(as->dma.chan_tx)) {
err = PTR_ERR(as->dma.chan_tx);
if (err == -EPROBE_DEFER) {
dev_warn(dev, "no DMA channel available at the moment\n");
return err;
}
dev_err(dev,
"DMA TX channel not available, SPI unable to use DMA\n");
err = -EBUSY;
goto error;
}
as->dma.chan_rx = dma_request_slave_channel_compat(mask, filter,
&as->dma,
dev, "rx");
/*
* No reason to check EPROBE_DEFER here since we have already requested
* tx channel. If it fails here, it's for another reason.
*/
as->dma.chan_rx = dma_request_slave_channel(dev, "rx");
if (!as->dma.chan_rx) {
dev_err(dev,
@@ -474,7 +465,7 @@ static int atmel_spi_configure_dma(struct atmel_spi *as)
error:
if (as->dma.chan_rx)
dma_release_channel(as->dma.chan_rx);
if (as->dma.chan_tx)
if (!IS_ERR(as->dma.chan_tx))
dma_release_channel(as->dma.chan_tx);
return err;
}
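
For readers following the tx-channel error handling above, here is a stripped-down sketch of the same deferred-probe pattern, using the dmaengine calls that appear in the hunk; the foo_* names, the private struct and the fallback-to-PIO behaviour are illustrative, not taken from this driver.

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

struct foo_priv {
	struct dma_chan *chan_tx;	/* illustrative private state */
};

static int foo_request_tx_dma(struct device *dev, struct foo_priv *priv)
{
	priv->chan_tx = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(priv->chan_tx)) {
		int err = PTR_ERR(priv->chan_tx);

		priv->chan_tx = NULL;
		if (err == -EPROBE_DEFER) {
			/* The DMA provider has not probed yet: propagate the
			 * error so the driver core retries this probe later. */
			dev_warn(dev, "no DMA channel available at the moment\n");
			return err;
		}
		/* Any other failure: report it and let the caller fall back
		 * to PIO rather than failing the probe outright. */
		dev_err(dev, "TX DMA channel not available\n");
		return -EBUSY;
	}
	return 0;
}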
@@ -482,11 +473,9 @@ static int atmel_spi_configure_dma(struct atmel_spi *as)
static void atmel_spi_stop_dma(struct atmel_spi *as)
{
if (as->dma.chan_rx)
as->dma.chan_rx->device->device_control(as->dma.chan_rx,
DMA_TERMINATE_ALL, 0);
dmaengine_terminate_all(as->dma.chan_rx);
if (as->dma.chan_tx)
as->dma.chan_tx->device->device_control(as->dma.chan_tx,
DMA_TERMINATE_ALL, 0);
dmaengine_terminate_all(as->dma.chan_tx);
}
static void atmel_spi_release_dma(struct atmel_spi *as)
@@ -1315,6 +1304,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
master->setup = atmel_spi_setup;
master->transfer_one_message = atmel_spi_transfer_one_message;
master->cleanup = atmel_spi_cleanup;
master->auto_runtime_pm = true;
platform_set_drvdata(pdev, master);
as = spi_master_get_devdata(master);
@@ -1347,8 +1337,11 @@ static int atmel_spi_probe(struct platform_device *pdev)
as->use_dma = false;
as->use_pdc = false;
if (as->caps.has_dma_support) {
if (atmel_spi_configure_dma(as) == 0)
ret = atmel_spi_configure_dma(as);
if (ret == 0)
as->use_dma = true;
else if (ret == -EPROBE_DEFER)
return ret;
} else {
as->use_pdc = true;
}
@@ -1387,6 +1380,11 @@ static int atmel_spi_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
(unsigned long)regs->start, irq);
pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = devm_spi_register_master(&pdev->dev, master);
if (ret)
goto out_free_dma;
@@ -1394,6 +1392,9 @@ static int atmel_spi_probe(struct platform_device *pdev)
return 0;
out_free_dma:
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
if (as->use_dma)
atmel_spi_release_dma(as);
@@ -1415,6 +1416,8 @@ static int atmel_spi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct atmel_spi *as = spi_master_get_devdata(master);
pm_runtime_get_sync(&pdev->dev);
/* reset the hardware and block queue progress */
spin_lock_irq(&as->lock);
if (as->use_dma) {
@@ -1432,14 +1435,37 @@ static int atmel_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(as->clk);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int atmel_spi_suspend(struct device *dev)
#ifdef CONFIG_PM
static int atmel_spi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct atmel_spi *as = spi_master_get_devdata(master);
clk_disable_unprepare(as->clk);
pinctrl_pm_select_sleep_state(dev);
return 0;
}
static int atmel_spi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct atmel_spi *as = spi_master_get_devdata(master);
pinctrl_pm_select_default_state(dev);
return clk_prepare_enable(as->clk);
}
static int atmel_spi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
int ret;
/* Stop the queue running */
@@ -1449,9 +1475,8 @@ static int atmel_spi_suspend(struct device *dev)
return ret;
}
clk_disable_unprepare(as->clk);
pinctrl_pm_select_sleep_state(dev);
if (!pm_runtime_suspended(dev))
atmel_spi_runtime_suspend(dev);
return 0;
}
@@ -1459,12 +1484,13 @@ static int atmel_spi_suspend(struct device *dev)
static int atmel_spi_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct atmel_spi *as = spi_master_get_devdata(master);
int ret;
pinctrl_pm_select_default_state(dev);
clk_prepare_enable(as->clk);
if (!pm_runtime_suspended(dev)) {
ret = atmel_spi_runtime_resume(dev);
if (ret)
return ret;
}
/* Start the queue running */
ret = spi_master_resume(master);
@@ -1474,8 +1500,11 @@ static int atmel_spi_resume(struct device *dev)
return ret;
}
static SIMPLE_DEV_PM_OPS(atmel_spi_pm_ops, atmel_spi_suspend, atmel_spi_resume);
static const struct dev_pm_ops atmel_spi_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume)
SET_RUNTIME_PM_OPS(atmel_spi_runtime_suspend,
atmel_spi_runtime_resume, NULL)
};
#define ATMEL_SPI_PM_OPS (&atmel_spi_pm_ops)
#else
#define ATMEL_SPI_PM_OPS NULL
......
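
The atmel hunks above wire runtime PM in alongside system sleep: auto_runtime_pm on the master, pm_runtime_* calls in probe and remove, and a dev_pm_ops combining SET_SYSTEM_SLEEP_PM_OPS with SET_RUNTIME_PM_OPS. A minimal sketch of that wiring for a hypothetical platform driver follows; the foo_* names and the empty callback bodies are placeholders, and only the pm_runtime_* and dev_pm_ops helpers are the ones used in the diff.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* Gate clocks, select the sleep pinctrl state, etc. */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* Restore the default pinctrl state, re-enable clocks, etc. */
	return 0;
}

static int foo_suspend(struct device *dev)
{
	/* Power down only if runtime PM has not already done so. */
	if (!pm_runtime_suspended(dev))
		foo_runtime_suspend(dev);
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Mirror suspend: touch the hardware only if it was runtime-active. */
	if (!pm_runtime_suspended(dev))
		return foo_runtime_resume(dev);
	return 0;
}

static int foo_probe(struct platform_device *pdev)
{
	/* ... hardware setup ... */
	pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);	/* cf. AUTOSUSPEND_TIMEOUT */
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);	/* device is already powered up here */
	pm_runtime_enable(&pdev->dev);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	pm_runtime_get_sync(&pdev->dev);	/* wake the device before touching it */
	/* ... tear down hardware ... */
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	.driver	= {
		.name	= "foo-spi",
		.pm	= &foo_pm_ops,
	},
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");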
@@ -521,6 +521,17 @@ static int cdns_spi_probe(struct platform_device *pdev)
goto clk_dis_apb;
}
ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
if (ret < 0)
master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
else
master->num_chipselect = num_cs;
ret = of_property_read_u32(pdev->dev.of_node, "is-decoded-cs",
&xspi->is_decoded_cs);
if (ret < 0)
xspi->is_decoded_cs = 0;
/* SPI controller initializations */
cdns_spi_init_hw(xspi);
@@ -539,19 +550,6 @@ static int cdns_spi_probe(struct platform_device *pdev)
goto remove_master;
}
ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
if (ret < 0)
master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
else
master->num_chipselect = num_cs;
ret = of_property_read_u32(pdev->dev.of_node, "is-decoded-cs",
&xspi->is_decoded_cs);
if (ret < 0)
xspi->is_decoded_cs = 0;
master->prepare_transfer_hardware = cdns_prepare_transfer_hardware;
master->prepare_message = cdns_prepare_message;
master->transfer_one = cdns_transfer_one;
......
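
The cadence change above only moves the two of_property_read_u32() calls ahead of cdns_spi_init_hw(); the pattern itself is simply "read an optional devicetree u32, fall back to a default when it is absent". A small sketch of that pattern (the num-cs property name matches the hunk, everything prefixed foo_ is illustrative):

#include <linux/of.h>
#include <linux/platform_device.h>

#define FOO_DEFAULT_NUM_CS	4	/* illustrative default */

static u32 foo_num_chipselect(struct platform_device *pdev)
{
	u32 num_cs;

	/*
	 * of_property_read_u32() returns a negative errno when the property
	 * is missing or malformed; treat that as "use the default".
	 */
	if (of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs) < 0)
		num_cs = FOO_DEFAULT_NUM_CS;

	return num_cs;
}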
@@ -26,6 +26,9 @@
#include <linux/intel_mid_dma.h>
#include <linux/pci.h>
#define RX_BUSY 0
#define TX_BUSY 1
struct mid_dma {
struct intel_mid_dma_slave dmas_tx;
struct intel_mid_dma_slave dmas_rx;
@@ -98,41 +101,26 @@ static void mid_spi_dma_exit(struct dw_spi *dws)
}
/*
* dws->dma_chan_done is cleared before the dma transfer starts,
* callback for rx/tx channel will each increment it by 1.
* Reaching 2 means the whole spi transaction is done.
* dws->dma_chan_busy is set before the dma transfer starts, callback for tx
* channel will clear a corresponding bit.
*/
static void dw_spi_dma_done(void *arg)
static void dw_spi_dma_tx_done(void *arg)
{
struct dw_spi *dws = arg;
if (++dws->dma_chan_done != 2)
if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY))
return;
dw_spi_xfer_done(dws);
}
static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws)
{
struct dma_async_tx_descriptor *txdesc, *rxdesc;
struct dma_slave_config txconf, rxconf;
u16 dma_ctrl = 0;
/* 1. setup DMA related registers */
if (cs_change) {
spi_enable_chip(dws, 0);
dw_writew(dws, DW_SPI_DMARDLR, 0xf);
dw_writew(dws, DW_SPI_DMATDLR, 0x10);
if (dws->tx_dma)
dma_ctrl |= SPI_DMA_TDMAE;
if (dws->rx_dma)
dma_ctrl |= SPI_DMA_RDMAE;
dw_writew(dws, DW_SPI_DMACR, dma_ctrl);
spi_enable_chip(dws, 1);
}
struct dma_slave_config txconf;
struct dma_async_tx_descriptor *txdesc;
dws->dma_chan_done = 0;
if (!dws->tx_dma)
return NULL;
/* 2. Prepare the TX dma transfer */
txconf.direction = DMA_MEM_TO_DEV;
txconf.dst_addr = dws->dma_addr;
txconf.dst_maxburst = LNW_DMA_MSIZE_16;
@@ -151,10 +139,33 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
1,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
txdesc->callback = dw_spi_dma_done;
txdesc->callback = dw_spi_dma_tx_done;
txdesc->callback_param = dws;
/* 3. Prepare the RX dma transfer */
return txdesc;
}
/*
* dws->dma_chan_busy is set before the dma transfer starts, callback for rx
* channel will clear a corresponding bit.
*/
static void dw_spi_dma_rx_done(void *arg)
{
struct dw_spi *dws = arg;
if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY))
return;
dw_spi_xfer_done(dws);
}
static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws)
{
struct dma_slave_config rxconf;
struct dma_async_tx_descriptor *rxdesc;
if (!dws->rx_dma)
return NULL;
rxconf.direction = DMA_DEV_TO_MEM;
rxconf.src_addr = dws->dma_addr;
rxconf.src_maxburst = LNW_DMA_MSIZE_16;
@@ -173,15 +184,56 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
1,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
rxdesc->callback = dw_spi_dma_done;
rxdesc->callback = dw_spi_dma_rx_done;
rxdesc->callback_param = dws;
return rxdesc;
}
static void dw_spi_dma_setup(struct dw_spi *dws)
{
u16 dma_ctrl = 0;
spi_enable_chip(dws, 0);
dw_writew(dws, DW_SPI_DMARDLR, 0xf);
dw_writew(dws, DW_SPI_DMATDLR, 0x10);
if (dws->tx_dma)
dma_ctrl |= SPI_DMA_TDMAE;
if (dws->rx_dma)
dma_ctrl |= SPI_DMA_RDMAE;
dw_writew(dws, DW_SPI_DMACR, dma_ctrl);
spi_enable_chip(dws, 1);
}
static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
{
struct dma_async_tx_descriptor *txdesc, *rxdesc;
/* 1. setup DMA related registers */
if (cs_change)
dw_spi_dma_setup(dws);
/* 2. Prepare the TX dma transfer */
txdesc = dw_spi_dma_prepare_tx(dws);
/* 3. Prepare the RX dma transfer */
rxdesc = dw_spi_dma_prepare_rx(dws);
/* rx must be started before tx due to spi instinct */
if (rxdesc) {
set_bit(RX_BUSY, &dws->dma_chan_busy);
dmaengine_submit(rxdesc);
dma_async_issue_pending(dws->rxchan);
}
if (txdesc) {
set_bit(TX_BUSY, &dws->dma_chan_busy);
dmaengine_submit(txdesc);
dma_async_issue_pending(dws->txchan);
}
return 0;
}
......
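
The dw-mid rework above drops the shared dma_chan_done counter in favour of per-direction RX_BUSY/TX_BUSY flags, giving the rx and tx channels separate callbacks while still reporting the transfer as finished only once. A self-contained user-space model of that idea, with C11 atomics standing in for the kernel's bit helpers (this is an illustration, not the driver's code):

#include <stdatomic.h>
#include <stdio.h>

#define RX_BUSY	(1u << 0)
#define TX_BUSY	(1u << 1)

static atomic_uint chan_busy;		/* models dws->dma_chan_busy */

static void transfer_done(void)
{
	puts("whole SPI transfer done");	/* stands in for dw_spi_xfer_done() */
}

/* Each callback atomically clears its own flag; whichever callback sees the
 * other flag already cleared is the last one to finish and signals completion
 * exactly once. */
static void rx_done(void)
{
	unsigned int prev = atomic_fetch_and(&chan_busy, ~RX_BUSY);

	if (prev & TX_BUSY)
		return;			/* tx still in flight */
	transfer_done();
}

static void tx_done(void)
{
	unsigned int prev = atomic_fetch_and(&chan_busy, ~TX_BUSY);

	if (prev & RX_BUSY)
		return;			/* rx still in flight */
	transfer_done();
}

int main(void)
{
	/* Both flags are set before the descriptors are submitted,
	 * cf. the set_bit() calls in mid_spi_dma_transfer() above. */
	atomic_store(&chan_busy, RX_BUSY | TX_BUSY);

	tx_done();	/* tx completes first: rx still busy, nothing reported */
	rx_done();	/* rx completes last: completion reported here */
	return 0;
}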
@@ -139,7 +139,7 @@ struct dw_spi {
struct scatterlist tx_sgl;
struct dma_chan *rxchan;
struct scatterlist rx_sgl;
int dma_chan_done;
unsigned long dma_chan_busy;
struct device *dma_dev;
dma_addr_t dma_addr; /* phy address of the Data register */
struct dw_spi_dma_ops *dma_ops;
......
@@ -56,12 +56,15 @@ void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi)
qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
QE_CR_PROTOCOL_UNSPECIFIED, 0);
} else {
cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
if (mspi->flags & SPI_CPM1) {
out_be32(&mspi->pram->rstate, 0);
out_be16(&mspi->pram->rbptr,
in_be16(&mspi->pram->rbase));
out_be32(&mspi->pram->tstate, 0);
out_be16(&mspi->pram->tbptr,
in_be16(&mspi->pram->tbase));
} else {
cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
}
}
}
......