Commit bf931a01 authored by Linus Torvalds

Merge branch 'next-spi' of git://git.secretlab.ca/git/linux-2.6

* 'next-spi' of git://git.secretlab.ca/git/linux-2.6:
  spi: spi_txx9.c: use resource_size()
  spi: spi_sh_sci.c: use resource_size()
  spi: spi_mpc8xxx.c: use resource_size()
  spi: spi_bfin5xx.c: use resource_size()
  spi: atmel_spi.c: use resource_size()
  spi: Add s3c64xx SPI Controller driver
  atmel_spi: fix dma addr calculation for len > BUFFER_SIZE
  spi_s3c24xx: add FIQ pseudo-DMA support
  spi: controller driver for Designware SPI core
  spidev: add proper section markers
  spidev: use DECLARE_BITMAP instead of declaring the array
parents 4e46aa08 d53342bf
@@ -18,6 +18,8 @@ struct s3c2410_spi_info {
unsigned int num_cs; /* total chipselects */
int bus_num; /* bus number to use. */
unsigned int use_fiq:1; /* use fiq */
void (*gpio_setup)(struct s3c2410_spi_info *spi, int enable);
void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol);
};
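/*
 * Illustrative board-file usage (hypothetical, not part of this patch):
 * a machine opts in to the FIQ pseudo-DMA path by setting .use_fiq in
 * its SPI platform data, e.g.
 *
 *	static struct s3c2410_spi_info mach_spi0_pdata = {
 *		.num_cs		= 1,
 *		.bus_num	= 0,
 *		.use_fiq	= 1,
 *	};
 */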
@@ -216,6 +216,17 @@ config SPI_S3C24XX
help
SPI driver for Samsung S3C24XX series ARM SoCs
config SPI_S3C24XX_FIQ
bool "S3C24XX driver with FIQ pseudo-DMA"
depends on SPI_S3C24XX
select FIQ
help
Enable FIQ support for the S3C24XX SPI driver to provide pseudo-DMA
by using the fast-interrupt request framework. This allows the
driver to get DMA-like performance when there are either
no free DMA channels, or when doing transfers that require both
TX and RX data paths.
config SPI_S3C24XX_GPIO
tristate "Samsung S3C24XX series SPI by GPIO"
depends on ARCH_S3C2410 && EXPERIMENTAL
@@ -226,6 +237,13 @@ config SPI_S3C24XX_GPIO
the inbuilt hardware cannot provide the transfer mode, or
where the board is using non hardware connected pins.
config SPI_S3C64XX
tristate "Samsung S3C64XX series type SPI"
depends on ARCH_S3C64XX && EXPERIMENTAL
select S3C64XX_DMA
help
SPI driver for Samsung S3C64XX and newer SoCs.
config SPI_SH_MSIOF
tristate "SuperH MSIOF SPI controller"
depends on SUPERH && HAVE_CLK
@@ -289,6 +307,16 @@ config SPI_NUC900
# Add new SPI master controllers in alphabetical order above this line
#
config SPI_DESIGNWARE
bool "DesignWare SPI controller core support"
depends on SPI_MASTER
help
General driver for the SPI controller core from DesignWare.
config SPI_DW_PCI
tristate "PCI interface driver for DW SPI core"
depends on SPI_DESIGNWARE && PCI
#
# There are lots of SPI device types, with sensors and memory
# being probably the most widely used ones.
@@ -16,6 +16,8 @@ obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o
obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o
obj-$(CONFIG_SPI_DW_PCI) += dw_spi_pci.o
obj-$(CONFIG_SPI_GPIO) += spi_gpio.o
obj-$(CONFIG_SPI_IMX) += spi_imx.o
obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
@@ -30,7 +32,8 @@ obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o
obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o
obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o
obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o
obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o
obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o
obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o
@@ -39,6 +42,11 @@ obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o
obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o
obj-$(CONFIG_SPI_NUC900) += spi_nuc900.o
# special build for s3c24xx spi driver with fiq support
spi_s3c24xx_hw-y := spi_s3c24xx.o
spi_s3c24xx_hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi_s3c24xx_fiq.o
# ... add above this line ...
# SPI protocol drivers (device/link on bus)
@@ -189,14 +189,14 @@ static void atmel_spi_next_xfer_data(struct spi_master *master,
/* use scratch buffer only when rx or tx data is unspecified */
if (xfer->rx_buf)
*rx_dma = xfer->rx_dma + xfer->len - len;
*rx_dma = xfer->rx_dma + xfer->len - *plen;
else {
*rx_dma = as->buffer_dma;
if (len > BUFFER_SIZE)
len = BUFFER_SIZE;
}
if (xfer->tx_buf)
*tx_dma = xfer->tx_dma + xfer->len - len;
*tx_dma = xfer->tx_dma + xfer->len - *plen;
else {
*tx_dma = as->buffer_dma;
if (len > BUFFER_SIZE)
@@ -788,7 +788,7 @@ static int __init atmel_spi_probe(struct platform_device *pdev)
spin_lock_init(&as->lock);
INIT_LIST_HEAD(&as->queue);
as->pdev = pdev;
as->regs = ioremap(regs->start, (regs->end - regs->start) + 1);
as->regs = ioremap(regs->start, resource_size(regs));
if (!as->regs)
goto out_free_buffer;
as->irq = irq;
/*
* dw_spi.c - DesignWare SPI core controller driver (refer to pxa2xx_spi.c)
*
* Copyright (c) 2009, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/spi/dw_spi.h>
#include <linux/spi/spi.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
#define START_STATE ((void *)0)
#define RUNNING_STATE ((void *)1)
#define DONE_STATE ((void *)2)
#define ERROR_STATE ((void *)-1)
#define QUEUE_RUNNING 0
#define QUEUE_STOPPED 1
#define MRST_SPI_DEASSERT 0
#define MRST_SPI_ASSERT 1
/* Slave spi_dev related */
struct chip_data {
u16 cr0;
u8 cs; /* chip select pin */
u8 n_bytes; /* current is a 1/2/4 byte op */
u8 tmode; /* TR/TO/RO/EEPROM */
u8 type; /* SPI/SSP/MicroWire */
u8 poll_mode; /* 1 means use poll mode */
u32 dma_width;
u32 rx_threshold;
u32 tx_threshold;
u8 enable_dma;
u8 bits_per_word;
u16 clk_div; /* baud rate divider */
u32 speed_hz; /* baud rate */
int (*write)(struct dw_spi *dws);
int (*read)(struct dw_spi *dws);
void (*cs_control)(u32 command);
};
#ifdef CONFIG_DEBUG_FS
static int spi_show_regs_open(struct inode *inode, struct file *file)
{
file->private_data = inode->i_private;
return 0;
}
#define SPI_REGS_BUFSIZE 1024
static ssize_t spi_show_regs(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct dw_spi *dws;
char *buf;
u32 len = 0;
ssize_t ret;
dws = file->private_data;
buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
if (!buf)
return 0;
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"MRST SPI0 registers:\n");
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"=================================\n");
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"CTRL0: \t\t0x%08x\n", dw_readl(dws, ctrl0));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"CTRL1: \t\t0x%08x\n", dw_readl(dws, ctrl1));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"SSIENR: \t0x%08x\n", dw_readl(dws, ssienr));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"SER: \t\t0x%08x\n", dw_readl(dws, ser));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"BAUDR: \t\t0x%08x\n", dw_readl(dws, baudr));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"TXFTLR: \t0x%08x\n", dw_readl(dws, txfltr));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"RXFTLR: \t0x%08x\n", dw_readl(dws, rxfltr));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"TXFLR: \t\t0x%08x\n", dw_readl(dws, txflr));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"RXFLR: \t\t0x%08x\n", dw_readl(dws, rxflr));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"SR: \t\t0x%08x\n", dw_readl(dws, sr));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"IMR: \t\t0x%08x\n", dw_readl(dws, imr));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"ISR: \t\t0x%08x\n", dw_readl(dws, isr));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"DMACR: \t\t0x%08x\n", dw_readl(dws, dmacr));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"DMATDLR: \t0x%08x\n", dw_readl(dws, dmatdlr));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"DMARDLR: \t0x%08x\n", dw_readl(dws, dmardlr));
len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
"=================================\n");
ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
return ret;
}
static const struct file_operations mrst_spi_regs_ops = {
.owner = THIS_MODULE,
.open = spi_show_regs_open,
.read = spi_show_regs,
};
static int mrst_spi_debugfs_init(struct dw_spi *dws)
{
dws->debugfs = debugfs_create_dir("mrst_spi", NULL);
if (!dws->debugfs)
return -ENOMEM;
debugfs_create_file("registers", S_IFREG | S_IRUGO,
dws->debugfs, (void *)dws, &mrst_spi_regs_ops);
return 0;
}
static void mrst_spi_debugfs_remove(struct dw_spi *dws)
{
if (dws->debugfs)
debugfs_remove_recursive(dws->debugfs);
}
#else
static inline int mrst_spi_debugfs_init(struct dw_spi *dws)
{
return 0;
}
static inline void mrst_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */
static void wait_till_not_busy(struct dw_spi *dws)
{
unsigned long end = jiffies + usecs_to_jiffies(1000);
while (time_before(jiffies, end)) {
if (!(dw_readw(dws, sr) & SR_BUSY))
return;
}
dev_err(&dws->master->dev,
"DW SPI: Stutus keeps busy for 1000us after a read/write!\n");
}
static void flush(struct dw_spi *dws)
{
while (dw_readw(dws, sr) & SR_RF_NOT_EMPT)
dw_readw(dws, dr);
wait_till_not_busy(dws);
}
static void null_cs_control(u32 command)
{
}
static int null_writer(struct dw_spi *dws)
{
u8 n_bytes = dws->n_bytes;
if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
|| (dws->tx == dws->tx_end))
return 0;
dw_writew(dws, dr, 0);
dws->tx += n_bytes;
wait_till_not_busy(dws);
return 1;
}
static int null_reader(struct dw_spi *dws)
{
u8 n_bytes = dws->n_bytes;
while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
&& (dws->rx < dws->rx_end)) {
dw_readw(dws, dr);
dws->rx += n_bytes;
}
wait_till_not_busy(dws);
return dws->rx == dws->rx_end;
}
static int u8_writer(struct dw_spi *dws)
{
if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
|| (dws->tx == dws->tx_end))
return 0;
dw_writew(dws, dr, *(u8 *)(dws->tx));
++dws->tx;
wait_till_not_busy(dws);
return 1;
}
static int u8_reader(struct dw_spi *dws)
{
while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
&& (dws->rx < dws->rx_end)) {
*(u8 *)(dws->rx) = dw_readw(dws, dr);
++dws->rx;
}
wait_till_not_busy(dws);
return dws->rx == dws->rx_end;
}
static int u16_writer(struct dw_spi *dws)
{
if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
|| (dws->tx == dws->tx_end))
return 0;
dw_writew(dws, dr, *(u16 *)(dws->tx));
dws->tx += 2;
wait_till_not_busy(dws);
return 1;
}
static int u16_reader(struct dw_spi *dws)
{
u16 temp;
while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
&& (dws->rx < dws->rx_end)) {
temp = dw_readw(dws, dr);
*(u16 *)(dws->rx) = temp;
dws->rx += 2;
}
wait_till_not_busy(dws);
return dws->rx == dws->rx_end;
}
static void *next_transfer(struct dw_spi *dws)
{
struct spi_message *msg = dws->cur_msg;
struct spi_transfer *trans = dws->cur_transfer;
/* Move to next transfer */
if (trans->transfer_list.next != &msg->transfers) {
dws->cur_transfer =
list_entry(trans->transfer_list.next,
struct spi_transfer,
transfer_list);
return RUNNING_STATE;
} else
return DONE_STATE;
}
/*
* Note: the protocol driver first prepares DMA-capable memory, so this
* function only needs to translate the virtual addresses to physical ones.
*/
static int map_dma_buffers(struct dw_spi *dws)
{
if (!dws->cur_msg->is_dma_mapped || !dws->dma_inited
|| !dws->cur_chip->enable_dma)
return 0;
if (dws->cur_transfer->tx_dma)
dws->tx_dma = dws->cur_transfer->tx_dma;
if (dws->cur_transfer->rx_dma)
dws->rx_dma = dws->cur_transfer->rx_dma;
return 1;
}
/* Caller already set message->status; dma and pio irqs are blocked */
static void giveback(struct dw_spi *dws)
{
struct spi_transfer *last_transfer;
unsigned long flags;
struct spi_message *msg;
spin_lock_irqsave(&dws->lock, flags);
msg = dws->cur_msg;
dws->cur_msg = NULL;
dws->cur_transfer = NULL;
dws->prev_chip = dws->cur_chip;
dws->cur_chip = NULL;
dws->dma_mapped = 0;
queue_work(dws->workqueue, &dws->pump_messages);
spin_unlock_irqrestore(&dws->lock, flags);
last_transfer = list_entry(msg->transfers.prev,
struct spi_transfer,
transfer_list);
if (!last_transfer->cs_change)
dws->cs_control(MRST_SPI_DEASSERT);
msg->state = NULL;
if (msg->complete)
msg->complete(msg->context);
}
static void int_error_stop(struct dw_spi *dws, const char *msg)
{
/* Stop and reset hw */
flush(dws);
spi_enable_chip(dws, 0);
dev_err(&dws->master->dev, "%s\n", msg);
dws->cur_msg->state = ERROR_STATE;
tasklet_schedule(&dws->pump_transfers);
}
static void transfer_complete(struct dw_spi *dws)
{
/* Update the total bytes transferred (the actual bytes read) */
dws->cur_msg->actual_length += dws->len;
/* Move to next transfer */
dws->cur_msg->state = next_transfer(dws);
/* Handle end of message */
if (dws->cur_msg->state == DONE_STATE) {
dws->cur_msg->status = 0;
giveback(dws);
} else
tasklet_schedule(&dws->pump_transfers);
}
static irqreturn_t interrupt_transfer(struct dw_spi *dws)
{
u16 irq_status, irq_mask = 0x3f;
irq_status = dw_readw(dws, isr) & irq_mask;
/* Error handling */
if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
dw_readw(dws, txoicr);
dw_readw(dws, rxoicr);
dw_readw(dws, rxuicr);
int_error_stop(dws, "interrupt_transfer: fifo overrun");
return IRQ_HANDLED;
}
/* INT comes from tx */
if (dws->tx && (irq_status & SPI_INT_TXEI)) {
while (dws->tx < dws->tx_end)
dws->write(dws);
if (dws->tx == dws->tx_end) {
spi_mask_intr(dws, SPI_INT_TXEI);
transfer_complete(dws);
}
}
/* INT comes from rx */
if (dws->rx && (irq_status & SPI_INT_RXFI)) {
if (dws->read(dws))
transfer_complete(dws);
}
return IRQ_HANDLED;
}
static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
struct dw_spi *dws = dev_id;
if (!dws->cur_msg) {
spi_mask_intr(dws, SPI_INT_TXEI);
/* Never fail */
return IRQ_HANDLED;
}
return dws->transfer_handler(dws);
}
/* Must be called inside pump_transfers() */
static void poll_transfer(struct dw_spi *dws)
{
if (dws->tx) {
while (dws->write(dws))
dws->read(dws);
}
dws->read(dws);
transfer_complete(dws);
}
static void dma_transfer(struct dw_spi *dws, int cs_change)
{
}
static void pump_transfers(unsigned long data)
{
struct dw_spi *dws = (struct dw_spi *)data;
struct spi_message *message = NULL;
struct spi_transfer *transfer = NULL;
struct spi_transfer *previous = NULL;
struct spi_device *spi = NULL;
struct chip_data *chip = NULL;
u8 bits = 0;
u8 imask = 0;
u8 cs_change = 0;
u16 clk_div = 0;
u32 speed = 0;
u32 cr0 = 0;
/* Get current state information */
message = dws->cur_msg;
transfer = dws->cur_transfer;
chip = dws->cur_chip;
spi = message->spi;
if (message->state == ERROR_STATE) {
message->status = -EIO;
goto early_exit;
}
/* Handle end of message */
if (message->state == DONE_STATE) {
message->status = 0;
goto early_exit;
}
/* Delay if requested at end of transfer*/
if (message->state == RUNNING_STATE) {
previous = list_entry(transfer->transfer_list.prev,
struct spi_transfer,
transfer_list);
if (previous->delay_usecs)
udelay(previous->delay_usecs);
}
dws->n_bytes = chip->n_bytes;
dws->dma_width = chip->dma_width;
dws->cs_control = chip->cs_control;
dws->rx_dma = transfer->rx_dma;
dws->tx_dma = transfer->tx_dma;
dws->tx = (void *)transfer->tx_buf;
dws->tx_end = dws->tx + transfer->len;
dws->rx = transfer->rx_buf;
dws->rx_end = dws->rx + transfer->len;
dws->write = dws->tx ? chip->write : null_writer;
dws->read = dws->rx ? chip->read : null_reader;
dws->cs_change = transfer->cs_change;
dws->len = dws->cur_transfer->len;
if (chip != dws->prev_chip)
cs_change = 1;
cr0 = chip->cr0;
/* Handle per transfer options for bpw and speed */
if (transfer->speed_hz) {
speed = chip->speed_hz;
if (transfer->speed_hz != speed) {
speed = transfer->speed_hz;
if (speed > dws->max_freq) {
printk(KERN_ERR "MRST SPI0: unsupported"
"freq: %dHz\n", speed);
message->status = -EIO;
goto early_exit;
}
/* clk_div doesn't support odd number */
clk_div = dws->max_freq / speed;
clk_div = (clk_div >> 1) << 1;
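/*
 * Example (illustrative numbers): with max_freq = 25 MHz, a 5 MHz
 * request gives 25000000 / 5000000 = 5, forced down to the even
 * divider 4.
 */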
chip->speed_hz = speed;
chip->clk_div = clk_div;
}
}
if (transfer->bits_per_word) {
bits = transfer->bits_per_word;
switch (bits) {
case 8:
dws->n_bytes = 1;
dws->dma_width = 1;
dws->read = (dws->read != null_reader) ?
u8_reader : null_reader;
dws->write = (dws->write != null_writer) ?
u8_writer : null_writer;
break;
case 16:
dws->n_bytes = 2;
dws->dma_width = 2;
dws->read = (dws->read != null_reader) ?
u16_reader : null_reader;
dws->write = (dws->write != null_writer) ?
u16_writer : null_writer;
break;
default:
printk(KERN_ERR "MRST SPI0: unsupported bits:"
"%db\n", bits);
message->status = -EIO;
goto early_exit;
}
cr0 = (bits - 1)
| (chip->type << SPI_FRF_OFFSET)
| (spi->mode << SPI_MODE_OFFSET)
| (chip->tmode << SPI_TMOD_OFFSET);
}
message->state = RUNNING_STATE;
/* Check if current transfer is a DMA transaction */
dws->dma_mapped = map_dma_buffers(dws);
if (!dws->dma_mapped && !chip->poll_mode) {
if (dws->rx)
imask |= SPI_INT_RXFI;
if (dws->tx)
imask |= SPI_INT_TXEI;
dws->transfer_handler = interrupt_transfer;
}
/*
* Reprogram registers only if
* 1. chip select changes
* 2. clk_div is changed
* 3. control value changes
*/
if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div) {
spi_enable_chip(dws, 0);
if (dw_readw(dws, ctrl0) != cr0)
dw_writew(dws, ctrl0, cr0);
/* Set the interrupt mask; for poll mode just disable all interrupts */
spi_mask_intr(dws, 0xff);
if (!chip->poll_mode)
spi_umask_intr(dws, imask);
spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
spi_chip_sel(dws, spi->chip_select);
spi_enable_chip(dws, 1);
if (cs_change)
dws->prev_chip = chip;
}
if (dws->dma_mapped)
dma_transfer(dws, cs_change);
if (chip->poll_mode)
poll_transfer(dws);
return;
early_exit:
giveback(dws);
return;
}
static void pump_messages(struct work_struct *work)
{
struct dw_spi *dws =
container_of(work, struct dw_spi, pump_messages);
unsigned long flags;
/* Lock queue and check for queue work */
spin_lock_irqsave(&dws->lock, flags);
if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {
dws->busy = 0;
spin_unlock_irqrestore(&dws->lock, flags);
return;
}
/* Make sure we are not already running a message */
if (dws->cur_msg) {
spin_unlock_irqrestore(&dws->lock, flags);
return;
}
/* Extract head of queue */
dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);
list_del_init(&dws->cur_msg->queue);
/* Initial message state*/
dws->cur_msg->state = START_STATE;
dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
struct spi_transfer,
transfer_list);
dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);
/* Mark as busy and launch transfers */
tasklet_schedule(&dws->pump_transfers);
dws->busy = 1;
spin_unlock_irqrestore(&dws->lock, flags);
}
/* spi_device uses this to queue its spi_message */
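/*
 * For context (illustrative only, not part of this driver): protocol
 * drivers reach dw_spi_transfer() through the SPI core, e.g.
 *
 *	struct spi_transfer t = { .tx_buf = buf, .len = sizeof(buf) };
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	spi_async(spi, &m);
 */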
static int dw_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
struct dw_spi *dws = spi_master_get_devdata(spi->master);
unsigned long flags;
spin_lock_irqsave(&dws->lock, flags);
if (dws->run == QUEUE_STOPPED) {
spin_unlock_irqrestore(&dws->lock, flags);
return -ESHUTDOWN;
}
msg->actual_length = 0;
msg->status = -EINPROGRESS;
msg->state = START_STATE;
list_add_tail(&msg->queue, &dws->queue);
if (dws->run == QUEUE_RUNNING && !dws->busy) {
if (dws->cur_transfer || dws->cur_msg)
queue_work(dws->workqueue,
&dws->pump_messages);
else {
/* If no other data transaction is in flight, just go */
spin_unlock_irqrestore(&dws->lock, flags);
pump_messages(&dws->pump_messages);
return 0;
}
}
spin_unlock_irqrestore(&dws->lock, flags);
return 0;
}
/* This may be called twice for each spi dev */
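/*
 * Illustrative only: a board may pass per-device hints to this setup hook
 * through spi_device->controller_data (the field values here are
 * hypothetical), e.g.
 *
 *	static struct dw_spi_chip board_chip_hints = {
 *		.poll_mode  = 1,
 *		.type       = SSI_MOTO_SPI,
 *		.enable_dma = 0,
 *	};
 */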
static int dw_spi_setup(struct spi_device *spi)
{
struct dw_spi_chip *chip_info = NULL;
struct chip_data *chip;
if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
return -EINVAL;
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
if (!chip) {
chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
if (!chip)
return -ENOMEM;
chip->cs_control = null_cs_control;
chip->enable_dma = 0;
}
/*
* Protocol drivers may change the chip settings, so...
* if chip_info exists, use it
*/
chip_info = spi->controller_data;
/* chip_info doesn't always exist */
if (chip_info) {
if (chip_info->cs_control)
chip->cs_control = chip_info->cs_control;
chip->poll_mode = chip_info->poll_mode;
chip->type = chip_info->type;
chip->rx_threshold = 0;
chip->tx_threshold = 0;
chip->enable_dma = chip_info->enable_dma;
}
if (spi->bits_per_word <= 8) {
chip->n_bytes = 1;
chip->dma_width = 1;
chip->read = u8_reader;
chip->write = u8_writer;
} else if (spi->bits_per_word <= 16) {
chip->n_bytes = 2;
chip->dma_width = 2;
chip->read = u16_reader;
chip->write = u16_writer;
} else {
/* Never take >16b case for MRST SPIC */
dev_err(&spi->dev, "invalid wordsize\n");
return -EINVAL;
}
chip->bits_per_word = spi->bits_per_word;
chip->speed_hz = spi->max_speed_hz;
if (chip->speed_hz)
chip->clk_div = 25000000 / chip->speed_hz;
else
chip->clk_div = 8; /* default value */
chip->tmode = 0; /* Tx & Rx */
/* Default SPI mode is SCPOL = 0, SCPH = 0 */
chip->cr0 = (chip->bits_per_word - 1)
| (chip->type << SPI_FRF_OFFSET)
| (spi->mode << SPI_MODE_OFFSET)
| (chip->tmode << SPI_TMOD_OFFSET);
spi_set_ctldata(spi, chip);
return 0;
}
static void dw_spi_cleanup(struct spi_device *spi)
{
struct chip_data *chip = spi_get_ctldata(spi);
kfree(chip);
}
static int __init init_queue(struct dw_spi *dws)
{
INIT_LIST_HEAD(&dws->queue);
spin_lock_init(&dws->lock);
dws->run = QUEUE_STOPPED;
dws->busy = 0;
tasklet_init(&dws->pump_transfers,
pump_transfers, (unsigned long)dws);
INIT_WORK(&dws->pump_messages, pump_messages);
dws->workqueue = create_singlethread_workqueue(
dev_name(dws->master->dev.parent));
if (dws->workqueue == NULL)
return -EBUSY;
return 0;
}
static int start_queue(struct dw_spi *dws)
{
unsigned long flags;
spin_lock_irqsave(&dws->lock, flags);
if (dws->run == QUEUE_RUNNING || dws->busy) {
spin_unlock_irqrestore(&dws->lock, flags);
return -EBUSY;
}
dws->run = QUEUE_RUNNING;
dws->cur_msg = NULL;
dws->cur_transfer = NULL;
dws->cur_chip = NULL;
dws->prev_chip = NULL;
spin_unlock_irqrestore(&dws->lock, flags);
queue_work(dws->workqueue, &dws->pump_messages);
return 0;
}
static int stop_queue(struct dw_spi *dws)
{
unsigned long flags;
unsigned limit = 50;
int status = 0;
spin_lock_irqsave(&dws->lock, flags);
dws->run = QUEUE_STOPPED;
while (!list_empty(&dws->queue) && dws->busy && limit--) {
spin_unlock_irqrestore(&dws->lock, flags);
msleep(10);
spin_lock_irqsave(&dws->lock, flags);
}
if (!list_empty(&dws->queue) || dws->busy)
status = -EBUSY;
spin_unlock_irqrestore(&dws->lock, flags);
return status;
}
static int destroy_queue(struct dw_spi *dws)
{
int status;
status = stop_queue(dws);
if (status != 0)
return status;
destroy_workqueue(dws->workqueue);
return 0;
}
/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct dw_spi *dws)
{
spi_enable_chip(dws, 0);
spi_mask_intr(dws, 0xff);
spi_enable_chip(dws, 1);
flush(dws);
}
int __devinit dw_spi_add_host(struct dw_spi *dws)
{
struct spi_master *master;
int ret;
BUG_ON(dws == NULL);
master = spi_alloc_master(dws->parent_dev, 0);
if (!master) {
ret = -ENOMEM;
goto exit;
}
dws->master = master;
dws->type = SSI_MOTO_SPI;
dws->prev_chip = NULL;
dws->dma_inited = 0;
dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
ret = request_irq(dws->irq, dw_spi_irq, 0,
"dw_spi", dws);
if (ret < 0) {
dev_err(&master->dev, "can not get IRQ\n");
goto err_free_master;
}
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->bus_num = dws->bus_num;
master->num_chipselect = dws->num_cs;
master->cleanup = dw_spi_cleanup;
master->setup = dw_spi_setup;
master->transfer = dw_spi_transfer;
dws->dma_inited = 0;
/* Basic HW init */
spi_hw_init(dws);
/* Initialize and start the queue */
ret = init_queue(dws);
if (ret) {
dev_err(&master->dev, "problem initializing queue\n");
goto err_diable_hw;
}
ret = start_queue(dws);
if (ret) {
dev_err(&master->dev, "problem starting queue\n");
goto err_diable_hw;
}
spi_master_set_devdata(master, dws);
ret = spi_register_master(master);
if (ret) {
dev_err(&master->dev, "problem registering spi master\n");
goto err_queue_alloc;
}
mrst_spi_debugfs_init(dws);
return 0;
err_queue_alloc:
destroy_queue(dws);
err_diable_hw:
spi_enable_chip(dws, 0);
free_irq(dws->irq, dws);
err_free_master:
spi_master_put(master);
exit:
return ret;
}
EXPORT_SYMBOL(dw_spi_add_host);
void __devexit dw_spi_remove_host(struct dw_spi *dws)
{
int status = 0;
if (!dws)
return;
mrst_spi_debugfs_remove(dws);
/* Remove the queue */
status = destroy_queue(dws);
if (status != 0)
dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not "
"complete, message memory not freed\n");
spi_enable_chip(dws, 0);
/* Disable clk */
spi_set_clk(dws, 0);
free_irq(dws->irq, dws);
/* Disconnect from the SPI framework */
spi_unregister_master(dws->master);
}
int dw_spi_suspend_host(struct dw_spi *dws)
{
int ret = 0;
ret = stop_queue(dws);
if (ret)
return ret;
spi_enable_chip(dws, 0);
spi_set_clk(dws, 0);
return ret;
}
EXPORT_SYMBOL(dw_spi_suspend_host);
int dw_spi_resume_host(struct dw_spi *dws)
{
int ret;
spi_hw_init(dws);
ret = start_queue(dws);
if (ret)
dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
return ret;
}
EXPORT_SYMBOL(dw_spi_resume_host);
MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");
/*
* dw_spi_pci.c - PCI interface driver for DW SPI Core
*
* Copyright (c) 2009, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/spi/dw_spi.h>
#include <linux/spi/spi.h>
#define DRIVER_NAME "dw_spi_pci"
struct dw_spi_pci {
struct pci_dev *pdev;
struct dw_spi dws;
};
static int __devinit spi_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct dw_spi_pci *dwpci;
struct dw_spi *dws;
int pci_bar = 0;
int ret;
printk(KERN_INFO "DW: found PCI SPI controller(ID: %04x:%04x)\n",
pdev->vendor, pdev->device);
ret = pci_enable_device(pdev);
if (ret)
return ret;
dwpci = kzalloc(sizeof(struct dw_spi_pci), GFP_KERNEL);
if (!dwpci) {
ret = -ENOMEM;
goto err_disable;
}
dwpci->pdev = pdev;
dws = &dwpci->dws;
/* Get basic io resource and map it */
dws->paddr = pci_resource_start(pdev, pci_bar);
dws->iolen = pci_resource_len(pdev, pci_bar);
ret = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
if (ret)
goto err_kfree;
dws->regs = ioremap_nocache((unsigned long)dws->paddr,
pci_resource_len(pdev, pci_bar));
if (!dws->regs) {
ret = -ENOMEM;
goto err_release_reg;
}
dws->parent_dev = &pdev->dev;
dws->bus_num = 0;
dws->num_cs = 4;
dws->max_freq = 25000000; /* for Moorestown */
dws->irq = pdev->irq;
ret = dw_spi_add_host(dws);
if (ret)
goto err_unmap;
/* PCI hook and SPI hook use the same drv data */
pci_set_drvdata(pdev, dwpci);
return 0;
err_unmap:
iounmap(dws->regs);
err_release_reg:
pci_release_region(pdev, pci_bar);
err_kfree:
kfree(dwpci);
err_disable:
pci_disable_device(pdev);
return ret;
}
static void __devexit spi_pci_remove(struct pci_dev *pdev)
{
struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
pci_set_drvdata(pdev, NULL);
iounmap(dwpci->dws.regs);
pci_release_region(pdev, 0);
kfree(dwpci);
pci_disable_device(pdev);
}
#ifdef CONFIG_PM
static int spi_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
int ret;
ret = dw_spi_suspend_host(&dwpci->dws);
if (ret)
return ret;
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
return ret;
}
static int spi_resume(struct pci_dev *pdev)
{
struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
int ret;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
ret = pci_enable_device(pdev);
if (ret)
return ret;
return dw_spi_resume_host(&dwpci->dws);
}
#else
#define spi_suspend NULL
#define spi_resume NULL
#endif
static const struct pci_device_id pci_ids[] __devinitdata = {
/* Intel Moorestown platform SPI controller 0 */
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
{},
};
static struct pci_driver dw_spi_driver = {
.name = DRIVER_NAME,
.id_table = pci_ids,
.probe = spi_pci_probe,
.remove = __devexit_p(spi_pci_remove),
.suspend = spi_suspend,
.resume = spi_resume,
};
static int __init mrst_spi_init(void)
{
return pci_register_driver(&dw_spi_driver);
}
static void __exit mrst_spi_exit(void)
{
pci_unregister_driver(&dw_spi_driver);
}
module_init(mrst_spi_init);
module_exit(mrst_spi_exit);
MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("PCI interface driver for DW SPI Core");
MODULE_LICENSE("GPL v2");
@@ -1294,7 +1294,7 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
goto out_error_get_res;
}
drv_data->regs_base = ioremap(res->start, (res->end - res->start + 1));
drv_data->regs_base = ioremap(res->start, resource_size(res));
if (drv_data->regs_base == NULL) {
dev_err(dev, "Cannot map IO\n");
status = -ENXIO;
@@ -1013,7 +1013,7 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
init_completion(&mpc8xxx_spi->done);
mpc8xxx_spi->base = ioremap(mem->start, mem->end - mem->start + 1);
mpc8xxx_spi->base = ioremap(mem->start, resource_size(mem));
if (mpc8xxx_spi->base == NULL) {
ret = -ENOMEM;
goto err_ioremap;
/* linux/drivers/spi/spi_s3c24xx.c
*
* Copyright (c) 2006 Ben Dooks
* Copyright (c) 2006 Simtec Electronics
* Copyright 2006-2009 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* This program is free software; you can redistribute it and/or modify
@@ -28,6 +28,11 @@
#include <plat/regs-spi.h>
#include <mach/spi.h>
#include <plat/fiq.h>
#include <asm/fiq.h>
#include "spi_s3c24xx_fiq.h"
/**
* s3c24xx_spi_devstate - per device data
* @hz: Last frequency calculated for @sppre field.
@@ -42,6 +47,13 @@ struct s3c24xx_spi_devstate {
u8 sppre;
};
enum spi_fiq_mode {
FIQ_MODE_NONE = 0,
FIQ_MODE_TX = 1,
FIQ_MODE_RX = 2,
FIQ_MODE_TXRX = 3,
};
struct s3c24xx_spi {
/* bitbang has to be first */
struct spi_bitbang bitbang;
@@ -52,6 +64,11 @@ struct s3c24xx_spi {
int len;
int count;
struct fiq_handler fiq_handler;
enum spi_fiq_mode fiq_mode;
unsigned char fiq_inuse;
unsigned char fiq_claimed;
void (*set_cs)(struct s3c2410_spi_info *spi,
int cs, int pol);
@@ -67,6 +84,7 @@ struct s3c24xx_spi {
struct s3c2410_spi_info *pdata;
};
#define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT)
#define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP)
@@ -127,7 +145,7 @@ static int s3c24xx_spi_update_state(struct spi_device *spi,
}
if (spi->mode != cs->mode) {
u8 spcon = SPCON_DEFAULT;
u8 spcon = SPCON_DEFAULT | S3C2410_SPCON_ENSCK;
if (spi->mode & SPI_CPHA)
spcon |= S3C2410_SPCON_CPHA_FMTB;
@@ -214,13 +232,196 @@ static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count)
return hw->tx ? hw->tx[count] : 0;
}
#ifdef CONFIG_SPI_S3C24XX_FIQ
/* Support for FIQ based pseudo-DMA to improve the transfer speed.
*
* This code uses the assembly helper in spi_s3c24xx_fiq.S which is
* used by the FIQ core to move data between main memory and the peripheral
* block. Since this is code running on the processor, there is no problem
* with cache coherency of the buffers, so we can use any buffer we like.
*/
/**
* struct spi_fiq_code - FIQ code and header
* @length: The length of the code fragment, excluding this header.
* @ack_offset: The offset from @data to the word to place the IRQ ACK bit at.
* @data: The code itself to install as a FIQ handler.
*/
struct spi_fiq_code {
u32 length;
u32 ack_offset;
u8 data[0];
};
extern struct spi_fiq_code s3c24xx_spi_fiq_txrx;
extern struct spi_fiq_code s3c24xx_spi_fiq_tx;
extern struct spi_fiq_code s3c24xx_spi_fiq_rx;
/**
* ack_bit - turn IRQ into IRQ acknowledgement bit
* @irq: The interrupt number
*
* Returns the bit to write to the interrupt acknowledge register.
*/
static inline u32 ack_bit(unsigned int irq)
{
return 1 << (irq - IRQ_EINT0);
}
/**
* s3c24xx_spi_tryfiq - attempt to claim and setup FIQ for transfer
* @hw: The hardware state.
*
* Claim the FIQ handler (only one can be active at any one time) and
* then setup the correct transfer code for this transfer.
*
* This call updates all the necessary state information if successful,
* so the caller does not need to do anything more than start the transfer
* as normal, since the IRQ will have been re-routed to the FIQ handler.
*/
void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw)
{
struct pt_regs regs;
enum spi_fiq_mode mode;
struct spi_fiq_code *code;
int ret;
if (!hw->fiq_claimed) {
/* try and claim fiq if we haven't got it, and if not
* then return and simply use another transfer method */
ret = claim_fiq(&hw->fiq_handler);
if (ret)
return;
}
if (hw->tx && !hw->rx)
mode = FIQ_MODE_TX;
else if (hw->rx && !hw->tx)
mode = FIQ_MODE_RX;
else
mode = FIQ_MODE_TXRX;
regs.uregs[fiq_rspi] = (long)hw->regs;
regs.uregs[fiq_rrx] = (long)hw->rx;
regs.uregs[fiq_rtx] = (long)hw->tx + 1;
regs.uregs[fiq_rcount] = hw->len - 1;
regs.uregs[fiq_rirq] = (long)S3C24XX_VA_IRQ;
set_fiq_regs(&regs);
if (hw->fiq_mode != mode) {
u32 *ack_ptr;
hw->fiq_mode = mode;
switch (mode) {
case FIQ_MODE_TX:
code = &s3c24xx_spi_fiq_tx;
break;
case FIQ_MODE_RX:
code = &s3c24xx_spi_fiq_rx;
break;
case FIQ_MODE_TXRX:
code = &s3c24xx_spi_fiq_txrx;
break;
default:
code = NULL;
}
BUG_ON(!code);
ack_ptr = (u32 *)&code->data[code->ack_offset];
*ack_ptr = ack_bit(hw->irq);
set_fiq_handler(&code->data, code->length);
}
s3c24xx_set_fiq(hw->irq, true);
hw->fiq_mode = mode;
hw->fiq_inuse = 1;
}
/**
* s3c24xx_spi_fiqop - FIQ core code callback
* @pw: Data registered with the handler
* @release: Whether this is a release or a return.
*
* Called by the FIQ code when another module wants to use the FIQ, so
* return whether we are currently using this or not and then update our
* internal state.
*/
static int s3c24xx_spi_fiqop(void *pw, int release)
{
struct s3c24xx_spi *hw = pw;
int ret = 0;
if (release) {
if (hw->fiq_inuse)
ret = -EBUSY;
/* note, we do not need to unroute the FIQ, as the FIQ
* vector code de-routes it to signal the end of transfer */
hw->fiq_mode = FIQ_MODE_NONE;
hw->fiq_claimed = 0;
} else {
hw->fiq_claimed = 1;
}
return ret;
}
/**
* s3c24xx_spi_initfiq - setup the information for the FIQ core
* @hw: The hardware state.
*
* Setup the fiq_handler block to pass to the FIQ core.
*/
static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *hw)
{
hw->fiq_handler.dev_id = hw;
hw->fiq_handler.name = dev_name(hw->dev);
hw->fiq_handler.fiq_op = s3c24xx_spi_fiqop;
}
/**
* s3c24xx_spi_usefiq - return if we should be using FIQ.
* @hw: The hardware state.
*
* Return true if the platform data specifies that this channel is
* allowed to use the FIQ.
*/
static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *hw)
{
return hw->pdata->use_fiq;
}
/**
* s3c24xx_spi_usingfiq - return if channel is using FIQ
* @spi: The hardware state.
*
* Return whether the channel is currently using the FIQ (separate from
* whether the FIQ is claimed).
*/
static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *spi)
{
return spi->fiq_inuse;
}
#else
static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *s) { }
static inline void s3c24xx_spi_tryfiq(struct s3c24xx_spi *s) { }
static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *s) { return false; }
static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *s) { return false; }
#endif /* CONFIG_SPI_S3C24XX_FIQ */
static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
{
struct s3c24xx_spi *hw = to_hw(spi);
dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
t->tx_buf, t->rx_buf, t->len);
hw->tx = t->tx_buf;
hw->rx = t->rx_buf;
hw->len = t->len;
@@ -228,11 +429,14 @@ static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
init_completion(&hw->done);
hw->fiq_inuse = 0;
if (s3c24xx_spi_usefiq(hw) && t->len >= 3)
s3c24xx_spi_tryfiq(hw);
/* send the first byte */
writeb(hw_txbyte(hw, 0), hw->regs + S3C2410_SPTDAT);
wait_for_completion(&hw->done);
return hw->count;
}
@@ -254,17 +458,27 @@ static irqreturn_t s3c24xx_spi_irq(int irq, void *dev)
goto irq_done;
}
hw->count++;
if (!s3c24xx_spi_usingfiq(hw)) {
hw->count++;
if (hw->rx)
hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT);
if (hw->rx)
hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT);
count++;
count++;
if (count < hw->len)
writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT);
else
complete(&hw->done);
} else {
hw->count = hw->len;
hw->fiq_inuse = 0;
if (hw->rx)
hw->rx[hw->len-1] = readb(hw->regs + S3C2410_SPRDAT);
if (count < hw->len)
writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT);
else
complete(&hw->done);
}
irq_done:
return IRQ_HANDLED;
@@ -322,6 +536,10 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hw);
init_completion(&hw->done);
/* initialise fiq handler */
s3c24xx_spi_initfiq(hw);
/* setup the master state. */
/* the spi->mode bits understood by this driver: */
/* linux/drivers/spi/spi_s3c24xx_fiq.S
*
* Copyright 2009 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C24XX SPI - FIQ pseudo-DMA transfer code
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <mach/map.h>
#include <mach/regs-irq.h>
#include <plat/regs-spi.h>
#include "spi_s3c24xx_fiq.h"
.text
@ entry to these routines is as follows, with the register names
@ defined in fiq.h so that they can be shared with the C files which
@ setup the calling registers.
@
@ fiq_rirq The base of the IRQ registers to find S3C2410_SRCPND
@ fiq_rtmp Temporary register to hold tx/rx data
@ fiq_rspi The base of the SPI register block
@ fiq_rtx The tx buffer pointer
@ fiq_rrx The rx buffer pointer
@ fiq_rcount The number of bytes to move
@ each entry starts with a word entry of how long it is
@ and an offset to the irq acknowledgment word
ENTRY(s3c24xx_spi_fiq_rx)
s3c24xx_spi_fix_rx:
.word fiq_rx_end - fiq_rx_start
.word fiq_rx_irq_ack - fiq_rx_start
fiq_rx_start:
ldr fiq_rtmp, fiq_rx_irq_ack
str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
strb fiq_rtmp, [ fiq_rrx ], #1
mov fiq_rtmp, #0xff
strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
subs fiq_rcount, fiq_rcount, #1
subnes pc, lr, #4 @@ return, still have work to do
@@ set IRQ controller so that next op will trigger IRQ
mov fiq_rtmp, #0
str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
subs pc, lr, #4
fiq_rx_irq_ack:
.word 0
fiq_rx_end:
ENTRY(s3c24xx_spi_fiq_txrx)
s3c24xx_spi_fiq_txrx:
.word fiq_txrx_end - fiq_txrx_start
.word fiq_txrx_irq_ack - fiq_txrx_start
fiq_txrx_start:
ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
strb fiq_rtmp, [ fiq_rrx ], #1
ldr fiq_rtmp, fiq_txrx_irq_ack
str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
ldrb fiq_rtmp, [ fiq_rtx ], #1
strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
subs fiq_rcount, fiq_rcount, #1
subnes pc, lr, #4 @@ return, still have work to do
mov fiq_rtmp, #0
str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
subs pc, lr, #4
fiq_txrx_irq_ack:
.word 0
fiq_txrx_end:
ENTRY(s3c24xx_spi_fiq_tx)
s3c24xx_spi_fix_tx:
.word fiq_tx_end - fiq_tx_start
.word fiq_tx_irq_ack - fiq_tx_start
fiq_tx_start:
ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
ldr fiq_rtmp, fiq_tx_irq_ack
str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
ldrb fiq_rtmp, [ fiq_rtx ], #1
strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
subs fiq_rcount, fiq_rcount, #1
subnes pc, lr, #4 @@ return, still have work to do
mov fiq_rtmp, #0
str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
subs pc, lr, #4
fiq_tx_irq_ack:
.word 0
fiq_tx_end:
.end
/* linux/drivers/spi/spi_s3c24xx_fiq.h
*
* Copyright 2009 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C24XX SPI - FIQ pseudo-DMA transfer support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/* We have R8 through R13 to play with */
#ifdef __ASSEMBLY__
#define __REG_NR(x) r##x
#else
#define __REG_NR(x) (x)
#endif
#define fiq_rspi __REG_NR(8)
#define fiq_rtmp __REG_NR(9)
#define fiq_rrx __REG_NR(10)
#define fiq_rtx __REG_NR(11)
#define fiq_rcount __REG_NR(12)
#define fiq_rirq __REG_NR(13)
/* linux/drivers/spi/spi_s3c64xx.c
*
* Copyright (C) 2009 Samsung Electronics Ltd.
* Jaswinder Singh <jassi.brar@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <mach/dma.h>
#include <plat/spi.h>
/* Registers and bit-fields */
#define S3C64XX_SPI_CH_CFG 0x00
#define S3C64XX_SPI_CLK_CFG 0x04
#define S3C64XX_SPI_MODE_CFG 0x08
#define S3C64XX_SPI_SLAVE_SEL 0x0C
#define S3C64XX_SPI_INT_EN 0x10
#define S3C64XX_SPI_STATUS 0x14
#define S3C64XX_SPI_TX_DATA 0x18
#define S3C64XX_SPI_RX_DATA 0x1C
#define S3C64XX_SPI_PACKET_CNT 0x20
#define S3C64XX_SPI_PENDING_CLR 0x24
#define S3C64XX_SPI_SWAP_CFG 0x28
#define S3C64XX_SPI_FB_CLK 0x2C
#define S3C64XX_SPI_CH_HS_EN (1<<6) /* High Speed Enable */
#define S3C64XX_SPI_CH_SW_RST (1<<5)
#define S3C64XX_SPI_CH_SLAVE (1<<4)
#define S3C64XX_SPI_CPOL_L (1<<3)
#define S3C64XX_SPI_CPHA_B (1<<2)
#define S3C64XX_SPI_CH_RXCH_ON (1<<1)
#define S3C64XX_SPI_CH_TXCH_ON (1<<0)
#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9)
#define S3C64XX_SPI_CLKSEL_SRCSHFT 9
#define S3C64XX_SPI_ENCLK_ENABLE (1<<8)
#define S3C64XX_SPI_PSR_MASK 0xff
#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_WORD (2<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_MASK (3<<29)
#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE (0<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17)
#define S3C64XX_SPI_MODE_RXDMA_ON (1<<2)
#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
#define S3C64XX_SPI_MODE_4BURST (1<<0)
#define S3C64XX_SPI_SLAVE_AUTO (1<<1)
#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0)
#define S3C64XX_SPI_ACT(c) writel(0, (c)->regs + S3C64XX_SPI_SLAVE_SEL)
#define S3C64XX_SPI_DEACT(c) writel(S3C64XX_SPI_SLAVE_SIG_INACT, \
(c)->regs + S3C64XX_SPI_SLAVE_SEL)
#define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
#define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4)
#define S3C64XX_SPI_INT_TX_OVERRUN_EN (1<<3)
#define S3C64XX_SPI_INT_TX_UNDERRUN_EN (1<<2)
#define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1)
#define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0)
#define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5)
#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4)
#define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3)
#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2)
#define S3C64XX_SPI_ST_RX_FIFORDY (1<<1)
#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
#define S3C64XX_SPI_PACKET_CNT_EN (1<<16)
#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4)
#define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3)
#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR (1<<2)
#define S3C64XX_SPI_PND_RX_OVERRUN_CLR (1<<1)
#define S3C64XX_SPI_PND_TRAILING_CLR (1<<0)
#define S3C64XX_SPI_SWAP_RX_HALF_WORD (1<<7)
#define S3C64XX_SPI_SWAP_RX_BYTE (1<<6)
#define S3C64XX_SPI_SWAP_RX_BIT (1<<5)
#define S3C64XX_SPI_SWAP_RX_EN (1<<4)
#define S3C64XX_SPI_SWAP_TX_HALF_WORD (1<<3)
#define S3C64XX_SPI_SWAP_TX_BYTE (1<<2)
#define S3C64XX_SPI_SWAP_TX_BIT (1<<1)
#define S3C64XX_SPI_SWAP_TX_EN (1<<0)
#define S3C64XX_SPI_FBCLK_MSK (3<<0)
#define S3C64XX_SPI_ST_TRLCNTZ(v, i) ((((v) >> (i)->rx_lvl_offset) & \
(((i)->fifo_lvl_mask + 1))) \
? 1 : 0)
#define S3C64XX_SPI_ST_TX_DONE(v, i) ((((v) >> (i)->rx_lvl_offset) & \
(((i)->fifo_lvl_mask + 1) << 1)) \
? 1 : 0)
#define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask)
#define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask)
#define S3C64XX_SPI_MAX_TRAILCNT 0x3ff
#define S3C64XX_SPI_TRAILCNT_OFF 19
#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
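/*
 * Rough busy-wait budget: loops_per_jiffy * HZ is the calibrated number
 * of delay loops per second, so the macro below evaluates to roughly
 * 't' milliseconds worth of polling iterations.
 */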
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
#define SUSPND (1<<0)
#define SPIBUSY (1<<1)
#define RXBUSY (1<<2)
#define TXBUSY (1<<3)
/**
* struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
* @clk: Pointer to the spi clock.
* @master: Pointer to the SPI Protocol master.
* @workqueue: Work queue for the SPI xfer requests.
* @cntrlr_info: Platform specific data for the controller this driver manages.
* @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
* @work: Work item that processes the queued transfer requests.
* @queue: List of queued SPI transfer requests.
* @lock: Controller specific lock.
* @state: Set of FLAGS to indicate status.
* @rx_dmach: Controller's DMA channel for Rx.
* @tx_dmach: Controller's DMA channel for Tx.
* @sfr_start: BUS address of SPI controller regs.
* @regs: Pointer to ioremap'ed controller registers.
* @xfer_completion: To indicate completion of xfer task.
* @cur_mode: Stores the active configuration of the controller.
* @cur_bpw: Stores the active bits per word settings.
* @cur_speed: Stores the active xfer clock speed.
*/
struct s3c64xx_spi_driver_data {
void __iomem *regs;
struct clk *clk;
struct platform_device *pdev;
struct spi_master *master;
struct workqueue_struct *workqueue;
struct s3c64xx_spi_cntrlr_info *cntrlr_info;
struct spi_device *tgl_spi;
struct work_struct work;
struct list_head queue;
spinlock_t lock;
enum dma_ch rx_dmach;
enum dma_ch tx_dmach;
unsigned long sfr_start;
struct completion xfer_completion;
unsigned state;
unsigned cur_mode, cur_bpw;
unsigned cur_speed;
};
static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
.name = "samsung-spi-dma",
};
static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
{
struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
void __iomem *regs = sdd->regs;
unsigned long loops;
u32 val;
writel(0, regs + S3C64XX_SPI_PACKET_CNT);
val = readl(regs + S3C64XX_SPI_CH_CFG);
val |= S3C64XX_SPI_CH_SW_RST;
val &= ~S3C64XX_SPI_CH_HS_EN;
writel(val, regs + S3C64XX_SPI_CH_CFG);
/* Flush TxFIFO*/
loops = msecs_to_loops(1);
do {
val = readl(regs + S3C64XX_SPI_STATUS);
} while (TX_FIFO_LVL(val, sci) && loops--);
/* Flush RxFIFO*/
loops = msecs_to_loops(1);
do {
val = readl(regs + S3C64XX_SPI_STATUS);
if (RX_FIFO_LVL(val, sci))
readl(regs + S3C64XX_SPI_RX_DATA);
else
break;
} while (loops--);
val = readl(regs + S3C64XX_SPI_CH_CFG);
val &= ~S3C64XX_SPI_CH_SW_RST;
writel(val, regs + S3C64XX_SPI_CH_CFG);
val = readl(regs + S3C64XX_SPI_MODE_CFG);
val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
writel(val, regs + S3C64XX_SPI_MODE_CFG);
val = readl(regs + S3C64XX_SPI_CH_CFG);
val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
writel(val, regs + S3C64XX_SPI_CH_CFG);
}
static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
struct spi_device *spi,
struct spi_transfer *xfer, int dma_mode)
{
struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
void __iomem *regs = sdd->regs;
u32 modecfg, chcfg;
modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;
if (dma_mode) {
chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
} else {
/* Always shift data into the Rx FIFO, even if the xfer is Tx only;
* this helps set the PCKT_CNT value so that exactly the needed
* number of clocks is generated.
*/
chcfg |= S3C64XX_SPI_CH_RXCH_ON;
writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
| S3C64XX_SPI_PACKET_CNT_EN,
regs + S3C64XX_SPI_PACKET_CNT);
}
if (xfer->tx_buf != NULL) {
sdd->state |= TXBUSY;
chcfg |= S3C64XX_SPI_CH_TXCH_ON;
if (dma_mode) {
modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
s3c2410_dma_config(sdd->tx_dmach, 1);
s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd,
xfer->tx_dma, xfer->len);
s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START);
} else {
unsigned char *buf = (unsigned char *) xfer->tx_buf;
int i = 0;
while (i < xfer->len)
writeb(buf[i++], regs + S3C64XX_SPI_TX_DATA);
}
}
if (xfer->rx_buf != NULL) {
sdd->state |= RXBUSY;
if (sci->high_speed && sdd->cur_speed >= 30000000UL
&& !(sdd->cur_mode & SPI_CPHA))
chcfg |= S3C64XX_SPI_CH_HS_EN;
if (dma_mode) {
modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
chcfg |= S3C64XX_SPI_CH_RXCH_ON;
writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
| S3C64XX_SPI_PACKET_CNT_EN,
regs + S3C64XX_SPI_PACKET_CNT);
s3c2410_dma_config(sdd->rx_dmach, 1);
s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd,
xfer->rx_dma, xfer->len);
s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START);
}
}
writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
}
static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs;
if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
/* Deselect the last toggled device */
cs = sdd->tgl_spi->controller_data;
cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1);
}
sdd->tgl_spi = NULL;
}
cs = spi->controller_data;
cs->set_level(spi->mode & SPI_CS_HIGH ? 1 : 0);
}
static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
struct spi_transfer *xfer, int dma_mode)
{
struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
void __iomem *regs = sdd->regs;
unsigned long val;
int ms;
/* millisecs to xfer 'len' bytes @ 'cur_speed' */
ms = xfer->len * 8 * 1000 / sdd->cur_speed;
ms += 5; /* some tolerance */
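/*
 * Illustrative numbers: 256 bytes at 1 MHz is 256 * 8 * 1000 / 1000000
 * = 2 ms, plus the 5 ms tolerance above.
 */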
if (dma_mode) {
val = msecs_to_jiffies(ms) + 10;
val = wait_for_completion_timeout(&sdd->xfer_completion, val);
} else {
val = msecs_to_loops(ms);
do {
val = readl(regs + S3C64XX_SPI_STATUS);
} while (RX_FIFO_LVL(val, sci) < xfer->len && --val);
}
if (!val)
return -EIO;
if (dma_mode) {
u32 status;
/*
* DmaTx returns after simply writing data into the FIFO,
* without waiting for the real transmission on the bus to finish.
* DmaRx returns only after the DMA has read the data from the FIFO,
* which requires the bus transmission to finish, so we need no extra
* wait if the xfer involved Rx (with or without Tx).
*/
if (xfer->rx_buf == NULL) {
val = msecs_to_loops(10);
status = readl(regs + S3C64XX_SPI_STATUS);
while ((TX_FIFO_LVL(status, sci)
|| !S3C64XX_SPI_ST_TX_DONE(status, sci))
&& --val) {
cpu_relax();
status = readl(regs + S3C64XX_SPI_STATUS);
}
if (!val)
return -EIO;
}
} else {
unsigned char *buf;
int i;
/* If it was only Tx */
if (xfer->rx_buf == NULL) {
sdd->state &= ~TXBUSY;
return 0;
}
i = 0;
buf = xfer->rx_buf;
while (i < xfer->len)
buf[i++] = readb(regs + S3C64XX_SPI_RX_DATA);
sdd->state &= ~RXBUSY;
}
return 0;
}
static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
if (sdd->tgl_spi == spi)
sdd->tgl_spi = NULL;
cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1);
}
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
void __iomem *regs = sdd->regs;
u32 val;
/* Disable Clock */
val = readl(regs + S3C64XX_SPI_CLK_CFG);
val &= ~S3C64XX_SPI_ENCLK_ENABLE;
writel(val, regs + S3C64XX_SPI_CLK_CFG);
/* Set Polarity and Phase */
val = readl(regs + S3C64XX_SPI_CH_CFG);
val &= ~(S3C64XX_SPI_CH_SLAVE |
S3C64XX_SPI_CPOL_L |
S3C64XX_SPI_CPHA_B);
if (sdd->cur_mode & SPI_CPOL)
val |= S3C64XX_SPI_CPOL_L;
if (sdd->cur_mode & SPI_CPHA)
val |= S3C64XX_SPI_CPHA_B;
writel(val, regs + S3C64XX_SPI_CH_CFG);
/* Set Channel & DMA Mode */
val = readl(regs + S3C64XX_SPI_MODE_CFG);
val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
| S3C64XX_SPI_MODE_CH_TSZ_MASK);
switch (sdd->cur_bpw) {
case 32:
val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
break;
case 16:
val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
break;
default:
val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
break;
}
val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; /* Always 8bits wide */
writel(val, regs + S3C64XX_SPI_MODE_CFG);
/* Configure Clock */
val = readl(regs + S3C64XX_SPI_CLK_CFG);
val &= ~S3C64XX_SPI_PSR_MASK;
val |= ((clk_get_rate(sci->src_clk) / sdd->cur_speed / 2 - 1)
& S3C64XX_SPI_PSR_MASK);
writel(val, regs + S3C64XX_SPI_CLK_CFG);
/* Enable Clock */
val = readl(regs + S3C64XX_SPI_CLK_CFG);
val |= S3C64XX_SPI_ENCLK_ENABLE;
writel(val, regs + S3C64XX_SPI_CLK_CFG);
}
void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
int size, enum s3c2410_dma_buffresult res)
{
struct s3c64xx_spi_driver_data *sdd = buf_id;
unsigned long flags;
spin_lock_irqsave(&sdd->lock, flags);
if (res == S3C2410_RES_OK)
sdd->state &= ~RXBUSY;
else
dev_err(&sdd->pdev->dev, "DmaAbrtRx-%d\n", size);
/* If the other done */
if (!(sdd->state & TXBUSY))
complete(&sdd->xfer_completion);
spin_unlock_irqrestore(&sdd->lock, flags);
}
void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
int size, enum s3c2410_dma_buffresult res)
{
struct s3c64xx_spi_driver_data *sdd = buf_id;
unsigned long flags;
spin_lock_irqsave(&sdd->lock, flags);
if (res == S3C2410_RES_OK)
sdd->state &= ~TXBUSY;
else
dev_err(&sdd->pdev->dev, "DmaAbrtTx-%d \n", size);
/* If the other done */
if (!(sdd->state & RXBUSY))
complete(&sdd->xfer_completion);
spin_unlock_irqrestore(&sdd->lock, flags);
}
#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
struct spi_message *msg)
{
struct device *dev = &sdd->pdev->dev;
struct spi_transfer *xfer;
if (msg->is_dma_mapped)
return 0;
/* First mark all xfer unmapped */
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
xfer->rx_dma = XFER_DMAADDR_INVALID;
xfer->tx_dma = XFER_DMAADDR_INVALID;
}
/* Map until end or first fail */
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (xfer->tx_buf != NULL) {
xfer->tx_dma = dma_map_single(dev, xfer->tx_buf,
xfer->len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, xfer->tx_dma)) {
dev_err(dev, "dma_map_single Tx failed\n");
xfer->tx_dma = XFER_DMAADDR_INVALID;
return -ENOMEM;
}
}
if (xfer->rx_buf != NULL) {
xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
xfer->len, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, xfer->rx_dma)) {
dev_err(dev, "dma_map_single Rx failed\n");
dma_unmap_single(dev, xfer->tx_dma,
xfer->len, DMA_TO_DEVICE);
xfer->tx_dma = XFER_DMAADDR_INVALID;
xfer->rx_dma = XFER_DMAADDR_INVALID;
return -ENOMEM;
}
}
}
return 0;
}
static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
struct spi_message *msg)
{
struct device *dev = &sdd->pdev->dev;
struct spi_transfer *xfer;
if (msg->is_dma_mapped)
return;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (xfer->rx_buf != NULL
&& xfer->rx_dma != XFER_DMAADDR_INVALID)
dma_unmap_single(dev, xfer->rx_dma,
xfer->len, DMA_FROM_DEVICE);
if (xfer->tx_buf != NULL
&& xfer->tx_dma != XFER_DMAADDR_INVALID)
dma_unmap_single(dev, xfer->tx_dma,
xfer->len, DMA_TO_DEVICE);
}
}
static void handle_msg(struct s3c64xx_spi_driver_data *sdd,
struct spi_message *msg)
{
struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
struct spi_device *spi = msg->spi;
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
struct spi_transfer *xfer;
int status = 0, cs_toggle = 0;
u32 speed;
u8 bpw;
/* If Master's(controller) state differs from that needed by Slave */
if (sdd->cur_speed != spi->max_speed_hz
|| sdd->cur_mode != spi->mode
|| sdd->cur_bpw != spi->bits_per_word) {
sdd->cur_bpw = spi->bits_per_word;
sdd->cur_speed = spi->max_speed_hz;
sdd->cur_mode = spi->mode;
s3c64xx_spi_config(sdd);
}
/* Map all the transfers if needed */
if (s3c64xx_spi_map_mssg(sdd, msg)) {
dev_err(&spi->dev,
"Xfer: Unable to map message buffers!\n");
status = -ENOMEM;
goto out;
}
/* Configure feedback delay */
writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
unsigned long flags;
int use_dma;
INIT_COMPLETION(sdd->xfer_completion);
/* Only BPW and Speed may change across transfers */
bpw = xfer->bits_per_word ? : spi->bits_per_word;
speed = xfer->speed_hz ? : spi->max_speed_hz;
if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
sdd->cur_bpw = bpw;
sdd->cur_speed = speed;
s3c64xx_spi_config(sdd);
}
/* Polling method for xfers not bigger than FIFO capacity */
if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
use_dma = 0;
else
use_dma = 1;
spin_lock_irqsave(&sdd->lock, flags);
/* Clear both busy flags; only the direction(s) actually started below are marked pending */
sdd->state &= ~RXBUSY;
sdd->state &= ~TXBUSY;
enable_datapath(sdd, spi, xfer, use_dma);
/* Slave Select */
enable_cs(sdd, spi);
/* Start the signals */
S3C64XX_SPI_ACT(sdd);
spin_unlock_irqrestore(&sdd->lock, flags);
status = wait_for_xfer(sdd, xfer, use_dma);
/* Quiesce the signals */
S3C64XX_SPI_DEACT(sdd);
if (status) {
dev_err(&spi->dev, "I/O Error: \
rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
(sdd->state & RXBUSY) ? 'f' : 'p',
(sdd->state & TXBUSY) ? 'f' : 'p',
xfer->len);
if (use_dma) {
if (xfer->tx_buf != NULL
&& (sdd->state & TXBUSY))
s3c2410_dma_ctrl(sdd->tx_dmach,
S3C2410_DMAOP_FLUSH);
if (xfer->rx_buf != NULL
&& (sdd->state & RXBUSY))
s3c2410_dma_ctrl(sdd->rx_dmach,
S3C2410_DMAOP_FLUSH);
}
goto out;
}
if (xfer->delay_usecs)
udelay(xfer->delay_usecs);
if (xfer->cs_change) {
/* Hint that the next message is probably going to be
for the same device */
if (list_is_last(&xfer->transfer_list,
&msg->transfers))
cs_toggle = 1;
else
disable_cs(sdd, spi);
}
msg->actual_length += xfer->len;
flush_fifo(sdd);
}
out:
if (!cs_toggle || status)
disable_cs(sdd, spi);
else
sdd->tgl_spi = spi;
s3c64xx_spi_unmap_mssg(sdd, msg);
msg->status = status;
if (msg->complete)
msg->complete(msg->context);
}
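/*
 * Returns 1 once both DMA channels are acquired, 0 on failure (note: not a
 * -errno); the work function below simply retries until this succeeds.
 */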
static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
{
if (s3c2410_dma_request(sdd->rx_dmach,
&s3c64xx_spi_dma_client, NULL) < 0) {
dev_err(&sdd->pdev->dev, "cannot get RxDMA\n");
return 0;
}
s3c2410_dma_set_buffdone_fn(sdd->rx_dmach, s3c64xx_spi_dma_rxcb);
s3c2410_dma_devconfig(sdd->rx_dmach, S3C2410_DMASRC_HW,
sdd->sfr_start + S3C64XX_SPI_RX_DATA);
if (s3c2410_dma_request(sdd->tx_dmach,
&s3c64xx_spi_dma_client, NULL) < 0) {
dev_err(&sdd->pdev->dev, "cannot get TxDMA\n");
s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
return 0;
}
s3c2410_dma_set_buffdone_fn(sdd->tx_dmach, s3c64xx_spi_dma_txcb);
s3c2410_dma_devconfig(sdd->tx_dmach, S3C2410_DMASRC_MEM,
sdd->sfr_start + S3C64XX_SPI_TX_DATA);
return 1;
}
static void s3c64xx_spi_work(struct work_struct *work)
{
struct s3c64xx_spi_driver_data *sdd = container_of(work,
struct s3c64xx_spi_driver_data, work);
unsigned long flags;
/* Acquire DMA channels */
while (!acquire_dma(sdd))
msleep(10);
spin_lock_irqsave(&sdd->lock, flags);
while (!list_empty(&sdd->queue)
&& !(sdd->state & SUSPND)) {
struct spi_message *msg;
msg = container_of(sdd->queue.next, struct spi_message, queue);
list_del_init(&msg->queue);
/* Set Xfer busy flag */
sdd->state |= SPIBUSY;
spin_unlock_irqrestore(&sdd->lock, flags);
handle_msg(sdd, msg);
spin_lock_irqsave(&sdd->lock, flags);
sdd->state &= ~SPIBUSY;
}
spin_unlock_irqrestore(&sdd->lock, flags);
/* Free DMA channels */
s3c2410_dma_free(sdd->tx_dmach, &s3c64xx_spi_dma_client);
s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
}
static int s3c64xx_spi_transfer(struct spi_device *spi,
struct spi_message *msg)
{
struct s3c64xx_spi_driver_data *sdd;
unsigned long flags;
sdd = spi_master_get_devdata(spi->master);
spin_lock_irqsave(&sdd->lock, flags);
if (sdd->state & SUSPND) {
spin_unlock_irqrestore(&sdd->lock, flags);
return -ESHUTDOWN;
}
msg->status = -EINPROGRESS;
msg->actual_length = 0;
list_add_tail(&msg->queue, &sdd->queue);
queue_work(sdd->workqueue, &sdd->work);
spin_unlock_irqrestore(&sdd->lock, flags);
return 0;
}
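/*
 * For context, a minimal sketch (hypothetical protocol-driver code, not part
 * of this driver) of what ends up on the queue above: the SPI core hands the
 * message to s3c64xx_spi_transfer(), and spi_sync() blocks until
 * msg->complete() fires from handle_msg(). Buffer contents are arbitrary;
 * larger, DMA-eligible transfers should use DMA-safe (e.g. kmalloc'd) buffers.
 */
#if 0	/* usage sketch only */
static int example_duplex_xfer(struct spi_device *spi)
{
	u8 tx[2] = { 0x9f, 0x00 };	/* arbitrary command bytes */
	u8 rx[2];
	struct spi_transfer t = {
		.tx_buf	= tx,
		.rx_buf	= rx,
		.len	= sizeof(tx),
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spi_sync(spi, &m);
}
#endif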
/*
* Here we only check the validity of the requested configuration
* and save it in a local data structure.
* The controller is actually configured only when a message
* is about to be transferred.
*/
static int s3c64xx_spi_setup(struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
struct s3c64xx_spi_driver_data *sdd;
struct s3c64xx_spi_cntrlr_info *sci;
struct spi_message *msg;
u32 psr, speed;
unsigned long flags;
int err = 0;
if (cs == NULL || cs->set_level == NULL) {
dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
return -ENODEV;
}
sdd = spi_master_get_devdata(spi->master);
sci = sdd->cntrlr_info;
spin_lock_irqsave(&sdd->lock, flags);
list_for_each_entry(msg, &sdd->queue, queue) {
/* Is a message already queued for this device? */
if (msg->spi == spi) {
dev_err(&spi->dev,
"setup: attempt while mssg in queue!\n");
spin_unlock_irqrestore(&sdd->lock, flags);
return -EBUSY;
}
}
if (sdd->state & SUSPND) {
spin_unlock_irqrestore(&sdd->lock, flags);
dev_err(&spi->dev,
"setup: SPI-%d not active!\n", spi->master->bus_num);
return -ESHUTDOWN;
}
spin_unlock_irqrestore(&sdd->lock, flags);
if (spi->bits_per_word != 8
&& spi->bits_per_word != 16
&& spi->bits_per_word != 32) {
dev_err(&spi->dev, "setup: %dbits/wrd not supported!\n",
spi->bits_per_word);
err = -EINVAL;
goto setup_exit;
}
/* Check if we can provide the requested rate */
speed = clk_get_rate(sci->src_clk) / 2 / (0 + 1); /* Max possible (PSR == 0) */
if (spi->max_speed_hz > speed)
spi->max_speed_hz = speed;
psr = clk_get_rate(sci->src_clk) / 2 / spi->max_speed_hz - 1;
psr &= S3C64XX_SPI_PSR_MASK;
if (psr == S3C64XX_SPI_PSR_MASK)
psr--;
speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1);
if (spi->max_speed_hz < speed) {
if (psr+1 < S3C64XX_SPI_PSR_MASK) {
psr++;
} else {
err = -EINVAL;
goto setup_exit;
}
}
speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1);
if (spi->max_speed_hz >= speed)
spi->max_speed_hz = speed;
else
err = -EINVAL;
setup_exit:
/* setup() returns with device de-selected */
disable_cs(sdd, spi);
return err;
}
static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
{
struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
void __iomem *regs = sdd->regs;
unsigned int val;
sdd->cur_speed = 0;
S3C64XX_SPI_DEACT(sdd);
/* Disable Interrupts - we use Polling if not DMA mode */
writel(0, regs + S3C64XX_SPI_INT_EN);
writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
regs + S3C64XX_SPI_CLK_CFG);
writel(0, regs + S3C64XX_SPI_MODE_CFG);
writel(0, regs + S3C64XX_SPI_PACKET_CNT);
/* Clear any irq pending bits */
writel(readl(regs + S3C64XX_SPI_PENDING_CLR),
regs + S3C64XX_SPI_PENDING_CLR);
writel(0, regs + S3C64XX_SPI_SWAP_CFG);
val = readl(regs + S3C64XX_SPI_MODE_CFG);
val &= ~S3C64XX_SPI_MODE_4BURST;
val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
writel(val, regs + S3C64XX_SPI_MODE_CFG);
flush_fifo(sdd);
}
static int __init s3c64xx_spi_probe(struct platform_device *pdev)
{
struct resource *mem_res, *dmatx_res, *dmarx_res;
struct s3c64xx_spi_driver_data *sdd;
struct s3c64xx_spi_cntrlr_info *sci;
struct spi_master *master;
int ret;
if (pdev->id < 0) {
dev_err(&pdev->dev,
"Invalid platform device id-%d\n", pdev->id);
return -ENODEV;
}
if (pdev->dev.platform_data == NULL) {
dev_err(&pdev->dev, "platform_data missing!\n");
return -ENODEV;
}
/* Check for availability of necessary resource */
dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (dmatx_res == NULL) {
dev_err(&pdev->dev, "Unable to get SPI-Tx dma resource\n");
return -ENXIO;
}
dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
if (dmarx_res == NULL) {
dev_err(&pdev->dev, "Unable to get SPI-Rx dma resource\n");
return -ENXIO;
}
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (mem_res == NULL) {
dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
return -ENXIO;
}
master = spi_alloc_master(&pdev->dev,
sizeof(struct s3c64xx_spi_driver_data));
if (master == NULL) {
dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
return -ENOMEM;
}
sci = pdev->dev.platform_data;
platform_set_drvdata(pdev, master);
sdd = spi_master_get_devdata(master);
sdd->master = master;
sdd->cntrlr_info = sci;
sdd->pdev = pdev;
sdd->sfr_start = mem_res->start;
sdd->tx_dmach = dmatx_res->start;
sdd->rx_dmach = dmarx_res->start;
sdd->cur_bpw = 8;
master->bus_num = pdev->id;
master->setup = s3c64xx_spi_setup;
master->transfer = s3c64xx_spi_transfer;
master->num_chipselect = sci->num_cs;
master->dma_alignment = 8;
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
if (request_mem_region(mem_res->start,
resource_size(mem_res), pdev->name) == NULL) {
dev_err(&pdev->dev, "Req mem region failed\n");
ret = -ENXIO;
goto err0;
}
sdd->regs = ioremap(mem_res->start, resource_size(mem_res));
if (sdd->regs == NULL) {
dev_err(&pdev->dev, "Unable to remap IO\n");
ret = -ENXIO;
goto err1;
}
if (sci->cfg_gpio == NULL || sci->cfg_gpio(pdev)) {
dev_err(&pdev->dev, "Unable to config gpio\n");
ret = -EBUSY;
goto err2;
}
/* Setup clocks */
sdd->clk = clk_get(&pdev->dev, "spi");
if (IS_ERR(sdd->clk)) {
dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
ret = PTR_ERR(sdd->clk);
goto err3;
}
if (clk_enable(sdd->clk)) {
dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
ret = -EBUSY;
goto err4;
}
if (sci->src_clk_nr == S3C64XX_SPI_SRCCLK_PCLK)
sci->src_clk = sdd->clk;
else
sci->src_clk = clk_get(&pdev->dev, sci->src_clk_name);
if (IS_ERR(sci->src_clk)) {
dev_err(&pdev->dev,
"Unable to acquire clock '%s'\n", sci->src_clk_name);
ret = PTR_ERR(sci->src_clk);
goto err5;
}
if (sci->src_clk != sdd->clk && clk_enable(sci->src_clk)) {
dev_err(&pdev->dev, "Couldn't enable clock '%s'\n",
sci->src_clk_name);
ret = -EBUSY;
goto err6;
}
sdd->workqueue = create_singlethread_workqueue(
dev_name(master->dev.parent));
if (sdd->workqueue == NULL) {
dev_err(&pdev->dev, "Unable to create workqueue\n");
ret = -ENOMEM;
goto err7;
}
/* Set up default mode */
s3c64xx_spi_hwinit(sdd, pdev->id);
spin_lock_init(&sdd->lock);
init_completion(&sdd->xfer_completion);
INIT_WORK(&sdd->work, s3c64xx_spi_work);
INIT_LIST_HEAD(&sdd->queue);
if (spi_register_master(master)) {
dev_err(&pdev->dev, "cannot register SPI master\n");
ret = -EBUSY;
goto err8;
}
dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d \
with %d Slaves attached\n",
pdev->id, master->num_chipselect);
dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\
\tDMA=[Rx-%d, Tx-%d]\n",
mem_res->end, mem_res->start,
sdd->rx_dmach, sdd->tx_dmach);
return 0;
err8:
destroy_workqueue(sdd->workqueue);
err7:
if (sci->src_clk != sdd->clk)
clk_disable(sci->src_clk);
err6:
if (sci->src_clk != sdd->clk)
clk_put(sci->src_clk);
err5:
clk_disable(sdd->clk);
err4:
clk_put(sdd->clk);
err3:
err2:
iounmap(sdd->regs);
err1:
release_mem_region(mem_res->start, resource_size(mem_res));
err0:
platform_set_drvdata(pdev, NULL);
spi_master_put(master);
return ret;
}
static int s3c64xx_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
struct resource *mem_res;
unsigned long flags;
spin_lock_irqsave(&sdd->lock, flags);
sdd->state |= SUSPND;
spin_unlock_irqrestore(&sdd->lock, flags);
while (sdd->state & SPIBUSY)
msleep(10);
spi_unregister_master(master);
destroy_workqueue(sdd->workqueue);
if (sci->src_clk != sdd->clk)
clk_disable(sci->src_clk);
if (sci->src_clk != sdd->clk)
clk_put(sci->src_clk);
clk_disable(sdd->clk);
clk_put(sdd->clk);
iounmap(sdd->regs);
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem_res->start, resource_size(mem_res));
platform_set_drvdata(pdev, NULL);
spi_master_put(master);
return 0;
}
#ifdef CONFIG_PM
static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
{
struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
struct s3c64xx_spi_csinfo *cs;
unsigned long flags;
spin_lock_irqsave(&sdd->lock, flags);
sdd->state |= SUSPND;
spin_unlock_irqrestore(&sdd->lock, flags);
while (sdd->state & SPIBUSY)
msleep(10);
/* Disable the clock */
if (sci->src_clk != sdd->clk)
clk_disable(sci->src_clk);
clk_disable(sdd->clk);
sdd->cur_speed = 0; /* Output Clock is stopped */
return 0;
}
static int s3c64xx_spi_resume(struct platform_device *pdev)
{
struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
unsigned long flags;
sci->cfg_gpio(pdev);
/* Enable the clock */
if (sci->src_clk != sdd->clk)
clk_enable(sci->src_clk);
clk_enable(sdd->clk);
s3c64xx_spi_hwinit(sdd, pdev->id);
spin_lock_irqsave(&sdd->lock, flags);
sdd->state &= ~SUSPND;
spin_unlock_irqrestore(&sdd->lock, flags);
return 0;
}
#else
#define s3c64xx_spi_suspend NULL
#define s3c64xx_spi_resume NULL
#endif /* CONFIG_PM */
static struct platform_driver s3c64xx_spi_driver = {
.driver = {
.name = "s3c64xx-spi",
.owner = THIS_MODULE,
},
.remove = s3c64xx_spi_remove,
.suspend = s3c64xx_spi_suspend,
.resume = s3c64xx_spi_resume,
};
MODULE_ALIAS("platform:s3c64xx-spi");
static int __init s3c64xx_spi_init(void)
{
return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
}
module_init(s3c64xx_spi_init);
static void __exit s3c64xx_spi_exit(void)
{
platform_driver_unregister(&s3c64xx_spi_driver);
}
module_exit(s3c64xx_spi_exit);
MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
MODULE_LICENSE("GPL");
......@@ -148,7 +148,7 @@ static int sh_sci_spi_probe(struct platform_device *dev)
ret = -ENOENT;
goto err1;
}
sp->membase = ioremap(r->start, r->end - r->start + 1);
sp->membase = ioremap(r->start, resource_size(r));
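/* resource_size(r) expands to r->end - r->start + 1, so the mapped length is unchanged */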
if (!sp->membase) {
ret = -ENXIO;
goto err1;
......
......@@ -375,12 +375,10 @@ static int __init txx9spi_probe(struct platform_device *dev)
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res)
goto exit_busy;
if (!devm_request_mem_region(&dev->dev,
res->start, res->end - res->start + 1,
if (!devm_request_mem_region(&dev->dev, res->start, resource_size(res),
"spi_txx9"))
goto exit_busy;
c->membase = devm_ioremap(&dev->dev,
res->start, res->end - res->start + 1);
c->membase = devm_ioremap(&dev->dev, res->start, resource_size(res));
if (!c->membase)
goto exit_busy;
......
......@@ -53,7 +53,7 @@
#define SPIDEV_MAJOR 153 /* assigned */
#define N_SPI_MINORS 32 /* ... up to 256 */
static unsigned long minors[N_SPI_MINORS / BITS_PER_LONG];
static DECLARE_BITMAP(minors, N_SPI_MINORS);
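/*
 * DECLARE_BITMAP(minors, N_SPI_MINORS) expands to
 * "unsigned long minors[BITS_TO_LONGS(N_SPI_MINORS)]", which rounds up to at
 * least one long; the open-coded N_SPI_MINORS / BITS_PER_LONG above truncates
 * to zero on 64-bit, leaving no storage for the minor-number bitmap.
 */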
/* Bit masks for spi_device.mode management. Note that incorrect
......@@ -558,7 +558,7 @@ static struct class *spidev_class;
/*-------------------------------------------------------------------------*/
static int spidev_probe(struct spi_device *spi)
static int __devinit spidev_probe(struct spi_device *spi)
{
struct spidev_data *spidev;
int status;
......@@ -607,7 +607,7 @@ static int spidev_probe(struct spi_device *spi)
return status;
}
static int spidev_remove(struct spi_device *spi)
static int __devexit spidev_remove(struct spi_device *spi)
{
struct spidev_data *spidev = spi_get_drvdata(spi);
......@@ -629,7 +629,7 @@ static int spidev_remove(struct spi_device *spi)
return 0;
}
static struct spi_driver spidev_spi = {
static struct spi_driver spidev_spi_driver = {
.driver = {
.name = "spidev",
.owner = THIS_MODULE,
......@@ -661,14 +661,14 @@ static int __init spidev_init(void)
spidev_class = class_create(THIS_MODULE, "spidev");
if (IS_ERR(spidev_class)) {
unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name);
unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
return PTR_ERR(spidev_class);
}
status = spi_register_driver(&spidev_spi);
status = spi_register_driver(&spidev_spi_driver);
if (status < 0) {
class_destroy(spidev_class);
unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name);
unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
}
return status;
}
......@@ -676,9 +676,9 @@ module_init(spidev_init);
static void __exit spidev_exit(void)
{
spi_unregister_driver(&spidev_spi);
spi_unregister_driver(&spidev_spi_driver);
class_destroy(spidev_class);
unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name);
unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
}
module_exit(spidev_exit);
......
#ifndef DW_SPI_HEADER_H
#define DW_SPI_HEADER_H
#include <linux/io.h>
/* Bit fields in CTRLR0 */
#define SPI_DFS_OFFSET 0
#define SPI_FRF_OFFSET 4
#define SPI_FRF_SPI 0x0
#define SPI_FRF_SSP 0x1
#define SPI_FRF_MICROWIRE 0x2
#define SPI_FRF_RESV 0x3
#define SPI_MODE_OFFSET 6
#define SPI_SCPH_OFFSET 6
#define SPI_SCOL_OFFSET 7
#define SPI_TMOD_OFFSET 8
#define SPI_TMOD_TR 0x0 /* xmit & recv */
#define SPI_TMOD_TO 0x1 /* xmit only */
#define SPI_TMOD_RO 0x2 /* recv only */
#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */
#define SPI_SLVOE_OFFSET 10
#define SPI_SRL_OFFSET 11
#define SPI_CFS_OFFSET 12
/* Bit fields in SR, 7 bits */
#define SR_MASK 0x7f /* cover 7 bits */
#define SR_BUSY (1 << 0)
#define SR_TF_NOT_FULL (1 << 1)
#define SR_TF_EMPT (1 << 2)
#define SR_RF_NOT_EMPT (1 << 3)
#define SR_RF_FULL (1 << 4)
#define SR_TX_ERR (1 << 5)
#define SR_DCOL (1 << 6)
/* Bit fields in ISR, IMR, RISR, 7 bits */
#define SPI_INT_TXEI (1 << 0)
#define SPI_INT_TXOI (1 << 1)
#define SPI_INT_RXUI (1 << 2)
#define SPI_INT_RXOI (1 << 3)
#define SPI_INT_RXFI (1 << 4)
#define SPI_INT_MSTI (1 << 5)
/* TX/RX interrupt level threshold; max can be 256 */
#define SPI_INT_THRESHOLD 32
enum dw_ssi_type {
SSI_MOTO_SPI = 0,
SSI_TI_SSP,
SSI_NS_MICROWIRE,
};
struct dw_spi_reg {
u32 ctrl0;
u32 ctrl1;
u32 ssienr;
u32 mwcr;
u32 ser;
u32 baudr;
u32 txfltr;
u32 rxfltr;
u32 txflr;
u32 rxflr;
u32 sr;
u32 imr;
u32 isr;
u32 risr;
u32 txoicr;
u32 rxoicr;
u32 rxuicr;
u32 msticr;
u32 icr;
u32 dmacr;
u32 dmatdlr;
u32 dmardlr;
u32 idr;
u32 version;
u32 dr; /* Currently operated as 32 bits,
though only the low 16 bits matter */
} __packed;
struct dw_spi {
struct spi_master *master;
struct spi_device *cur_dev;
struct device *parent_dev;
enum dw_ssi_type type;
void __iomem *regs;
unsigned long paddr;
u32 iolen;
int irq;
u32 max_freq; /* max bus freq supported */
u16 bus_num;
u16 num_cs; /* number of chip-selects supported */
/* Driver message queue */
struct workqueue_struct *workqueue;
struct work_struct pump_messages;
spinlock_t lock;
struct list_head queue;
int busy;
int run;
/* Message Transfer pump */
struct tasklet_struct pump_transfers;
/* Current message transfer state info */
struct spi_message *cur_msg;
struct spi_transfer *cur_transfer;
struct chip_data *cur_chip;
struct chip_data *prev_chip;
size_t len;
void *tx;
void *tx_end;
void *rx;
void *rx_end;
int dma_mapped;
dma_addr_t rx_dma;
dma_addr_t tx_dma;
size_t rx_map_len;
size_t tx_map_len;
u8 n_bytes; /* current op is 1 or 2 bytes wide */
u8 max_bits_per_word; /* maximum is 16 bits */
u32 dma_width;
int cs_change;
int (*write)(struct dw_spi *dws);
int (*read)(struct dw_spi *dws);
irqreturn_t (*transfer_handler)(struct dw_spi *dws);
void (*cs_control)(u32 command);
/* Dma info */
int dma_inited;
struct dma_chan *txchan;
struct dma_chan *rxchan;
int txdma_done;
int rxdma_done;
u64 tx_param;
u64 rx_param;
struct device *dma_dev;
dma_addr_t dma_addr;
/* Bus interface info */
void *priv;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
#endif
};
#define dw_readl(dw, name) \
__raw_readl(&(((struct dw_spi_reg *)dw->regs)->name))
#define dw_writel(dw, name, val) \
__raw_writel((val), &(((struct dw_spi_reg *)dw->regs)->name))
#define dw_readw(dw, name) \
__raw_readw(&(((struct dw_spi_reg *)dw->regs)->name))
#define dw_writew(dw, name, val) \
__raw_writew((val), &(((struct dw_spi_reg *)dw->regs)->name))
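/*
 * These accessors rely on struct dw_spi_reg above mirroring the controller's
 * register layout one field per register, so taking a field's address yields
 * the MMIO location of that register within the dws->regs mapping.
 */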
static inline void spi_enable_chip(struct dw_spi *dws, int enable)
{
dw_writel(dws, ssienr, (enable ? 1 : 0));
}
static inline void spi_set_clk(struct dw_spi *dws, u16 div)
{
dw_writel(dws, baudr, div);
}
static inline void spi_chip_sel(struct dw_spi *dws, u16 cs)
{
if (cs > dws->num_cs)
return;
dw_writel(dws, ser, 1 << cs);
}
/* Disable IRQ bits */
static inline void spi_mask_intr(struct dw_spi *dws, u32 mask)
{
u32 new_mask;
new_mask = dw_readl(dws, imr) & ~mask;
dw_writel(dws, imr, new_mask);
}
/* Enable IRQ bits */
static inline void spi_umask_intr(struct dw_spi *dws, u32 mask)
{
u32 new_mask;
new_mask = dw_readl(dws, imr) | mask;
dw_writel(dws, imr, new_mask);
}
/*
* Each SPI slave device that works with the DesignWare SPI controller
* should provide such a structure describing its working mode (PIO/DMA etc.),
* which can be saved in the "controller_data" member of the
* struct spi_device.
*/
struct dw_spi_chip {
u8 poll_mode; /* 0 for controller polling mode */
u8 type; /* SPI/SSP/Microwire */
u8 enable_dma;
void (*cs_control)(u32 command);
};
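/*
 * A minimal sketch (hypothetical board code, not part of this header) of how
 * a slave could supply the structure above via the generic SPI board-info
 * mechanism; struct spi_board_info is assumed to come from <linux/spi/spi.h>,
 * and the helper name, modalias and numbers below are purely illustrative.
 */
#if 0	/* usage sketch only */
static void example_cs_control(u32 command)
{
	/* drive an external chip-select line here, if the core cannot */
}

static struct dw_spi_chip example_chip = {
	.poll_mode	= 0,		/* see the field comments above */
	.type		= SSI_MOTO_SPI,
	.enable_dma	= 0,
	.cs_control	= example_cs_control,
};

static struct spi_board_info example_board_info __initdata = {
	.modalias	= "spidev",
	.max_speed_hz	= 1000000,
	.bus_num	= 0,
	.chip_select	= 0,
	.controller_data = &example_chip,
};
#endif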
extern int dw_spi_add_host(struct dw_spi *dws);
extern void dw_spi_remove_host(struct dw_spi *dws);
extern int dw_spi_suspend_host(struct dw_spi *dws);
extern int dw_spi_resume_host(struct dw_spi *dws);
#endif /* DW_SPI_HEADER_H */