Commit 0da9bc6d authored by Linus Torvalds

Merge tag 'spi-v5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi

Pull spi updates from Mark Brown:
 "A quiet release for SPI, some fixes and a couple of new drivers plus
  one small refactoring:

   - Move the chip select timing configuration from the controller to
     the device to allow a bit more flexibility

   - New drivers for Rockchip SFC and Spreadtrum ADI"

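The chip select timing move called out above shows up further down in the include/linux/spi/spi.h and drivers/spi/spi.c hunks: the cs_setup, cs_hold and cs_inactive spi_delay fields now live in struct spi_device, and the controller's set_cs_timing() hook takes only the device. As a rough sketch (the probe function and delay values below are illustrative, not from any in-tree driver), a peripheral driver can now request its own CS timing before calling spi_setup():

#include <linux/spi/spi.h>

static int example_client_probe(struct spi_device *spi)
{
	/*
	 * Per-device CS delays; they are applied either by the core
	 * (see the spi_set_cs() hunk below) or by the controller's
	 * set_cs_timing() hook (see the tegra/mediatek hunks below).
	 */
	spi->cs_setup = (struct spi_delay){ .value = 2, .unit = SPI_DELAY_UNIT_SCK };
	spi->cs_hold = (struct spi_delay){ .value = 2, .unit = SPI_DELAY_UNIT_SCK };
	spi->cs_inactive = (struct spi_delay){ .value = 4, .unit = SPI_DELAY_UNIT_SCK };

	return spi_setup(spi);
}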
* tag 'spi-v5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi: (47 commits)
  spi: spi-zynq-qspi: use wait_for_completion_timeout to make zynq_qspi_exec_mem_op not interruptible
  spi: add sprd ADI for sc9863 and ums512
  spi: Convert sprd ADI bindings to yaml
  spi: sprd: Add ADI r3 support
  spi: sprd: Fix the wrong WDG_LOAD_VAL
  spi: davinci: invoke chipselect callback
  spi: sprd: fill offset only to RD_CMD register for reading from slave device
  spi: sprd: Make sure offset not equal to slave address size
  spi: sprd: Pass offset instead of physical address to adi_read/_write()
  spi: rockchip-sfc: Fix assigned but never used return error codes
  spi: rockchip-sfc: Remove redundant IO operations
  spi: stm32: fix excluded_middle.cocci warnings
  spi: coldfire-qspi: Use clk_disable_unprepare in the remove function
  spi: tegra20-slink: remove spi_master_put() in tegra_slink_remove()
  spi: rockchip-sfc: add rockchip serial flash controller
  spi: rockchip-sfc: Bindings for Rockchip serial flash controller
  spi: orion: Prevent incorrect chip select behaviour
  spi: mxic: add missing braces
  spi: spi-pic32: Fix issue with uninitialized dma_slave_config
  spi: spi-fsl-dspi: Fix issue with uninitialized dma_slave_config
  ...
parents d46e0d33 6e9c846a
......@@ -19,7 +19,6 @@ properties:
compatible:
enum:
- ibm,fsi2spi
- ibm,fsi2spi-restricted
reg:
items:
......
OMAP2+ McSPI device
Required properties:
- compatible :
- "ti,am654-mcspi" for AM654.
- "ti,omap2-mcspi" for OMAP2 & OMAP3.
- "ti,omap4-mcspi" for OMAP4+.
- ti,spi-num-cs : Number of chipselect supported by the instance.
- ti,hwmods: Name of the hwmod associated to the McSPI
- ti,pindir-d0-out-d1-in: Select the D0 pin as output and D1 as
input. The default is D0 as input and
D1 as output.
Optional properties:
- dmas: List of DMA specifiers with the controller specific format
as described in the generic DMA client binding. A tx and rx
specifier is required for each chip select.
- dma-names: List of DMA request names. These strings correspond
1:1 with the DMA specifiers listed in dmas. The string naming
is to be "rxN" and "txN" for RX and TX requests,
respectively, where N equals the chip select number.
Examples:
[hwmod populated DMA resources]
mcspi1: mcspi@1 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "ti,omap4-mcspi";
ti,hwmods = "mcspi1";
ti,spi-num-cs = <4>;
};
[generic DMA request binding]
mcspi1: mcspi@1 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "ti,omap4-mcspi";
ti,hwmods = "mcspi1";
ti,spi-num-cs = <2>;
dmas = <&edma 42
&edma 43
&edma 44
&edma 45>;
dma-names = "tx0", "rx0", "tx1", "rx1";
};
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/spi/omap-spi.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: SPI controller bindings for OMAP and K3 SoCs
maintainers:
- Aswath Govindraju <a-govindraju@ti.com>
allOf:
- $ref: spi-controller.yaml#
properties:
compatible:
oneOf:
- items:
- enum:
- ti,am654-mcspi
- ti,am4372-mcspi
- const: ti,omap4-mcspi
- items:
- enum:
- ti,omap2-mcspi
- ti,omap4-mcspi
reg:
maxItems: 1
interrupts:
maxItems: 1
clocks:
maxItems: 1
power-domains:
maxItems: 1
ti,spi-num-cs:
$ref: /schemas/types.yaml#/definitions/uint32
description: Number of chip selects supported by the instance.
minimum: 1
maximum: 4
ti,hwmods:
$ref: /schemas/types.yaml#/definitions/string
description:
Must be "mcspi<n>", n being the instance number (1-based).
This property is applicable only on legacy platforms mainly omap2/3
and ti81xx and should not be used on other platforms.
deprecated: true
ti,pindir-d0-out-d1-in:
description:
Select the D0 pin as output and D1 as input. The default is D0
as input and D1 as output.
type: boolean
dmas:
description:
List of DMA specifiers with the controller specific format as
described in the generic DMA client binding. A tx and rx
specifier is required for each chip select.
minItems: 1
maxItems: 8
dma-names:
description:
List of DMA request names. These strings correspond 1:1 with the
DMA specifiers listed in dmas. The string naming is to be
"rxN" and "txN" for RX and TX requests, respectively, where N
is the chip select number.
minItems: 1
maxItems: 8
required:
- compatible
- reg
- interrupts
unevaluatedProperties: false
if:
properties:
compatible:
oneOf:
- const: ti,omap2-mcspi
- const: ti,omap4-mcspi
then:
properties:
ti,hwmods:
items:
- pattern: "^mcspi([1-9])$"
else:
properties:
ti,hwmods: false
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/soc/ti,sci_pm_domain.h>
spi@2100000 {
compatible = "ti,am654-mcspi","ti,omap4-mcspi";
reg = <0x2100000 0x400>;
interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&k3_clks 137 1>;
power-domains = <&k3_pds 137 TI_SCI_PD_EXCLUSIVE>;
#address-cells = <1>;
#size-cells = <0>;
dmas = <&main_udmap 0xc500>, <&main_udmap 0x4500>;
dma-names = "tx0", "rx0";
};
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/spi/rockchip-sfc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Rockchip Serial Flash Controller (SFC)
maintainers:
- Heiko Stuebner <heiko@sntech.de>
- Chris Morgan <macromorgan@hotmail.com>
allOf:
- $ref: spi-controller.yaml#
properties:
compatible:
const: rockchip,sfc
description:
The Rockchip SFC is a standalone IP with a version register, and the
driver handles all feature differences within the IP based on that
version register.
reg:
maxItems: 1
interrupts:
maxItems: 1
clocks:
items:
- description: Bus Clock
- description: Module Clock
clock-names:
items:
- const: clk_sfc
- const: hclk_sfc
power-domains:
maxItems: 1
rockchip,sfc-no-dma:
description: Disable DMA and utilize FIFO mode only
type: boolean
patternProperties:
"^flash@[0-3]$":
type: object
properties:
reg:
minimum: 0
maximum: 3
required:
- compatible
- reg
- interrupts
- clocks
- clock-names
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/px30-cru.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/px30-power.h>
sfc: spi@ff3a0000 {
compatible = "rockchip,sfc";
reg = <0xff3a0000 0x4000>;
interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_SFC>, <&cru HCLK_SFC>;
clock-names = "clk_sfc", "hclk_sfc";
pinctrl-0 = <&sfc_clk &sfc_cs &sfc_bus2>;
pinctrl-names = "default";
power-domains = <&power PX30_PD_MMC_NAND>;
#address-cells = <1>;
#size-cells = <0>;
flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <108000000>;
spi-rx-bus-width = <2>;
spi-tx-bus-width = <2>;
};
};
...
......@@ -11,6 +11,7 @@ Required properties:
- mediatek,mt8135-spi: for mt8135 platforms
- mediatek,mt8173-spi: for mt8173 platforms
- mediatek,mt8183-spi: for mt8183 platforms
- mediatek,mt6893-spi: for mt6893 platforms
- "mediatek,mt8192-spi", "mediatek,mt6765-spi": for mt8192 platforms
- "mediatek,mt8195-spi", "mediatek,mt6765-spi": for mt8195 platforms
- "mediatek,mt8516-spi", "mediatek,mt2712-spi": for mt8516 platforms
......
Spreadtrum ADI controller
ADI is the abbreviation of Analog-Digital Interface, which is used to access
an analog chip (such as a PMIC) from a digital chip. The ADI controller
follows the SPI framework because its hardware implementation is similar to
an SPI bus and its timing is compatible with SPI timing.
The ADI controller has 50 channels, including 2 software read/write channels
and 48 hardware channels, to access the analog chip. For the 2 software
read/write channels, users set ADI registers to access the analog chip. The
hardware channels can be configured so that other hardware components use
them independently, which means one analog chip address can be linked to one
hardware channel; users can then access the mapped analog chip address
through that hardware channel, triggered by hardware components instead of
the ADI software channels.
Thus one property named "sprd,hw-channels" is introduced to configure
hardware channels: the first value specifies the hardware channel id which
is used to transfer data triggered by hardware automatically, and the second
value specifies the analog chip address that the user wants to access
through hardware components.
Since multiple subsystems use the same ADI to access the analog chip,
reading/writing data through the ADI software channels must be protected by
one hardware spinlock to prevent other subsystems from reading/writing data
through the ADI software channels at the same time; otherwise two parallel
routines setting ADI registers would leave the ADI controller registers in
an inconsistent state and produce incorrect results. Therefore one hardware
spinlock is needed to synchronize the multiple subsystems.
The new version of the ADI controller supplies multiple master channels for
different subsystems to access, so no hardware spinlock is needed for
synchronization; the hardware spinlock support is therefore made optional to
keep backward compatibility.
Required properties:
- compatible: Should be "sprd,sc9860-adi".
- reg: Offset and length of ADI-SPI controller register space.
- #address-cells: Number of cells required to define a chip select address
on the ADI-SPI bus. Should be set to 1.
- #size-cells: Size of cells required to define a chip select address size
on the ADI-SPI bus. Should be set to 0.
Optional properties:
- hwlocks: Reference to a phandle of a hwlock provider node.
- hwlock-names: Reference to hwlock name strings defined in the same order
as the hwlocks, should be "adi".
- sprd,hw-channels: An array of channel entries, supporting up to 49 channels.
	The first value specifies the hardware channel id which is used to
	transfer data triggered by hardware automatically, and the second
	value specifies the analog chip address that the user wants to access
	through hardware components.
SPI slave nodes must be children of the SPI controller node and can contain
properties described in Documentation/devicetree/bindings/spi/spi-bus.txt.
Example:
adi_bus: spi@40030000 {
compatible = "sprd,sc9860-adi";
reg = <0 0x40030000 0 0x10000>;
hwlocks = <&hwlock1 0>;
hwlock-names = "adi";
#address-cells = <1>;
#size-cells = <0>;
sprd,hw-channels = <30 0x8c20>;
};
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: "http://devicetree.org/schemas/spi/sprd,spi-adi.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Spreadtrum ADI controller
maintainers:
- Orson Zhai <orsonzhai@gmail.com>
- Baolin Wang <baolin.wang7@gmail.com>
- Chunyan Zhang <zhang.lyra@gmail.com>
description: |
ADI is the abbreviation of Analog-Digital Interface, which is used to access
an analog chip (such as a PMIC) from a digital chip. The ADI controller
follows the SPI framework because its hardware implementation is similar to
an SPI bus and its timing is compatible with SPI timing.
The ADI controller has 50 channels, including 2 software read/write channels
and 48 hardware channels, to access the analog chip. For the 2 software
read/write channels, users set ADI registers to access the analog chip. The
hardware channels can be configured so that other hardware components use
them independently, which means one analog chip address can be linked to one
hardware channel; users can then access the mapped analog chip address
through that hardware channel, triggered by hardware components instead of
the ADI software channels.
Thus one property named "sprd,hw-channels" is introduced to configure
hardware channels: the first value specifies the hardware channel id which
is used to transfer data triggered by hardware automatically, and the second
value specifies the analog chip address that the user wants to access
through hardware components.
Since multiple subsystems use the same ADI to access the analog chip,
reading/writing data through the ADI software channels must be protected by
one hardware spinlock to prevent other subsystems from reading/writing data
through the ADI software channels at the same time; otherwise two parallel
routines setting ADI registers would leave the ADI controller registers in
an inconsistent state and produce incorrect results. Therefore one hardware
spinlock is needed to synchronize the multiple subsystems.
The new version of the ADI controller supplies multiple master channels for
different subsystems to access, so no hardware spinlock is needed for
synchronization; the hardware spinlock support is therefore made optional to
keep backward compatibility.
allOf:
- $ref: /spi/spi-controller.yaml#
properties:
compatible:
enum:
- sprd,sc9860-adi
- sprd,sc9863-adi
- sprd,ums512-adi
reg:
maxItems: 1
hwlocks:
maxItems: 1
hwlock-names:
const: adi
sprd,hw-channels:
$ref: /schemas/types.yaml#/definitions/uint32-matrix
description: A list of hardware channels
minItems: 1
maxItems: 48
items:
items:
- description: The hardware channel id which is used to transfer data
triggered by hardware automatically; channel ids 0-1 are for software
use, 2-49 are hardware channels.
minimum: 2
maximum: 49
- description: The analog chip address that the user wants to access
through hardware components.
required:
- compatible
- reg
- '#address-cells'
- '#size-cells'
unevaluatedProperties: false
examples:
- |
aon {
#address-cells = <2>;
#size-cells = <2>;
adi_bus: spi@40030000 {
compatible = "sprd,sc9860-adi";
reg = <0 0x40030000 0 0x10000>;
hwlocks = <&hwlock1 0>;
hwlock-names = "adi";
#address-cells = <1>;
#size-cells = <0>;
sprd,hw-channels = <30 0x8c20>;
};
};
...
......@@ -658,6 +658,18 @@ config SPI_ROCKCHIP
The main use case of this controller is to use SPI flash as the boot
device.
config SPI_ROCKCHIP_SFC
tristate "Rockchip Serial Flash Controller (SFC)"
depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on HAS_IOMEM && HAS_DMA
help
This enables support for the Rockchip serial flash controller. This
is a specialized controller used to access SPI flash on some
Rockchip SoCs.
The Rockchip SFC supports DMA and PIO modes. When DMA is not
available, the driver automatically falls back to PIO mode.
config SPI_RB4XX
tristate "Mikrotik RB4XX SPI master"
depends on SPI_MASTER && ATH79
......
......@@ -95,6 +95,7 @@ obj-$(CONFIG_SPI_QCOM_GENI) += spi-geni-qcom.o
obj-$(CONFIG_SPI_QCOM_QSPI) += spi-qcom-qspi.o
obj-$(CONFIG_SPI_QUP) += spi-qup.o
obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o
obj-$(CONFIG_SPI_ROCKCHIP_SFC) += spi-rockchip-sfc.o
obj-$(CONFIG_SPI_RB4XX) += spi-rb4xx.o
obj-$(CONFIG_MACH_REALTEK_RTL) += spi-realtek-rtl.o
obj-$(CONFIG_SPI_RPCIF) += spi-rpc-if.o
......
......@@ -143,12 +143,12 @@ static void bcm2835aux_debugfs_remove(struct bcm2835aux_spi *bs)
}
#endif /* CONFIG_DEBUG_FS */
static inline u32 bcm2835aux_rd(struct bcm2835aux_spi *bs, unsigned reg)
static inline u32 bcm2835aux_rd(struct bcm2835aux_spi *bs, unsigned int reg)
{
return readl(bs->regs + reg);
}
static inline void bcm2835aux_wr(struct bcm2835aux_spi *bs, unsigned reg,
static inline void bcm2835aux_wr(struct bcm2835aux_spi *bs, unsigned int reg,
u32 val)
{
writel(val, bs->regs + reg);
......
......@@ -444,7 +444,7 @@ static int mcfqspi_remove(struct platform_device *pdev)
mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);
mcfqspi_cs_teardown(mcfqspi);
clk_disable(mcfqspi->clk);
clk_disable_unprepare(mcfqspi->clk);
return 0;
}
......
......@@ -213,12 +213,6 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
* line for the controller
*/
if (spi->cs_gpiod) {
/*
* FIXME: is this code ever executed? This host does not
* set SPI_MASTER_GPIO_SS so this chipselect callback should
* not get called from the SPI core when we are using
* GPIOs for chip select.
*/
if (value == BITBANG_CS_ACTIVE)
gpiod_set_value(spi->cs_gpiod, 1);
else
......@@ -945,7 +939,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
master->bus_num = pdev->id;
master->num_chipselect = pdata->num_chipselect;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);
master->flags = SPI_MASTER_MUST_RX;
master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_GPIO_SS;
master->setup = davinci_spi_setup;
master->cleanup = davinci_spi_cleanup;
master->can_dma = davinci_spi_can_dma;
......
......@@ -550,7 +550,7 @@ static int ep93xx_spi_prepare_hardware(struct spi_master *master)
u32 val;
int ret;
ret = clk_enable(espi->clk);
ret = clk_prepare_enable(espi->clk);
if (ret)
return ret;
......@@ -570,7 +570,7 @@ static int ep93xx_spi_unprepare_hardware(struct spi_master *master)
val &= ~SSPCR1_SSE;
writel(val, espi->mmio + SSPCR1);
clk_disable(espi->clk);
clk_disable_unprepare(espi->clk);
return 0;
}
......
......@@ -25,16 +25,11 @@
#define SPI_FSI_BASE 0x70000
#define SPI_FSI_INIT_TIMEOUT_MS 1000
#define SPI_FSI_MAX_XFR_SIZE 2048
#define SPI_FSI_MAX_XFR_SIZE_RESTRICTED 8
#define SPI_FSI_MAX_RX_SIZE 8
#define SPI_FSI_MAX_TX_SIZE 40
#define SPI_FSI_ERROR 0x0
#define SPI_FSI_COUNTER_CFG 0x1
#define SPI_FSI_COUNTER_CFG_LOOPS(x) (((u64)(x) & 0xffULL) << 32)
#define SPI_FSI_COUNTER_CFG_N2_RX BIT_ULL(8)
#define SPI_FSI_COUNTER_CFG_N2_TX BIT_ULL(9)
#define SPI_FSI_COUNTER_CFG_N2_IMPLICIT BIT_ULL(10)
#define SPI_FSI_COUNTER_CFG_N2_RELOAD BIT_ULL(11)
#define SPI_FSI_CFG1 0x2
#define SPI_FSI_CLOCK_CFG 0x3
#define SPI_FSI_CLOCK_CFG_MM_ENABLE BIT_ULL(32)
......@@ -76,8 +71,6 @@ struct fsi_spi {
struct device *dev; /* SPI controller device */
struct fsi_device *fsi; /* FSI2SPI CFAM engine device */
u32 base;
size_t max_xfr_size;
bool restricted;
};
struct fsi_spi_sequence {
......@@ -241,7 +234,7 @@ static int fsi_spi_reset(struct fsi_spi *ctx)
return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL);
}
static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
static void fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
{
/*
* Add the next byte of instruction to the 8-byte sequence register.
......@@ -251,8 +244,6 @@ static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
*/
seq->data |= (u64)val << seq->bit;
seq->bit -= 8;
return ((64 - seq->bit) / 8) - 2;
}
static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
......@@ -261,71 +252,11 @@ static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
seq->data = 0ULL;
}
static int fsi_spi_sequence_transfer(struct fsi_spi *ctx,
struct fsi_spi_sequence *seq,
struct spi_transfer *transfer)
{
int loops;
int idx;
int rc;
u8 val = 0;
u8 len = min(transfer->len, 8U);
u8 rem = transfer->len % len;
loops = transfer->len / len;
if (transfer->tx_buf) {
val = SPI_FSI_SEQUENCE_SHIFT_OUT(len);
idx = fsi_spi_sequence_add(seq, val);
if (rem)
rem = SPI_FSI_SEQUENCE_SHIFT_OUT(rem);
} else if (transfer->rx_buf) {
val = SPI_FSI_SEQUENCE_SHIFT_IN(len);
idx = fsi_spi_sequence_add(seq, val);
if (rem)
rem = SPI_FSI_SEQUENCE_SHIFT_IN(rem);
} else {
return -EINVAL;
}
if (ctx->restricted && loops > 1) {
dev_warn(ctx->dev,
"Transfer too large; no branches permitted.\n");
return -EINVAL;
}
if (loops > 1) {
u64 cfg = SPI_FSI_COUNTER_CFG_LOOPS(loops - 1);
fsi_spi_sequence_add(seq, SPI_FSI_SEQUENCE_BRANCH(idx));
if (transfer->rx_buf)
cfg |= SPI_FSI_COUNTER_CFG_N2_RX |
SPI_FSI_COUNTER_CFG_N2_TX |
SPI_FSI_COUNTER_CFG_N2_IMPLICIT |
SPI_FSI_COUNTER_CFG_N2_RELOAD;
rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, cfg);
if (rc)
return rc;
} else {
fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
}
if (rem)
fsi_spi_sequence_add(seq, rem);
return 0;
}
static int fsi_spi_transfer_data(struct fsi_spi *ctx,
struct spi_transfer *transfer)
{
int rc = 0;
u64 status = 0ULL;
u64 cfg = 0ULL;
if (transfer->tx_buf) {
int nb;
......@@ -363,16 +294,6 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
u64 in = 0ULL;
u8 *rx = transfer->rx_buf;
rc = fsi_spi_read_reg(ctx, SPI_FSI_COUNTER_CFG, &cfg);
if (rc)
return rc;
if (cfg & SPI_FSI_COUNTER_CFG_N2_IMPLICIT) {
rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, 0);
if (rc)
return rc;
}
while (transfer->len > recv) {
do {
rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
......@@ -439,6 +360,10 @@ static int fsi_spi_transfer_init(struct fsi_spi *ctx)
}
} while (seq_state && (seq_state != SPI_FSI_STATUS_SEQ_STATE_IDLE));
rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 0ULL);
if (rc)
return rc;
rc = fsi_spi_read_reg(ctx, SPI_FSI_CLOCK_CFG, &clock_cfg);
if (rc)
return rc;
......@@ -459,6 +384,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
{
int rc;
u8 seq_slave = SPI_FSI_SEQUENCE_SEL_SLAVE(mesg->spi->chip_select + 1);
unsigned int len;
struct spi_transfer *transfer;
struct fsi_spi *ctx = spi_controller_get_devdata(ctlr);
......@@ -471,8 +397,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_transfer *next = NULL;
/* Sequencer must do shift out (tx) first. */
if (!transfer->tx_buf ||
transfer->len > (ctx->max_xfr_size + 8)) {
if (!transfer->tx_buf || transfer->len > SPI_FSI_MAX_TX_SIZE) {
rc = -EINVAL;
goto error;
}
......@@ -486,9 +411,13 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
fsi_spi_sequence_init(&seq);
fsi_spi_sequence_add(&seq, seq_slave);
rc = fsi_spi_sequence_transfer(ctx, &seq, transfer);
if (rc)
goto error;
len = transfer->len;
while (len > 8) {
fsi_spi_sequence_add(&seq,
SPI_FSI_SEQUENCE_SHIFT_OUT(8));
len -= 8;
}
fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SHIFT_OUT(len));
if (!list_is_last(&transfer->transfer_list,
&mesg->transfers)) {
......@@ -496,7 +425,9 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
/* Sequencer can only do shift in (rx) after tx. */
if (next->rx_buf) {
if (next->len > ctx->max_xfr_size) {
u8 shift;
if (next->len > SPI_FSI_MAX_RX_SIZE) {
rc = -EINVAL;
goto error;
}
......@@ -504,10 +435,8 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
dev_dbg(ctx->dev, "Sequence rx of %d bytes.\n",
next->len);
rc = fsi_spi_sequence_transfer(ctx, &seq,
next);
if (rc)
goto error;
shift = SPI_FSI_SEQUENCE_SHIFT_IN(next->len);
fsi_spi_sequence_add(&seq, shift);
} else {
next = NULL;
}
......@@ -541,9 +470,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
{
struct fsi_spi *ctx = spi_controller_get_devdata(spi->controller);
return ctx->max_xfr_size;
return SPI_FSI_MAX_RX_SIZE;
}
static int fsi_spi_probe(struct device *dev)
......@@ -582,14 +509,6 @@ static int fsi_spi_probe(struct device *dev)
ctx->fsi = fsi;
ctx->base = base + SPI_FSI_BASE;
if (of_device_is_compatible(np, "ibm,fsi2spi-restricted")) {
ctx->restricted = true;
ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE_RESTRICTED;
} else {
ctx->restricted = false;
ctx->max_xfr_size = SPI_FSI_MAX_XFR_SIZE;
}
rc = devm_spi_register_controller(dev, ctlr);
if (rc)
spi_controller_put(ctlr);
......
......@@ -530,6 +530,7 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
goto err_rx_dma_buf;
}
memset(&cfg, 0, sizeof(cfg));
cfg.src_addr = phy_addr + SPI_POPR;
cfg.dst_addr = phy_addr + SPI_PUSHR;
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
......
......@@ -549,12 +549,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
*/
spin_lock_irq(&mas->lock);
geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
/*
* TX_WATERMARK_REG should be set after SPI configuration and
* setting up GENI SE engine, as driver starts data transfer
* for the watermark interrupt.
*/
if (m_cmd & SPI_TX_ONLY) {
if (geni_spi_handle_tx(mas))
writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
......
......@@ -1052,12 +1052,8 @@ static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
static void spi_imx_push(struct spi_imx_data *spi_imx)
{
unsigned int burst_len, fifo_words;
unsigned int burst_len;
if (spi_imx->dynamic_burst)
fifo_words = 4;
else
fifo_words = spi_imx_bytes_per_word(spi_imx->bits_per_word);
/*
* Reload the FIFO when the remaining bytes to be transferred in the
* current burst is 0. This only applies when bits_per_word is a
......@@ -1076,7 +1072,7 @@ static void spi_imx_push(struct spi_imx_data *spi_imx)
spi_imx->remainder = burst_len;
} else {
spi_imx->remainder = fifo_words;
spi_imx->remainder = spi_imx_bytes_per_word(spi_imx->bits_per_word);
}
}
......@@ -1084,8 +1080,7 @@ static void spi_imx_push(struct spi_imx_data *spi_imx)
if (!spi_imx->count)
break;
if (spi_imx->dynamic_burst &&
spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder,
fifo_words))
spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder, 4))
break;
spi_imx->tx(spi_imx);
spi_imx->txfifo++;
......@@ -1195,6 +1190,7 @@ static int spi_imx_setupxfer(struct spi_device *spi,
* dynamic_burst in that case.
*/
if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode &&
!(spi->mode & SPI_CS_WORD) &&
(spi_imx->bits_per_word == 8 ||
spi_imx->bits_per_word == 16 ||
spi_imx->bits_per_word == 32)) {
......@@ -1630,6 +1626,15 @@ static int spi_imx_probe(struct platform_device *pdev)
is_imx53_ecspi(spi_imx))
spi_imx->bitbang.master->mode_bits |= SPI_LOOP | SPI_READY;
if (is_imx51_ecspi(spi_imx) &&
device_property_read_u32(&pdev->dev, "cs-gpios", NULL))
/*
* When using HW-CS implementing SPI_CS_WORD can be done by just
* setting the burst length to the word size. This is
* considerably faster than manually controlling the CS.
*/
spi_imx->bitbang.master->mode_bits |= SPI_CS_WORD;
spi_imx->spi_drctl = spi_drctl;
init_completion(&spi_imx->xfer_done);
......
......@@ -42,8 +42,9 @@
#define SPI_CFG1_CS_IDLE_OFFSET 0
#define SPI_CFG1_PACKET_LOOP_OFFSET 8
#define SPI_CFG1_PACKET_LENGTH_OFFSET 16
#define SPI_CFG1_GET_TICK_DLY_OFFSET 30
#define SPI_CFG1_GET_TICK_DLY_OFFSET 29
#define SPI_CFG1_GET_TICK_DLY_MASK 0xe0000000
#define SPI_CFG1_CS_IDLE_MASK 0xff
#define SPI_CFG1_PACKET_LOOP_MASK 0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
......@@ -90,6 +91,8 @@ struct mtk_spi_compatible {
bool enhance_timing;
/* some IC support DMA addr extension */
bool dma_ext;
/* some IC no need unprepare SPI clk */
bool no_need_unprepare;
};
struct mtk_spi {
......@@ -104,6 +107,7 @@ struct mtk_spi {
struct scatterlist *tx_sgl, *rx_sgl;
u32 tx_sgl_len, rx_sgl_len;
const struct mtk_spi_compatible *dev_comp;
u32 spi_clk_hz;
};
static const struct mtk_spi_compatible mtk_common_compat;
......@@ -135,12 +139,21 @@ static const struct mtk_spi_compatible mt8183_compat = {
.enhance_timing = true,
};
static const struct mtk_spi_compatible mt6893_compat = {
.need_pad_sel = true,
.must_tx = true,
.enhance_timing = true,
.dma_ext = true,
.no_need_unprepare = true,
};
/*
* A piece of default chip info unless the platform
* supplies it.
*/
static const struct mtk_chip_config mtk_default_chip_info = {
.sample_sel = 0,
.tick_delay = 0,
};
static const struct of_device_id mtk_spi_of_match[] = {
......@@ -174,6 +187,9 @@ static const struct of_device_id mtk_spi_of_match[] = {
{ .compatible = "mediatek,mt8192-spi",
.data = (void *)&mt6765_compat,
},
{ .compatible = "mediatek,mt6893-spi",
.data = (void *)&mt6893_compat,
},
{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);
......@@ -192,6 +208,65 @@ static void mtk_spi_reset(struct mtk_spi *mdata)
writel(reg_val, mdata->base + SPI_CMD_REG);
}
static int mtk_spi_set_hw_cs_timing(struct spi_device *spi)
{
struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
struct spi_delay *cs_setup = &spi->cs_setup;
struct spi_delay *cs_hold = &spi->cs_hold;
struct spi_delay *cs_inactive = &spi->cs_inactive;
u32 setup, hold, inactive;
u32 reg_val;
int delay;
delay = spi_delay_to_ns(cs_setup, NULL);
if (delay < 0)
return delay;
setup = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
delay = spi_delay_to_ns(cs_hold, NULL);
if (delay < 0)
return delay;
hold = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
delay = spi_delay_to_ns(cs_inactive, NULL);
if (delay < 0)
return delay;
inactive = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;
setup = setup ? setup : 1;
hold = hold ? hold : 1;
inactive = inactive ? inactive : 1;
reg_val = readl(mdata->base + SPI_CFG0_REG);
if (mdata->dev_comp->enhance_timing) {
hold = min_t(u32, hold, 0x10000);
setup = min_t(u32, setup, 0x10000);
reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
reg_val |= (((hold - 1) & 0xffff)
<< SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
reg_val |= (((setup - 1) & 0xffff)
<< SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
} else {
hold = min_t(u32, hold, 0x100);
setup = min_t(u32, setup, 0x100);
reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
reg_val |= (((hold - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
reg_val |= (((setup - 1) & 0xff)
<< SPI_CFG0_CS_SETUP_OFFSET);
}
writel(reg_val, mdata->base + SPI_CFG0_REG);
inactive = min_t(u32, inactive, 0x100);
reg_val = readl(mdata->base + SPI_CFG1_REG);
reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
reg_val |= (((inactive - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
writel(reg_val, mdata->base + SPI_CFG1_REG);
return 0;
}
static int mtk_spi_prepare_message(struct spi_master *master,
struct spi_message *msg)
{
......@@ -261,6 +336,15 @@ static int mtk_spi_prepare_message(struct spi_master *master,
writel(mdata->pad_sel[spi->chip_select],
mdata->base + SPI_PAD_SEL_REG);
/* tick delay */
reg_val = readl(mdata->base + SPI_CFG1_REG);
reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
reg_val |= ((chip_config->tick_delay & 0x7)
<< SPI_CFG1_GET_TICK_DLY_OFFSET);
writel(reg_val, mdata->base + SPI_CFG1_REG);
/* set hw cs timing */
mtk_spi_set_hw_cs_timing(spi);
return 0;
}
......@@ -287,12 +371,11 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
static void mtk_spi_prepare_transfer(struct spi_master *master,
struct spi_transfer *xfer)
{
u32 spi_clk_hz, div, sck_time, reg_val;
u32 div, sck_time, reg_val;
struct mtk_spi *mdata = spi_master_get_devdata(master);
spi_clk_hz = clk_get_rate(mdata->spi_clk);
if (xfer->speed_hz < spi_clk_hz / 2)
div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
if (xfer->speed_hz < mdata->spi_clk_hz / 2)
div = DIV_ROUND_UP(mdata->spi_clk_hz, xfer->speed_hz);
else
div = 1;
......@@ -507,52 +590,6 @@ static bool mtk_spi_can_dma(struct spi_master *master,
(unsigned long)xfer->rx_buf % 4 == 0);
}
static int mtk_spi_set_hw_cs_timing(struct spi_device *spi,
struct spi_delay *setup,
struct spi_delay *hold,
struct spi_delay *inactive)
{
struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
u16 setup_dly, hold_dly, inactive_dly;
u32 reg_val;
if ((setup && setup->unit != SPI_DELAY_UNIT_SCK) ||
(hold && hold->unit != SPI_DELAY_UNIT_SCK) ||
(inactive && inactive->unit != SPI_DELAY_UNIT_SCK)) {
dev_err(&spi->dev,
"Invalid delay unit, should be SPI_DELAY_UNIT_SCK\n");
return -EINVAL;
}
setup_dly = setup ? setup->value : 1;
hold_dly = hold ? hold->value : 1;
inactive_dly = inactive ? inactive->value : 1;
reg_val = readl(mdata->base + SPI_CFG0_REG);
if (mdata->dev_comp->enhance_timing) {
reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
reg_val |= (((hold_dly - 1) & 0xffff)
<< SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
reg_val |= (((setup_dly - 1) & 0xffff)
<< SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
} else {
reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
reg_val |= (((hold_dly - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
reg_val |= (((setup_dly - 1) & 0xff)
<< SPI_CFG0_CS_SETUP_OFFSET);
}
writel(reg_val, mdata->base + SPI_CFG0_REG);
reg_val = readl(mdata->base + SPI_CFG1_REG);
reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
reg_val |= (((inactive_dly - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
writel(reg_val, mdata->base + SPI_CFG1_REG);
return 0;
}
static int mtk_spi_setup(struct spi_device *spi)
{
struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
......@@ -790,7 +827,12 @@ static int mtk_spi_probe(struct platform_device *pdev)
goto err_put_master;
}
clk_disable_unprepare(mdata->spi_clk);
mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);
if (mdata->dev_comp->no_need_unprepare)
clk_disable(mdata->spi_clk);
else
clk_disable_unprepare(mdata->spi_clk);
pm_runtime_enable(&pdev->dev);
......@@ -858,6 +900,9 @@ static int mtk_spi_remove(struct platform_device *pdev)
mtk_spi_reset(mdata);
if (mdata->dev_comp->no_need_unprepare)
clk_unprepare(mdata->spi_clk);
return 0;
}
......@@ -906,7 +951,10 @@ static int mtk_spi_runtime_suspend(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct mtk_spi *mdata = spi_master_get_devdata(master);
clk_disable_unprepare(mdata->spi_clk);
if (mdata->dev_comp->no_need_unprepare)
clk_disable(mdata->spi_clk);
else
clk_disable_unprepare(mdata->spi_clk);
return 0;
}
......@@ -917,7 +965,10 @@ static int mtk_spi_runtime_resume(struct device *dev)
struct mtk_spi *mdata = spi_master_get_devdata(master);
int ret;
ret = clk_prepare_enable(mdata->spi_clk);
if (mdata->dev_comp->no_need_unprepare)
ret = clk_enable(mdata->spi_clk);
else
ret = clk_prepare_enable(mdata->spi_clk);
if (ret < 0) {
dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
return ret;
......
......@@ -335,8 +335,10 @@ static int mxic_spi_data_xfer(struct mxic_spi *mxic, const void *txbuf,
static bool mxic_spi_mem_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
if (op->data.buswidth > 4 || op->addr.buswidth > 4 ||
op->dummy.buswidth > 4 || op->cmd.buswidth > 4)
bool all_false;
if (op->data.buswidth > 8 || op->addr.buswidth > 8 ||
op->dummy.buswidth > 8 || op->cmd.buswidth > 8)
return false;
if (op->data.nbytes && op->dummy.nbytes &&
......@@ -346,7 +348,13 @@ static bool mxic_spi_mem_supports_op(struct spi_mem *mem,
if (op->addr.nbytes > 7)
return false;
return spi_mem_default_supports_op(mem, op);
all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
!op->data.dtr;
if (all_false)
return spi_mem_default_supports_op(mem, op);
else
return spi_mem_dtr_supports_op(mem, op);
}
static int mxic_spi_mem_exec_op(struct spi_mem *mem,
......@@ -355,14 +363,15 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem,
struct mxic_spi *mxic = spi_master_get_devdata(mem->spi->master);
int nio = 1, i, ret;
u32 ss_ctrl;
u8 addr[8];
u8 opcode = op->cmd.opcode;
u8 addr[8], cmd[2];
ret = mxic_spi_set_freq(mxic, mem->spi->max_speed_hz);
if (ret)
return ret;
if (mem->spi->mode & (SPI_TX_QUAD | SPI_RX_QUAD))
if (mem->spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL))
nio = 8;
else if (mem->spi->mode & (SPI_TX_QUAD | SPI_RX_QUAD))
nio = 4;
else if (mem->spi->mode & (SPI_TX_DUAL | SPI_RX_DUAL))
nio = 2;
......@@ -374,19 +383,26 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem,
mxic->regs + HC_CFG);
writel(HC_EN_BIT, mxic->regs + HC_EN);
ss_ctrl = OP_CMD_BYTES(1) | OP_CMD_BUSW(fls(op->cmd.buswidth) - 1);
ss_ctrl = OP_CMD_BYTES(op->cmd.nbytes) |
OP_CMD_BUSW(fls(op->cmd.buswidth) - 1) |
(op->cmd.dtr ? OP_CMD_DDR : 0);
if (op->addr.nbytes)
ss_ctrl |= OP_ADDR_BYTES(op->addr.nbytes) |
OP_ADDR_BUSW(fls(op->addr.buswidth) - 1);
OP_ADDR_BUSW(fls(op->addr.buswidth) - 1) |
(op->addr.dtr ? OP_ADDR_DDR : 0);
if (op->dummy.nbytes)
ss_ctrl |= OP_DUMMY_CYC(op->dummy.nbytes);
if (op->data.nbytes) {
ss_ctrl |= OP_DATA_BUSW(fls(op->data.buswidth) - 1);
if (op->data.dir == SPI_MEM_DATA_IN)
ss_ctrl |= OP_DATA_BUSW(fls(op->data.buswidth) - 1) |
(op->data.dtr ? OP_DATA_DDR : 0);
if (op->data.dir == SPI_MEM_DATA_IN) {
ss_ctrl |= OP_READ;
if (op->data.dtr)
ss_ctrl |= OP_DQS_EN;
}
}
writel(ss_ctrl, mxic->regs + SS_CTRL(mem->spi->chip_select));
......@@ -394,7 +410,10 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem,
writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_ASSERT,
mxic->regs + HC_CFG);
ret = mxic_spi_data_xfer(mxic, &opcode, NULL, 1);
for (i = 0; i < op->cmd.nbytes; i++)
cmd[i] = op->cmd.opcode >> (8 * (op->cmd.nbytes - i - 1));
ret = mxic_spi_data_xfer(mxic, cmd, NULL, op->cmd.nbytes);
if (ret)
goto out;
......@@ -567,7 +586,8 @@ static int mxic_spi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->mode_bits = SPI_CPOL | SPI_CPHA |
SPI_RX_DUAL | SPI_TX_DUAL |
SPI_RX_QUAD | SPI_TX_QUAD;
SPI_RX_QUAD | SPI_TX_QUAD |
SPI_RX_OCTAL | SPI_TX_OCTAL;
mxic_spi_hw_init(mxic);
......
......@@ -328,8 +328,16 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
static void orion_spi_set_cs(struct spi_device *spi, bool enable)
{
struct orion_spi *orion_spi;
void __iomem *ctrl_reg;
u32 val;
orion_spi = spi_master_get_devdata(spi->master);
ctrl_reg = spi_reg(orion_spi, ORION_SPI_IF_CTRL_REG);
val = readl(ctrl_reg);
/* Clear existing chip-select and assertion state */
val &= ~(ORION_SPI_CS_MASK | 0x1);
/*
* If this line is using a GPIO to control chip select, this internal
......@@ -338,9 +346,7 @@ static void orion_spi_set_cs(struct spi_device *spi, bool enable)
* as it is handled by a GPIO, but that doesn't matter. What we need
* is to deassert the old chip select and assert some other chip select.
*/
orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, ORION_SPI_CS_MASK);
orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG,
ORION_SPI_CS(spi->chip_select));
val |= ORION_SPI_CS(spi->chip_select);
/*
* Chip select logic is inverted from spi_set_cs(). For lines using a
......@@ -350,9 +356,13 @@ static void orion_spi_set_cs(struct spi_device *spi, bool enable)
* doesn't matter.
*/
if (!enable)
orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
else
orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
val |= 0x1;
/*
* To avoid toggling unwanted chip selects update the register
* with a single write.
*/
writel(val, ctrl_reg);
}
static inline int orion_spi_wait_till_ready(struct orion_spi *orion_spi)
......
......@@ -361,6 +361,7 @@ static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
struct dma_slave_config cfg;
int ret;
memset(&cfg, 0, sizeof(cfg));
cfg.device_fc = true;
cfg.src_addr = pic32s->dma_base + buf_offset;
cfg.dst_addr = pic32s->dma_base + buf_offset;
......
......@@ -594,24 +594,29 @@ static int u32_reader(struct driver_data *drv_data)
static void reset_sccr1(struct driver_data *drv_data)
{
struct chip_data *chip =
spi_get_ctldata(drv_data->controller->cur_msg->spi);
u32 sccr1_reg;
u32 mask = drv_data->int_cr1 | drv_data->dma_cr1, threshold;
struct chip_data *chip;
if (drv_data->controller->cur_msg) {
chip = spi_get_ctldata(drv_data->controller->cur_msg->spi);
threshold = chip->threshold;
} else {
threshold = 0;
}
sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
sccr1_reg &= ~QUARK_X1000_SSCR1_RFT;
mask |= QUARK_X1000_SSCR1_RFT;
break;
case CE4100_SSP:
sccr1_reg &= ~CE4100_SSCR1_RFT;
mask |= CE4100_SSCR1_RFT;
break;
default:
sccr1_reg &= ~SSCR1_RFT;
mask |= SSCR1_RFT;
break;
}
sccr1_reg |= chip->threshold;
pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
pxa2xx_spi_update(drv_data, SSCR1, mask, threshold);
}
static void int_stop_and_reset(struct driver_data *drv_data)
......@@ -724,11 +729,8 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
static void handle_bad_msg(struct driver_data *drv_data)
{
int_stop_and_reset(drv_data);
pxa2xx_spi_off(drv_data);
clear_SSCR1_bits(drv_data, drv_data->int_cr1);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
write_SSSR_CS(drv_data, drv_data->clear_sr);
dev_err(drv_data->ssp->dev, "bad message state in interrupt handler\n");
}
......@@ -1156,13 +1158,10 @@ static void pxa2xx_spi_handle_err(struct spi_controller *controller,
{
struct driver_data *drv_data = spi_controller_get_devdata(controller);
int_stop_and_reset(drv_data);
/* Disable the SSP */
pxa2xx_spi_off(drv_data);
/* Clear and disable interrupts and service requests */
write_SSSR_CS(drv_data, drv_data->clear_sr);
clear_SSCR1_bits(drv_data, drv_data->int_cr1 | drv_data->dma_cr1);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
/*
* Stop the DMA if running. Note DMA callback handler may have unset
......
......@@ -162,6 +162,8 @@
#define SPI_3WIRE_TX 3
#define SPI_3WIRE_RX 4
#define STM32_SPI_AUTOSUSPEND_DELAY 1 /* 1 ms */
/*
* use PIO for small transfers, avoiding DMA setup/teardown overhead for drivers
* without fifo buffers.
......@@ -568,29 +570,30 @@ static void stm32f4_spi_read_rx(struct stm32_spi *spi)
/**
* stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register
* @spi: pointer to the spi controller data structure
* @flush: boolean indicating that FIFO should be flushed
*
* Write in rx_buf depends on remaining bytes to avoid to write beyond
* rx_buf end.
*/
static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi)
{
u32 sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
u32 rxplvl = FIELD_GET(STM32H7_SPI_SR_RXPLVL, sr);
while ((spi->rx_len > 0) &&
((sr & STM32H7_SPI_SR_RXP) ||
(flush && ((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) {
((sr & STM32H7_SPI_SR_EOT) &&
((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) {
u32 offs = spi->cur_xferlen - spi->rx_len;
if ((spi->rx_len >= sizeof(u32)) ||
(flush && (sr & STM32H7_SPI_SR_RXWNE))) {
(sr & STM32H7_SPI_SR_RXWNE)) {
u32 *rx_buf32 = (u32 *)(spi->rx_buf + offs);
*rx_buf32 = readl_relaxed(spi->base + STM32H7_SPI_RXDR);
spi->rx_len -= sizeof(u32);
} else if ((spi->rx_len >= sizeof(u16)) ||
(flush && (rxplvl >= 2 || spi->cur_bpw > 8))) {
(!(sr & STM32H7_SPI_SR_RXWNE) &&
(rxplvl >= 2 || spi->cur_bpw > 8))) {
u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
*rx_buf16 = readw_relaxed(spi->base + STM32H7_SPI_RXDR);
......@@ -606,8 +609,8 @@ static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
rxplvl = FIELD_GET(STM32H7_SPI_SR_RXPLVL, sr);
}
dev_dbg(spi->dev, "%s%s: %d bytes left\n", __func__,
flush ? "(flush)" : "", spi->rx_len);
dev_dbg(spi->dev, "%s: %d bytes left (sr=%08x)\n",
__func__, spi->rx_len, sr);
}
/**
......@@ -674,18 +677,12 @@ static void stm32f4_spi_disable(struct stm32_spi *spi)
* stm32h7_spi_disable - Disable SPI controller
* @spi: pointer to the spi controller data structure
*
* RX-Fifo is flushed when SPI controller is disabled. To prevent any data
* loss, use stm32h7_spi_read_rxfifo(flush) to read the remaining bytes in
* RX-Fifo.
* Normally, if TSIZE has been configured, we should relax the hardware at the
* reception of the EOT interrupt. But in case of error, EOT will not be
* raised. So the subsystem unprepare_message call allows us to properly
* complete the transfer from an hardware point of view.
* RX-Fifo is flushed when SPI controller is disabled.
*/
static void stm32h7_spi_disable(struct stm32_spi *spi)
{
unsigned long flags;
u32 cr1, sr;
u32 cr1;
dev_dbg(spi->dev, "disable controller\n");
......@@ -698,25 +695,6 @@ static void stm32h7_spi_disable(struct stm32_spi *spi)
return;
}
/* Wait on EOT or suspend the flow */
if (readl_relaxed_poll_timeout_atomic(spi->base + STM32H7_SPI_SR,
sr, !(sr & STM32H7_SPI_SR_EOT),
10, 100000) < 0) {
if (cr1 & STM32H7_SPI_CR1_CSTART) {
writel_relaxed(cr1 | STM32H7_SPI_CR1_CSUSP,
spi->base + STM32H7_SPI_CR1);
if (readl_relaxed_poll_timeout_atomic(
spi->base + STM32H7_SPI_SR,
sr, !(sr & STM32H7_SPI_SR_SUSP),
10, 100000) < 0)
dev_warn(spi->dev,
"Suspend request timeout\n");
}
}
if (!spi->cur_usedma && spi->rx_buf && (spi->rx_len > 0))
stm32h7_spi_read_rxfifo(spi, true);
if (spi->cur_usedma && spi->dma_tx)
dmaengine_terminate_all(spi->dma_tx);
if (spi->cur_usedma && spi->dma_rx)
......@@ -911,7 +889,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
if (__ratelimit(&rs))
dev_dbg_ratelimited(spi->dev, "Communication suspended\n");
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
stm32h7_spi_read_rxfifo(spi, false);
stm32h7_spi_read_rxfifo(spi);
/*
* If communication is suspended while using DMA, it means
* that something went wrong, so stop the current transfer
......@@ -932,8 +910,10 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
if (sr & STM32H7_SPI_SR_EOT) {
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
stm32h7_spi_read_rxfifo(spi, true);
end = true;
stm32h7_spi_read_rxfifo(spi);
if (!spi->cur_usedma ||
(spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX))
end = true;
}
if (sr & STM32H7_SPI_SR_TXP)
......@@ -942,7 +922,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
if (sr & STM32H7_SPI_SR_RXP)
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
stm32h7_spi_read_rxfifo(spi, false);
stm32h7_spi_read_rxfifo(spi);
writel_relaxed(sr & mask, spi->base + STM32H7_SPI_IFCR);
......@@ -1041,42 +1021,17 @@ static void stm32f4_spi_dma_tx_cb(void *data)
}
/**
* stm32f4_spi_dma_rx_cb - dma callback
* stm32_spi_dma_rx_cb - dma callback
* @data: pointer to the spi controller data structure
*
* DMA callback is called when the transfer is complete for DMA RX channel.
*/
static void stm32f4_spi_dma_rx_cb(void *data)
static void stm32_spi_dma_rx_cb(void *data)
{
struct stm32_spi *spi = data;
spi_finalize_current_transfer(spi->master);
stm32f4_spi_disable(spi);
}
/**
* stm32h7_spi_dma_cb - dma callback
* @data: pointer to the spi controller data structure
*
* DMA callback is called when the transfer is complete or when an error
* occurs. If the transfer is complete, EOT flag is raised.
*/
static void stm32h7_spi_dma_cb(void *data)
{
struct stm32_spi *spi = data;
unsigned long flags;
u32 sr;
spin_lock_irqsave(&spi->lock, flags);
sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
spin_unlock_irqrestore(&spi->lock, flags);
if (!(sr & STM32H7_SPI_SR_EOT))
dev_warn(spi->dev, "DMA error (sr=0x%08x)\n", sr);
/* Now wait for EOT, or SUSP or OVR in case of error */
spi->cfg->disable(spi);
}
/**
......@@ -1242,11 +1197,13 @@ static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi)
*/
static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
{
/* Enable the interrupts relative to the end of transfer */
stm32_spi_set_bits(spi, STM32H7_SPI_IER, STM32H7_SPI_IER_EOTIE |
STM32H7_SPI_IER_TXTFIE |
STM32H7_SPI_IER_OVRIE |
STM32H7_SPI_IER_MODFIE);
uint32_t ier = STM32H7_SPI_IER_OVRIE | STM32H7_SPI_IER_MODFIE;
/* Enable the interrupts */
if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX)
ier |= STM32H7_SPI_IER_EOTIE | STM32H7_SPI_IER_TXTFIE;
stm32_spi_set_bits(spi, STM32H7_SPI_IER, ier);
stm32_spi_enable(spi);
......@@ -1645,10 +1602,6 @@ static int stm32_spi_transfer_one(struct spi_master *master,
struct stm32_spi *spi = spi_master_get_devdata(master);
int ret;
/* Don't do anything on 0 bytes transfers */
if (transfer->len == 0)
return 0;
spi->tx_buf = transfer->tx_buf;
spi->rx_buf = transfer->rx_buf;
spi->tx_len = spi->tx_buf ? transfer->len : 0;
......@@ -1762,7 +1715,7 @@ static const struct stm32_spi_cfg stm32f4_spi_cfg = {
.set_mode = stm32f4_spi_set_mode,
.transfer_one_dma_start = stm32f4_spi_transfer_one_dma_start,
.dma_tx_cb = stm32f4_spi_dma_tx_cb,
.dma_rx_cb = stm32f4_spi_dma_rx_cb,
.dma_rx_cb = stm32_spi_dma_rx_cb,
.transfer_one_irq = stm32f4_spi_transfer_one_irq,
.irq_handler_event = stm32f4_spi_irq_event,
.irq_handler_thread = stm32f4_spi_irq_thread,
......@@ -1782,8 +1735,11 @@ static const struct stm32_spi_cfg stm32h7_spi_cfg = {
.set_data_idleness = stm32h7_spi_data_idleness,
.set_number_of_data = stm32h7_spi_number_of_data,
.transfer_one_dma_start = stm32h7_spi_transfer_one_dma_start,
.dma_rx_cb = stm32h7_spi_dma_cb,
.dma_tx_cb = stm32h7_spi_dma_cb,
.dma_rx_cb = stm32_spi_dma_rx_cb,
/*
* dma_tx_cb is not necessary since in case of TX, dma is followed by
* SPI access hence handling is performed within the SPI interrupt
*/
.transfer_one_irq = stm32h7_spi_transfer_one_irq,
.irq_handler_thread = stm32h7_spi_irq_thread,
.baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN,
......@@ -1927,6 +1883,9 @@ static int stm32_spi_probe(struct platform_device *pdev)
if (spi->dma_tx || spi->dma_rx)
master->can_dma = stm32_spi_can_dma;
pm_runtime_set_autosuspend_delay(&pdev->dev,
STM32_SPI_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_enable(&pdev->dev);
......@@ -1938,6 +1897,9 @@ static int stm32_spi_probe(struct platform_device *pdev)
goto err_pm_disable;
}
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
dev_info(&pdev->dev, "driver initialized\n");
return 0;
......@@ -1946,6 +1908,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
err_dma_release:
if (spi->dma_tx)
dma_release_channel(spi->dma_tx);
......@@ -1970,6 +1933,8 @@ static int stm32_spi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
if (master->dma_tx)
dma_release_channel(master->dma_tx);
if (master->dma_rx)
......
......@@ -717,12 +717,12 @@ static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
dma_release_channel(dma_chan);
}
static int tegra_spi_set_hw_cs_timing(struct spi_device *spi,
struct spi_delay *setup,
struct spi_delay *hold,
struct spi_delay *inactive)
static int tegra_spi_set_hw_cs_timing(struct spi_device *spi)
{
struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
struct spi_delay *setup = &spi->cs_setup;
struct spi_delay *hold = &spi->cs_hold;
struct spi_delay *inactive = &spi->cs_inactive;
u8 setup_dly, hold_dly, inactive_dly;
u32 setup_hold;
u32 spi_cs_timing;
......
......@@ -1061,33 +1061,12 @@ static int tegra_slink_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Can not get clock %d\n", ret);
goto exit_free_master;
}
ret = clk_prepare(tspi->clk);
if (ret < 0) {
dev_err(&pdev->dev, "Clock prepare failed %d\n", ret);
goto exit_free_master;
}
ret = clk_enable(tspi->clk);
if (ret < 0) {
dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
goto exit_clk_unprepare;
}
spi_irq = platform_get_irq(pdev, 0);
tspi->irq = spi_irq;
ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
tegra_slink_isr_thread, IRQF_ONESHOT,
dev_name(&pdev->dev), tspi);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
tspi->irq);
goto exit_clk_disable;
}
tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
if (IS_ERR(tspi->rst)) {
dev_err(&pdev->dev, "can not get reset\n");
ret = PTR_ERR(tspi->rst);
goto exit_free_irq;
goto exit_free_master;
}
tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
......@@ -1095,7 +1074,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
ret = tegra_slink_init_dma_param(tspi, true);
if (ret < 0)
goto exit_free_irq;
goto exit_free_master;
ret = tegra_slink_init_dma_param(tspi, false);
if (ret < 0)
goto exit_rx_dma_free;
......@@ -1106,16 +1085,9 @@ static int tegra_slink_probe(struct platform_device *pdev)
init_completion(&tspi->xfer_completion);
pm_runtime_enable(&pdev->dev);
if (!pm_runtime_enabled(&pdev->dev)) {
ret = tegra_slink_runtime_resume(&pdev->dev);
if (ret)
goto exit_pm_disable;
}
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
pm_runtime_put_noidle(&pdev->dev);
goto exit_pm_disable;
}
......@@ -1123,33 +1095,43 @@ static int tegra_slink_probe(struct platform_device *pdev)
udelay(2);
reset_control_deassert(tspi->rst);
spi_irq = platform_get_irq(pdev, 0);
tspi->irq = spi_irq;
ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
tegra_slink_isr_thread, IRQF_ONESHOT,
dev_name(&pdev->dev), tspi);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
tspi->irq);
goto exit_pm_put;
}
tspi->def_command_reg = SLINK_M_S;
tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
pm_runtime_put(&pdev->dev);
master->dev.of_node = pdev->dev.of_node;
ret = devm_spi_register_master(&pdev->dev, master);
ret = spi_register_master(master);
if (ret < 0) {
dev_err(&pdev->dev, "can not register to master err %d\n", ret);
goto exit_pm_disable;
goto exit_free_irq;
}
pm_runtime_put(&pdev->dev);
return ret;
exit_free_irq:
free_irq(spi_irq, tspi);
exit_pm_put:
pm_runtime_put(&pdev->dev);
exit_pm_disable:
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
tegra_slink_runtime_suspend(&pdev->dev);
tegra_slink_deinit_dma_param(tspi, false);
exit_rx_dma_free:
tegra_slink_deinit_dma_param(tspi, true);
exit_free_irq:
free_irq(spi_irq, tspi);
exit_clk_disable:
clk_disable(tspi->clk);
exit_clk_unprepare:
clk_unprepare(tspi->clk);
exit_free_master:
spi_master_put(master);
return ret;
......@@ -1160,10 +1142,11 @@ static int tegra_slink_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct tegra_slink_data *tspi = spi_master_get_devdata(master);
spi_unregister_master(master);
free_irq(tspi->irq, tspi);
clk_disable(tspi->clk);
clk_unprepare(tspi->clk);
pm_runtime_disable(&pdev->dev);
if (tspi->tx_dma_chan)
tegra_slink_deinit_dma_param(tspi, false);
......@@ -1171,10 +1154,6 @@ static int tegra_slink_remove(struct platform_device *pdev)
if (tspi->rx_dma_chan)
tegra_slink_deinit_dma_param(tspi, true);
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
tegra_slink_runtime_suspend(&pdev->dev);
return 0;
}
......
......@@ -545,7 +545,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
ZYNQ_QSPI_IXR_RXTX_MASK);
if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
if (!wait_for_completion_timeout(&xqspi->data_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
}
......@@ -563,7 +563,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
ZYNQ_QSPI_IXR_RXTX_MASK);
if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
if (!wait_for_completion_timeout(&xqspi->data_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
}
......@@ -579,7 +579,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
ZYNQ_QSPI_IXR_RXTX_MASK);
if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
if (!wait_for_completion_timeout(&xqspi->data_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
......@@ -603,7 +603,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
ZYNQ_QSPI_IXR_RXTX_MASK);
if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
if (!wait_for_completion_timeout(&xqspi->data_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
}
......
......@@ -846,9 +846,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
!spi->controller->set_cs_timing) {
if (activate)
spi_delay_exec(&spi->controller->cs_setup, NULL);
spi_delay_exec(&spi->cs_setup, NULL);
else
spi_delay_exec(&spi->controller->cs_hold, NULL);
spi_delay_exec(&spi->cs_hold, NULL);
}
if (spi->mode & SPI_CS_HIGH)
......@@ -891,7 +891,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
!spi->controller->set_cs_timing) {
if (!activate)
spi_delay_exec(&spi->controller->cs_inactive, NULL);
spi_delay_exec(&spi->cs_inactive, NULL);
}
}
......
......@@ -12,5 +12,6 @@
/* Board specific platform_data */
struct mtk_chip_config {
u32 sample_sel;
u32 tick_delay;
};
#endif
......@@ -147,7 +147,11 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
* not using a GPIO line)
* @word_delay: delay to be inserted between consecutive
* words of a transfer
*
* @cs_setup: delay to be introduced by the controller after CS is asserted
* @cs_hold: delay to be introduced by the controller before CS is deasserted
* @cs_inactive: delay to be introduced by the controller after CS is
* deasserted. If @cs_change_delay is used from @spi_transfer, then the
* two delays will be added up.
* @statistics: statistics for the spi_device
*
* A @spi_device is used to interchange data between an SPI slave
......@@ -188,6 +192,10 @@ struct spi_device {
int cs_gpio; /* LEGACY: chip select gpio */
struct gpio_desc *cs_gpiod; /* chip select gpio desc */
struct spi_delay word_delay; /* inter-word delay */
/* CS delays */
struct spi_delay cs_setup;
struct spi_delay cs_hold;
struct spi_delay cs_inactive;
/* the statistics */
struct spi_statistics statistics;
......@@ -339,6 +347,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* @max_speed_hz: Highest supported transfer speed
* @flags: other constraints relevant to this driver
* @slave: indicates that this is an SPI slave controller
* @devm_allocated: whether the allocation of this struct is devres-managed
* @max_transfer_size: function that returns the max transfer size for
* a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
* @max_message_size: function that returns the max message size for
......@@ -412,11 +421,6 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* controller has native support for memory like operations.
* @unprepare_message: undo any work done by prepare_message().
* @slave_abort: abort the ongoing transfer request on an SPI slave controller
* @cs_setup: delay to be introduced by the controller after CS is asserted
* @cs_hold: delay to be introduced by the controller before CS is deasserted
* @cs_inactive: delay to be introduced by the controller after CS is
* deasserted. If @cs_change_delay is used from @spi_transfer, then the
* two delays will be added up.
* @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per
* CS number. Any individual value may be -ENOENT for CS lines that
* are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods
......@@ -511,7 +515,7 @@ struct spi_controller {
#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */
/* flag indicating this is a non-devres managed controller */
/* flag indicating if the allocation of this struct is devres-managed */
bool devm_allocated;
/* flag indicating this is an SPI slave controller */
......@@ -550,8 +554,7 @@ struct spi_controller {
* to configure specific CS timing through spi_set_cs_timing() after
* spi_setup().
*/
int (*set_cs_timing)(struct spi_device *spi, struct spi_delay *setup,
struct spi_delay *hold, struct spi_delay *inactive);
int (*set_cs_timing)(struct spi_device *spi);
/* bidirectional bulk transfers
*
......@@ -638,11 +641,6 @@ struct spi_controller {
/* Optimized handlers for SPI memory-like operations. */
const struct spi_controller_mem_ops *mem_ops;
/* CS delays */
struct spi_delay cs_setup;
struct spi_delay cs_hold;
struct spi_delay cs_inactive;
/* gpio chip select */
int *cs_gpios;
struct gpio_desc **cs_gpiods;
......