Commit 50635787 authored by Linus Torvalds

Merge tag 'spi-fix-v6.0-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi

Pull spi fixes from Mark Brown:
 "Several fixes that came in since the merge window, the major one being
  a fix for the spi-mux driver which was broken by the performance
  optimisations due to it peering inside the core's data structures more
  than it should"

* tag 'spi-fix-v6.0-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi:
  spi: spi: Fix queue hang if previous transfer failed
  spi: mux: Fix mux interaction with fast path optimisations
  spi: cadence-quadspi: Disable irqs during indirect reads
  spi: bitbang: Fix lsb-first Rx
parents c5e68c4f 9c9c9da7
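For readers following the headline spi-mux fix below: the core's recent spi_sync fast path skips the message queue whenever the queue is observed empty, and per the pull message the mux driver depends on the core's internals more than it should, so it now opts out of the fast paths entirely via the new must_async flag that __spi_sync() checks. The snippet that follows is a minimal user-space sketch of that decision only, not kernel code; struct fake_controller and takes_sync_fast_path() are invented names for illustration.

/*
 * Sketch of the spi_sync fast-path decision after this series: the queue is
 * skipped only when it is observed empty AND the controller has not opted
 * out via must_async (as spi-mux now does). Illustrative names, not kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_controller {
        bool queue_empty;       /* mirrors ctlr->queue_empty */
        bool must_async;        /* mirrors ctlr->must_async */
};

static bool takes_sync_fast_path(const struct fake_controller *ctlr)
{
        /* same shape as the check added to __spi_sync() in the diff below */
        return ctlr->queue_empty && !ctlr->must_async;
}

int main(void)
{
        struct fake_controller plain = { .queue_empty = true, .must_async = false };
        struct fake_controller mux = { .queue_empty = true, .must_async = true };

        printf("plain controller, empty queue -> fast path: %d\n",
               takes_sync_fast_path(&plain));   /* 1: queue skipped */
        printf("spi-mux child bus, empty queue -> fast path: %d\n",
               takes_sync_fast_path(&mux));     /* 0: always queued */
        return 0;
}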
@@ -116,6 +116,7 @@ bitbang_txrx_le_cpha0(struct spi_device *spi,
 {
         /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
+        u8 rxbit = bits - 1;
         u32 oldbit = !(word & 1);
         /* clock starts at inactive polarity */
         for (; likely(bits); bits--) {
@@ -135,7 +136,7 @@ bitbang_txrx_le_cpha0(struct spi_device *spi,
                 /* sample LSB (from slave) on leading edge */
                 word >>= 1;
                 if ((flags & SPI_MASTER_NO_RX) == 0)
-                        word |= getmiso(spi) << (bits - 1);
+                        word |= getmiso(spi) << rxbit;
                 setsck(spi, cpol);
         }
         return word;
@@ -148,6 +149,7 @@ bitbang_txrx_le_cpha1(struct spi_device *spi,
 {
         /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */
+        u8 rxbit = bits - 1;
         u32 oldbit = !(word & 1);
         /* clock starts at inactive polarity */
         for (; likely(bits); bits--) {
@@ -168,7 +170,7 @@ bitbang_txrx_le_cpha1(struct spi_device *spi,
                 /* sample LSB (from slave) on trailing edge */
                 word >>= 1;
                 if ((flags & SPI_MASTER_NO_RX) == 0)
-                        word |= getmiso(spi) << (bits - 1);
+                        word |= getmiso(spi) << rxbit;
         }
         return word;
 }
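The bitbang change above captures the receive bit position once (rxbit = bits - 1) instead of re-deriving it from the decrementing loop counter: because the accumulator is shifted right on every clock, OR-ing each incoming bit at the moving (bits - 1) position lands on the bit received on the previous clock. The stand-alone program below is a sketch of that difference, assuming an 8-bit LSB-first transfer of 0xA5; miso_bit() is a made-up stand-in for getmiso() and none of this is driver code.

/*
 * Demo of the LSB-first receive fix: fixed top position reassembles the
 * byte, the old moving position corrupts it.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int miso_bit(uint8_t wire_value, unsigned int clock)
{
        return (wire_value >> clock) & 1;       /* LSB goes out first */
}

static uint32_t rx_fixed_position(uint8_t wire_value, unsigned int bits)
{
        uint32_t word = 0;
        unsigned int rxbit = bits - 1;          /* fixed, as in the fix */
        unsigned int clock = 0;

        for (; bits; bits--, clock++) {
                word >>= 1;
                word |= (uint32_t)miso_bit(wire_value, clock) << rxbit;
        }
        return word;
}

static uint32_t rx_moving_position(uint8_t wire_value, unsigned int bits)
{
        uint32_t word = 0;
        unsigned int clock = 0;

        for (; bits; bits--, clock++) {
                word >>= 1;
                /* old behaviour: position shrinks with the loop counter */
                word |= (uint32_t)miso_bit(wire_value, clock) << (bits - 1);
        }
        return word;
}

int main(void)
{
        printf("fixed rxbit : 0x%02x\n", (unsigned int)rx_fixed_position(0xA5, 8));
        printf("moving shift: 0x%02x\n", (unsigned int)rx_moving_position(0xA5, 8));
        return 0;
}

Run, the fixed-position variant prints 0xa5 while the moving-position variant does not, which is the corruption the patch removes.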
@@ -39,6 +39,7 @@
 #define CQSPI_DISABLE_DAC_MODE          BIT(1)
 #define CQSPI_SUPPORT_EXTERNAL_DMA      BIT(2)
 #define CQSPI_NO_SUPPORT_WR_COMPLETION  BIT(3)
+#define CQSPI_SLOW_SRAM                 BIT(4)

 /* Capabilities */
 #define CQSPI_SUPPORTS_OCTAL            BIT(0)
@@ -87,6 +88,7 @@ struct cqspi_st {
         bool                    use_dma_read;
         u32                     pd_dev_id;
         bool                    wr_completion;
+        bool                    slow_sram;
 };

 struct cqspi_driver_platdata {
@@ -333,7 +335,10 @@ static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
                 }
         }

-        irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
+        else if (!cqspi->slow_sram)
+                irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
+        else
+                irq_status &= CQSPI_REG_IRQ_WATERMARK | CQSPI_IRQ_MASK_WR;

         if (irq_status)
                 complete(&cqspi->transfer_complete);
@@ -673,7 +678,18 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
         /* Clear all interrupts. */
         writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

-        writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
+        /*
+         * On SoCFPGA platform reading the SRAM is slow due to
+         * hardware limitation and causing read interrupt storm to CPU,
+         * so enabling only watermark interrupt to disable all read
+         * interrupts later as we want to run "bytes to read" loop with
+         * all the read interrupts disabled for max performance.
+         */
+        if (!cqspi->slow_sram)
+                writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
+        else
+                writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);

         reinit_completion(&cqspi->transfer_complete);
         writel(CQSPI_REG_INDIRECTRD_START_MASK,
@@ -684,6 +700,13 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
                                          msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
                         ret = -ETIMEDOUT;

+                /*
+                 * Disable all read interrupts until
+                 * we are out of "bytes to read"
+                 */
+                if (cqspi->slow_sram)
+                        writel(0x0, reg_base + CQSPI_REG_IRQMASK);
+
                 bytes_to_read = cqspi_get_rd_sram_level(cqspi);

                 if (ret && bytes_to_read == 0) {
@@ -715,8 +738,11 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
                         bytes_to_read = cqspi_get_rd_sram_level(cqspi);
                 }

-                if (remaining > 0)
+                if (remaining > 0) {
                         reinit_completion(&cqspi->transfer_complete);
+                        if (cqspi->slow_sram)
+                                writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
+                }
         }

         /* Check indirect done status */
@@ -1667,6 +1693,8 @@ static int cqspi_probe(struct platform_device *pdev)
                         cqspi->use_dma_read = true;
                 if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION)
                         cqspi->wr_completion = false;
+                if (ddata->quirks & CQSPI_SLOW_SRAM)
+                        cqspi->slow_sram = true;

                 if (of_device_is_compatible(pdev->dev.of_node,
                                             "xlnx,versal-ospi-1.0"))
@@ -1779,7 +1807,9 @@ static const struct cqspi_driver_platdata intel_lgm_qspi = {
 };

 static const struct cqspi_driver_platdata socfpga_qspi = {
-        .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION,
+        .quirks = CQSPI_DISABLE_DAC_MODE
+                        | CQSPI_NO_SUPPORT_WR_COMPLETION
+                        | CQSPI_SLOW_SRAM,
 };

 static const struct cqspi_driver_platdata versal_ospi = {
...
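The cadence-quadspi hunks above gate the indirect-read interrupts behind the new CQSPI_SLOW_SRAM quirk: affected SoCFPGA parts arm only the SRAM watermark interrupt before starting the read, the reader masks all read interrupts while it drains the "bytes to read" loop, and the watermark interrupt is re-armed only when data is still outstanding. The toy program below models just that masking sequence in user space; the IRQ_* constants, functions, and the "hardware" variable are invented stand-ins, not the driver's registers.

/*
 * Toy model of the slow-SRAM interrupt gating: watermark-only arming, full
 * masking while draining, re-arm only if more data is expected.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define IRQ_MASK_RD     0x01    /* stands in for CQSPI_IRQ_MASK_RD */
#define IRQ_WATERMARK   0x02    /* stands in for CQSPI_REG_IRQ_WATERMARK */

static unsigned int irq_mask;   /* stands in for the IRQMASK register */

static void arm_for_read(bool slow_sram)
{
        /* before starting the indirect read */
        irq_mask = slow_sram ? IRQ_WATERMARK : IRQ_MASK_RD;
}

static void drain_step(bool slow_sram, size_t *remaining, size_t chunk)
{
        if (slow_sram)
                irq_mask = 0;   /* no read irqs while copying out of SRAM */

        *remaining -= (chunk < *remaining) ? chunk : *remaining;

        if (*remaining && slow_sram)
                irq_mask = IRQ_WATERMARK;       /* wait for the next fill */
}

int main(void)
{
        size_t remaining = 300;

        arm_for_read(true);
        while (remaining) {
                printf("irq_mask=0x%02x, remaining=%zu\n", irq_mask, remaining);
                drain_step(true, &remaining, 128);
        }
        printf("irq_mask=0x%02x, done\n", irq_mask);
        return 0;
}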
@@ -161,6 +161,7 @@ static int spi_mux_probe(struct spi_device *spi)
         ctlr->num_chipselect = mux_control_states(priv->mux);
         ctlr->bus_num = -1;
         ctlr->dev.of_node = spi->dev.of_node;
+        ctlr->must_async = true;

         ret = devm_spi_register_controller(&spi->dev, ctlr);
         if (ret)
...
@@ -1727,8 +1727,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
         spin_unlock_irqrestore(&ctlr->queue_lock, flags);

         ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
-        if (!ret)
-                kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
+        kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

         ctlr->cur_msg = NULL;
         ctlr->fallback = false;
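The __spi_pump_messages() hunk above is the "Fix queue hang if previous transfer failed" change: the pump work is now rescheduled even when __spi_pump_transfer_message() returns an error, since otherwise messages already queued behind the failed one are never pumped again. A small user-space model of that behaviour, with invented names (run_queue, pump loop), is sketched here; only the control flow mirrors the fix.

/*
 * Requeue-on-success-only stalls the queue after one failure;
 * unconditional requeue drains it.
 */
#include <stdbool.h>
#include <stdio.h>

#define NMSG 4

static int transfer(int msg)
{
        return (msg == 1) ? -1 : 0;     /* the second message fails */
}

static void run_queue(bool requeue_only_on_success)
{
        int head = 0;
        bool work_pending = true;       /* stands in for kthread_queue_work() */

        while (work_pending) {
                work_pending = false;
                if (head == NMSG)
                        break;

                int ret = transfer(head++);

                /* old behaviour: if (!ret) requeue; new: always requeue */
                if (!requeue_only_on_success || !ret)
                        work_pending = true;
        }
        printf("%s: processed %d of %d messages\n",
               requeue_only_on_success ? "requeue on success only" : "always requeue",
               head, NMSG);
}

int main(void)
{
        run_queue(true);        /* stalls after the failed transfer */
        run_queue(false);       /* drains the whole queue */
        return 0;
}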
@@ -4033,7 +4032,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
          * guard against reentrancy from a different context. The io_mutex
          * will catch those cases.
          */
-        if (READ_ONCE(ctlr->queue_empty)) {
+        if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
                 message->actual_length = 0;
                 message->status = -EINPROGRESS;
...
@@ -469,6 +469,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
  *      SPI_TRANS_FAIL_NO_START.
  * @queue_empty: signal green light for opportunistically skipping the queue
  *      for spi_sync transfers.
+ * @must_async: disable all fast paths in the core
  *
  * Each SPI controller can communicate with one or more @spi_device
  * children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -690,6 +691,7 @@ struct spi_controller {

         /* Flag for enabling opportunistic skipping of the queue in spi_sync */
         bool                    queue_empty;
+        bool                    must_async;
 };

 static inline void *spi_controller_get_devdata(struct spi_controller *ctlr)
...