Commit 063d6a4c authored by Linus Torvalds

Merge tag 'mmc-v5.9-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc

Pull MMC fixes from Ulf Hansson:
 "MMC core:
   - sdio: Fix a ~20% performance drop for SDHCI drivers by using
     mmc_pre_req() and mmc_post_req() for SDIO requests.

  MMC host:
   - sdhci-of-esdhc: Fix support for erratum eSDHC7
   - mmc_spi: Allow the driver to be built when CONFIG_HAS_DMA is unset
   - sdhci-msm: Use retries to fix tuning
   - sdhci-acpi: Fix resume for eMMC HS400 mode"

* tag 'mmc-v5.9-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc:
  mmc: sdio: Use mmc_pre_req() / mmc_post_req()
  mmc: sdhci-of-esdhc: Don't walk device-tree on every interrupt
  mmc: mmc_spi: Allow the driver to be built when CONFIG_HAS_DMA is unset
  mmc: sdhci-msm: Add retries when all tuning phases are found valid
  mmc: sdhci-acpi: Clear amd_sdhci_host on reset
parents d67f2ec1 f0c393e2
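
The first change below reworks the SDIO data path in mmc_io_rw_extended() around the mmc core's request-preparation hooks, so a host driver such as sdhci can map and prepare DMA buffers before the request is issued and always clean them up afterwards. A minimal sketch of that flow, for orientation only (the function name is invented; mmc_pre_req(), mmc_wait_for_req() and mmc_post_req() are the real core helpers, and mrq is assumed to be fully set up by the caller):

        /*
         * Illustrative sketch, not code from this series: shows the
         * pre_req/post_req bracketing that the sdio_ops.c hunk below adopts.
         */
        static int issue_one_mrq(struct mmc_host *host, struct mmc_request *mrq)
        {
                int err;

                mmc_pre_req(host, mrq);         /* host may pre-map DMA here */
                mmc_wait_for_req(host, mrq);    /* issue and wait for completion */

                if (mrq->cmd->error)
                        err = mrq->cmd->error;
                else if (mrq->data && mrq->data->error)
                        err = mrq->data->error;
                else
                        err = 0;

                mmc_post_req(host, mrq, err);   /* always undo the preparation */
                return err;
        }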
@@ -121,6 +121,7 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
 	struct sg_table sgtable;
 	unsigned int nents, left_size, i;
 	unsigned int seg_size = card->host->max_seg_size;
+	int err;
 
 	WARN_ON(blksz == 0);
@@ -170,28 +171,32 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
 	mmc_set_data_timeout(&data, card);
 
-	mmc_wait_for_req(card->host, &mrq);
+	mmc_pre_req(card->host, &mrq);
 
-	if (nents > 1)
-		sg_free_table(&sgtable);
+	mmc_wait_for_req(card->host, &mrq);
 
 	if (cmd.error)
-		return cmd.error;
-	if (data.error)
-		return data.error;
-
-	if (mmc_host_is_spi(card->host)) {
+		err = cmd.error;
+	else if (data.error)
+		err = data.error;
+	else if (mmc_host_is_spi(card->host))
 		/* host driver already reported errors */
-	} else {
-		if (cmd.resp[0] & R5_ERROR)
-			return -EIO;
-		if (cmd.resp[0] & R5_FUNCTION_NUMBER)
-			return -EINVAL;
-		if (cmd.resp[0] & R5_OUT_OF_RANGE)
-			return -ERANGE;
-	}
+		err = 0;
+	else if (cmd.resp[0] & R5_ERROR)
+		err = -EIO;
+	else if (cmd.resp[0] & R5_FUNCTION_NUMBER)
+		err = -EINVAL;
+	else if (cmd.resp[0] & R5_OUT_OF_RANGE)
+		err = -ERANGE;
+	else
+		err = 0;
 
-	return 0;
+	mmc_post_req(card->host, &mrq, err);
+
+	if (nents > 1)
+		sg_free_table(&sgtable);
+
+	return err;
 }
 
 int sdio_reset(struct mmc_host *host)
@@ -602,7 +602,7 @@ config MMC_GOLDFISH
 
 config MMC_SPI
 	tristate "MMC/SD/SDIO over SPI"
-	depends on SPI_MASTER && HAS_DMA
+	depends on SPI_MASTER
 	select CRC7
 	select CRC_ITU_T
 	help
@@ -1278,6 +1278,52 @@ mmc_spi_detect_irq(int irq, void *mmc)
 	return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_HAS_DMA
+static int mmc_spi_dma_alloc(struct mmc_spi_host *host)
+{
+	struct spi_device *spi = host->spi;
+	struct device *dev;
+
+	if (!spi->master->dev.parent->dma_mask)
+		return 0;
+
+	dev = spi->master->dev.parent;
+
+	host->ones_dma = dma_map_single(dev, host->ones, MMC_SPI_BLOCKSIZE,
+					DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, host->ones_dma))
+		return -ENOMEM;
+
+	host->data_dma = dma_map_single(dev, host->data, sizeof(*host->data),
+					DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, host->data_dma)) {
+		dma_unmap_single(dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
+				 DMA_TO_DEVICE);
+		return -ENOMEM;
+	}
+
+	dma_sync_single_for_cpu(dev, host->data_dma, sizeof(*host->data),
+				DMA_BIDIRECTIONAL);
+
+	host->dma_dev = dev;
+	return 0;
+}
+
+static void mmc_spi_dma_free(struct mmc_spi_host *host)
+{
+	if (!host->dma_dev)
+		return;
+
+	dma_unmap_single(host->dma_dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
+			 DMA_TO_DEVICE);
+	dma_unmap_single(host->dma_dev, host->data_dma, sizeof(*host->data),
+			 DMA_BIDIRECTIONAL);
+}
+#else
+static inline int mmc_spi_dma_alloc(struct mmc_spi_host *host) { return 0; }
+static inline void mmc_spi_dma_free(struct mmc_spi_host *host) {}
+#endif
+
 static int mmc_spi_probe(struct spi_device *spi)
 {
 	void *ones;
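
Because the !CONFIG_HAS_DMA branch above provides inline stubs, the probe and remove paths can call mmc_spi_dma_alloc() and mmc_spi_dma_free() unconditionally, with no #ifdef at the call sites. A hypothetical caller, purely to illustrate that point (foo_setup_buffers() is an invented name):

        /* Hypothetical caller; builds the same whether CONFIG_HAS_DMA is set or not. */
        static int foo_setup_buffers(struct mmc_spi_host *host)
        {
                int status;

                status = mmc_spi_dma_alloc(host);  /* stub returns 0 if DMA is compiled out */
                if (status)
                        return status;

                /* ... remaining host setup ... */
                return 0;
        }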
@@ -1374,23 +1420,9 @@ static int mmc_spi_probe(struct spi_device *spi)
 	if (!host->data)
 		goto fail_nobuf1;
 
-	if (spi->master->dev.parent->dma_mask) {
-		struct device *dev = spi->master->dev.parent;
-
-		host->dma_dev = dev;
-		host->ones_dma = dma_map_single(dev, ones,
-				MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, host->ones_dma))
-			goto fail_ones_dma;
-		host->data_dma = dma_map_single(dev, host->data,
-				sizeof(*host->data), DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(dev, host->data_dma))
-			goto fail_data_dma;
-
-		dma_sync_single_for_cpu(host->dma_dev,
-				host->data_dma, sizeof(*host->data),
-				DMA_BIDIRECTIONAL);
-	}
+	status = mmc_spi_dma_alloc(host);
+	if (status)
+		goto fail_dma;
 
 	/* setup message for status/busy readback */
 	spi_message_init(&host->readback);
@@ -1458,20 +1490,12 @@ static int mmc_spi_probe(struct spi_device *spi)
 fail_add_host:
 	mmc_remove_host(mmc);
 fail_glue_init:
-	if (host->dma_dev)
-		dma_unmap_single(host->dma_dev, host->data_dma,
-				 sizeof(*host->data), DMA_BIDIRECTIONAL);
-fail_data_dma:
-	if (host->dma_dev)
-		dma_unmap_single(host->dma_dev, host->ones_dma,
-				 MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
-fail_ones_dma:
+	mmc_spi_dma_free(host);
+fail_dma:
 	kfree(host->data);
 fail_nobuf1:
 	mmc_free_host(mmc);
 	mmc_spi_put_pdata(spi);
 nomem:
 	kfree(ones);
 	return status;
@@ -1489,13 +1513,7 @@ static int mmc_spi_remove(struct spi_device *spi)
 	mmc_remove_host(mmc);
 
-	if (host->dma_dev) {
-		dma_unmap_single(host->dma_dev, host->ones_dma,
-				 MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
-		dma_unmap_single(host->dma_dev, host->data_dma,
-				 sizeof(*host->data), DMA_BIDIRECTIONAL);
-	}
+	mmc_spi_dma_free(host);
 
 	kfree(host->data);
 	kfree(host->ones);
@@ -551,12 +551,18 @@ static int amd_select_drive_strength(struct mmc_card *card,
 	return MMC_SET_DRIVER_TYPE_A;
 }
 
-static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host)
+static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host, bool enable)
 {
+	struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
+	struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
+
 	/* AMD Platform requires dll setting */
 	sdhci_writel(host, 0x40003210, SDHCI_AMD_RESET_DLL_REGISTER);
 	usleep_range(10, 20);
-	sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER);
+	if (enable)
+		sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER);
+
+	amd_host->dll_enabled = enable;
 }
 
 /*
@@ -596,10 +602,8 @@ static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 		/* DLL is only required for HS400 */
 		if (host->timing == MMC_TIMING_MMC_HS400 &&
-		    !amd_host->dll_enabled) {
-			sdhci_acpi_amd_hs400_dll(host);
-			amd_host->dll_enabled = true;
-		}
+		    !amd_host->dll_enabled)
+			sdhci_acpi_amd_hs400_dll(host, true);
 	}
 }
@@ -620,10 +624,23 @@ static int amd_sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
 	return err;
 }
 
+static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+	struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
+	struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
+
+	if (mask & SDHCI_RESET_ALL) {
+		amd_host->tuned_clock = false;
+		sdhci_acpi_amd_hs400_dll(host, false);
+	}
+
+	sdhci_reset(host, mask);
+}
+
 static const struct sdhci_ops sdhci_acpi_ops_amd = {
 	.set_clock = sdhci_set_clock,
 	.set_bus_width = sdhci_set_bus_width,
-	.reset = sdhci_reset,
+	.reset = amd_sdhci_reset,
 	.set_uhs_signaling = sdhci_set_uhs_signaling,
 };
@@ -1166,7 +1166,7 @@ static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
 static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
 {
 	struct sdhci_host *host = mmc_priv(mmc);
-	int tuning_seq_cnt = 3;
+	int tuning_seq_cnt = 10;
 	u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
 	int rc;
 	struct mmc_ios ios = host->mmc->ios;
@@ -1222,6 +1222,22 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
 	} while (++phase < ARRAY_SIZE(tuned_phases));
 
 	if (tuned_phase_cnt) {
+		if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) {
+			/*
+			 * All phases valid is _almost_ as bad as no phases
+			 * valid. Probably all phases are not really reliable
+			 * but we didn't detect where the unreliable place is.
+			 * That means we'll essentially be guessing and hoping
+			 * we get a good phase. Better to try a few times.
+			 */
+			dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n",
+				mmc_hostname(mmc));
+			if (--tuning_seq_cnt) {
+				tuned_phase_cnt = 0;
+				goto retry;
+			}
+		}
+
 		rc = msm_find_most_appropriate_phase(host, tuned_phases,
 						     tuned_phase_cnt);
 		if (rc < 0)
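
To make the reasoning in the new comment concrete: a healthy tuning result is a contiguous window of passing phases (say 5..9 out of 16), and the driver then settles on a phase inside that window via msm_find_most_appropriate_phase(); when all 16 phases pass there is no window edge to anchor on, so the result is retried. A simplified illustration only, not the driver's actual algorithm:

        /*
         * Simplified illustration: pick the midpoint of a contiguous run of
         * passing phases. The real driver handles wrap-around and more cases.
         */
        static u8 pick_phase_midpoint(const u8 *passing, u8 count)
        {
                /* e.g. passing[] = {5, 6, 7, 8, 9}, count = 5 -> phase 7 */
                return passing[count / 2];
        }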
@@ -81,6 +81,7 @@ struct sdhci_esdhc {
 	bool quirk_tuning_erratum_type2;
 	bool quirk_ignore_data_inhibit;
 	bool quirk_delay_before_data_reset;
+	bool quirk_trans_complete_erratum;
 	bool in_sw_tuning;
 	unsigned int peripheral_clock;
 	const struct esdhc_clk_fixup *clk_fixup;
@@ -1177,10 +1178,11 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host,
 static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
 {
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
 	u32 command;
 
-	if (of_find_compatible_node(NULL, NULL,
-				    "fsl,p2020-esdhc")) {
+	if (esdhc->quirk_trans_complete_erratum) {
 		command = SDHCI_GET_CMD(sdhci_readw(host,
 					SDHCI_COMMAND));
 		if (command == MMC_WRITE_MULTIPLE_BLOCK &&
@@ -1334,8 +1336,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
 		esdhc->clk_fixup = match->data;
 	np = pdev->dev.of_node;
 
-	if (of_device_is_compatible(np, "fsl,p2020-esdhc"))
+	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
 		esdhc->quirk_delay_before_data_reset = true;
+		esdhc->quirk_trans_complete_erratum = true;
+	}
 
 	clk = of_clk_get(np, 0);
 	if (!IS_ERR(clk)) {