Commit d31aeb78 authored by Linus Torvalds

Merge tag 'mmc-v4.20-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc

Pull MMC fixes from Ulf Hansson:
 "MMC core:
   - Restore code to allow BKOPS and CACHE ctrl even if no HPI support
   - Reset HPI enabled state during re-init
   - Use a default minimum timeout when enabling CACHE ctrl

  MMC host:
   - omap_hsmmc: Fix DMA API warning
   - sdhci-tegra: Fix dt parsing of SDMMC pads autocal values
   - sdhci: Correct register accesses when enabling v4 mode"

* tag 'mmc-v4.20-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc:
  mmc: core: Use a minimum 1600ms timeout when enabling CACHE ctrl
  mmc: core: Allow BKOPS and CACHE ctrl even if no HPI support
  mmc: core: Reset HPI enabled state during re-init and in case of errors
  mmc: omap_hsmmc: fix DMA API warning
  mmc: tegra: Fix for SDMMC pads autocal parsing from dt
  mmc: sdhci: Fix sdhci_do_enable_v4_mode
parents a837eca2 e3ae3401
drivers/mmc/core/mmc.c
@@ -30,6 +30,7 @@
 #include "pwrseq.h"
 
 #define DEFAULT_CMD6_TIMEOUT_MS	500
+#define MIN_CACHE_EN_TIMEOUT_MS 1600
 
 static const unsigned int tran_exp[] = {
 	10000,		100000,		1000000,	10000000,
@@ -526,8 +527,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
 		card->cid.year += 16;
 
 	/* check whether the eMMC card supports BKOPS */
-	if (!mmc_card_broken_hpi(card) &&
-	    ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
+	if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
 		card->ext_csd.bkops = 1;
 		card->ext_csd.man_bkops_en =
 			(ext_csd[EXT_CSD_BKOPS_EN] &
@@ -1782,20 +1782,26 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
 		if (err) {
 			pr_warn("%s: Enabling HPI failed\n",
 				mmc_hostname(card->host));
 			card->ext_csd.hpi_en = 0;
 			err = 0;
-		} else
+		} else {
 			card->ext_csd.hpi_en = 1;
+		}
 	}
 
 	/*
-	 * If cache size is higher than 0, this indicates
-	 * the existence of cache and it can be turned on.
+	 * If cache size is higher than 0, this indicates the existence of cache
+	 * and it can be turned on. Note that some eMMCs from Micron have been
+	 * reported to need a ~800 ms timeout while enabling the cache after
+	 * sudden power failure tests. Let's extend the timeout to a minimum of
+	 * MIN_CACHE_EN_TIMEOUT_MS and do it for all cards.
 	 */
-	if (!mmc_card_broken_hpi(card) &&
-	    card->ext_csd.cache_size > 0) {
+	if (card->ext_csd.cache_size > 0) {
+		unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;
+
+		timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-				EXT_CSD_CACHE_CTRL, 1,
-				card->ext_csd.generic_cmd6_time);
+				EXT_CSD_CACHE_CTRL, 1, timeout_ms);
 		if (err && err != -EBADMSG)
 			goto free_card;
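The core change reduces to a clamp: honor the card's GENERIC_CMD6_TIME when it is longer, but never wait less than the 1600 ms floor. A minimal sketch of that pattern (standalone C; the function name is invented for illustration):

/* Illustrative only: mirrors the max()-based clamp in the hunk above. */
static unsigned int cache_en_timeout_ms(unsigned int generic_cmd6_time_ms)
{
	unsigned int timeout_ms = 1600;	/* MIN_CACHE_EN_TIMEOUT_MS */

	if (generic_cmd6_time_ms > timeout_ms)
		timeout_ms = generic_cmd6_time_ms;

	return timeout_ms;
}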
drivers/mmc/host/omap_hsmmc.c
@@ -1909,7 +1909,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
 	mmc->max_blk_size = 512;       /* Block Length at max can be 1024 */
 	mmc->max_blk_count = 0xFFFF;    /* No. of Blocks is 16 bits */
 	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
-	mmc->max_seg_size = mmc->max_req_size;
 
 	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
 		     MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
@@ -1939,6 +1938,17 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
 		goto err_irq;
 	}
 
+	/*
+	 * Limit the maximum segment size to the lower of the request size
+	 * and the DMA engine device segment size limits. In reality, with
+	 * 32-bit transfers, the DMA engine can do longer segments than this
+	 * but there is no way to represent that in the DMA model - if we
+	 * increase this figure here, we get warnings from the DMA API debug.
+	 */
+	mmc->max_seg_size = min3(mmc->max_req_size,
+			dma_get_max_seg_size(host->rx_chan->device->dev),
+			dma_get_max_seg_size(host->tx_chan->device->dev));
+
 	/* Request IRQ for MMC operations */
 	ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
 			       mmc_hostname(mmc), host);
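The segment-size cap can only be computed once both DMA channels have been requested, which is why the assignment moves below the channel setup. A hedged sketch of the same computation, with rx_dev/tx_dev standing in for the dmaengine devices:

#include <linux/dma-mapping.h>
#include <linux/kernel.h>

/*
 * Illustrative sketch: cap the segment size by the request size and by
 * both DMA engines' advertised limits. rx_dev/tx_dev are hypothetical
 * stand-ins for host->rx_chan->device->dev and host->tx_chan->device->dev.
 */
static unsigned int seg_size_cap(unsigned int max_req_size,
				 struct device *rx_dev,
				 struct device *tx_dev)
{
	return min3(max_req_size,
		    dma_get_max_seg_size(rx_dev),
		    dma_get_max_seg_size(tx_dev));
}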
drivers/mmc/host/sdhci-tegra.c
@@ -510,25 +510,25 @@ static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
 	err = device_property_read_u32(host->mmc->parent,
 			"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
-			&autocal->pull_up_3v3);
+			&autocal->pull_up_3v3_timeout);
 	if (err)
 		autocal->pull_up_3v3_timeout = 0;
 
 	err = device_property_read_u32(host->mmc->parent,
 			"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
-			&autocal->pull_down_3v3);
+			&autocal->pull_down_3v3_timeout);
 	if (err)
 		autocal->pull_down_3v3_timeout = 0;
 
 	err = device_property_read_u32(host->mmc->parent,
 			"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
-			&autocal->pull_up_1v8);
+			&autocal->pull_up_1v8_timeout);
 	if (err)
 		autocal->pull_up_1v8_timeout = 0;
 
 	err = device_property_read_u32(host->mmc->parent,
 			"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
-			&autocal->pull_down_1v8);
+			&autocal->pull_down_1v8_timeout);
 	if (err)
 		autocal->pull_down_1v8_timeout = 0;
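The tegra bug was a copy-paste slip: each *-timeout property was parsed into the corresponding non-timeout field, so the timeout fields were only ever written by the error path. A small helper expressing the intended read-optional-property pattern (illustrative, not part of the driver):

#include <linux/property.h>

/* Read an optional u32 property, defaulting to 0 when it is absent. */
static u32 read_u32_or_zero(struct device *dev, const char *prop)
{
	u32 val;

	if (device_property_read_u32(dev, prop, &val))
		val = 0;

	return val;
}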
drivers/mmc/host/sdhci.c
@@ -127,12 +127,12 @@ static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
 {
 	u16 ctrl2;
 
-	ctrl2 = sdhci_readb(host, SDHCI_HOST_CONTROL2);
+	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
 	if (ctrl2 & SDHCI_CTRL_V4_MODE)
 		return;
 
 	ctrl2 |= SDHCI_CTRL_V4_MODE;
-	sdhci_writeb(host, ctrl2, SDHCI_HOST_CONTROL);
+	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
 }
 
 /*
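The sdhci fix corrects two mistakes in one read-modify-write: a byte-wide read of the 16-bit HOST_CONTROL2 register, and a byte-wide write aimed at HOST_CONTROL, a different register entirely. The corrected shape is an ordinary 16-bit set-bits sequence; a generic sketch (set_bits16 is a hypothetical helper, not an sdhci function):

#include <linux/io.h>

/* Generic 16-bit read-modify-write against an MMIO register block. */
static void set_bits16(void __iomem *base, unsigned int reg, u16 bits)
{
	u16 val = readw(base + reg);

	if (val & bits)
		return;	/* bits already set, skip the write */

	writew(val | bits, base + reg);
}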