Commit 599beede authored by Linus Torvalds

Merge tag 'mmc-v5.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc

Pull MMC fixes from Ulf Hansson:
 "A couple of MMC host fixes intended for v5.1:

   - alcor: Fix DMA reads

   - renesas_sdhi: Limit block count to 16-bit for old revisions

   - sdhci-omap: Fixup support for read-only pins

   - mxcmmc: Revert support for highmem pages

   - davinci/pxamci: Fix clang build warnings"

* tag 'mmc-v5.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc:
  mmc: renesas_sdhi: limit block count to 16 bit for old revisions
  mmc: alcor: fix DMA reads
  mmc: sdhci-omap: Set caps2 to indicate no physical write protect pin
  mmc: mxcmmc: "Revert mmc: mxcmmc: handle highmem pages"
  mmc: davinci: remove extraneous __init annotation
  mmc: pxamci: fix enum type confusion
parents fd1f297b c9a9497c
@@ -1044,14 +1044,27 @@ static void alcor_init_mmc(struct alcor_sdmmc_host *host)
 	mmc->caps2 = MMC_CAP2_NO_SDIO;
 	mmc->ops = &alcor_sdc_ops;
 
-	/* Hardware cannot do scatter lists */
+	/* The hardware does DMA data transfer of 4096 bytes to/from a single
+	 * buffer address. Scatterlists are not supported, but upon DMA
+	 * completion (signalled via IRQ), the original vendor driver does
+	 * then immediately set up another DMA transfer of the next 4096
+	 * bytes.
+	 *
+	 * This means that we need to handle the I/O in 4096 byte chunks.
+	 * Lacking a way to limit the sglist entries to 4096 bytes, we instead
+	 * impose that only one segment is provided, with maximum size 4096,
+	 * which also happens to be the minimum size. This means that the
+	 * single-entry sglist handled by this driver can be handed directly
+	 * to the hardware, nice and simple.
+	 *
+	 * Unfortunately though, that means we only do 4096 bytes I/O per
+	 * MMC command. A future improvement would be to make the driver
+	 * accept sg lists and entries of any size, and simply iterate
+	 * through them 4096 bytes at a time.
+	 */
 	mmc->max_segs = AU6601_MAX_DMA_SEGMENTS;
 	mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE;
 
-	mmc->max_req_size = mmc->max_seg_size;
+	mmc->max_blk_size = mmc->max_seg_size;
+	mmc->max_blk_count = mmc->max_segs;
+	mmc->max_req_size = mmc->max_seg_size * mmc->max_segs;
 }
 
 static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
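For illustration only, here is a minimal userspace C sketch of the chunked iteration that the new alcor comment suggests as a future improvement: walking a contiguous buffer (such as a single sg entry) in 4096-byte pieces. The names CHUNK_SIZE, process_chunk and walk_in_chunks are hypothetical and are not part of the alcor driver or of any kernel API.

/*
 * Hypothetical sketch, not driver code: walk a buffer of arbitrary
 * length in 4096-byte chunks, one "DMA transfer" per chunk.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define CHUNK_SIZE 4096	/* mirrors the hardware's fixed DMA transfer size */

/* Stand-in for setting up and completing one DMA transfer. */
static void process_chunk(const unsigned char *buf, size_t len)
{
	printf("would DMA %zu bytes starting at %p\n", len, (const void *)buf);
}

/* Walk one contiguous buffer chunk by chunk, handling a short tail. */
static void walk_in_chunks(const unsigned char *buf, size_t len)
{
	while (len) {
		size_t n = len < CHUNK_SIZE ? len : CHUNK_SIZE;

		process_chunk(buf, n);
		buf += n;
		len -= n;
	}
}

int main(void)
{
	static unsigned char data[3 * 4096 + 100];

	memset(data, 0xa5, sizeof(data));
	walk_in_chunks(data, sizeof(data));	/* 3 full chunks + 100-byte tail */
	return 0;
}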
@@ -1117,7 +1117,7 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
 {
 }
 #endif
 
-static void __init init_mmcsd_host(struct mmc_davinci_host *host)
+static void init_mmcsd_host(struct mmc_davinci_host *host)
 {
 	mmc_davinci_reset_ctrl(host, 1);
@@ -290,11 +290,8 @@ static void mxcmci_swap_buffers(struct mmc_data *data)
 	struct scatterlist *sg;
 	int i;
 
-	for_each_sg(data->sg, sg, data->sg_len, i) {
-		void *buf = kmap_atomic(sg_page(sg) + sg->offset);
-		buffer_swap32(buf, sg->length);
-		kunmap_atomic(buf);
-	}
+	for_each_sg(data->sg, sg, data->sg_len, i)
+		buffer_swap32(sg_virt(sg), sg->length);
 }
 #else
 static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
@@ -611,7 +608,6 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
 {
 	struct mmc_data *data = host->req->data;
 	struct scatterlist *sg;
-	void *buf;
 	int stat, i;
 
 	host->data = data;
@@ -619,18 +615,14 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
 
 	if (data->flags & MMC_DATA_READ) {
 		for_each_sg(data->sg, sg, data->sg_len, i) {
-			buf = kmap_atomic(sg_page(sg) + sg->offset);
-			stat = mxcmci_pull(host, buf, sg->length);
-			kunmap(buf);
+			stat = mxcmci_pull(host, sg_virt(sg), sg->length);
 			if (stat)
 				return stat;
 			host->datasize += sg->length;
 		}
 	} else {
 		for_each_sg(data->sg, sg, data->sg_len, i) {
-			buf = kmap_atomic(sg_page(sg) + sg->offset);
-			stat = mxcmci_push(host, buf, sg->length);
-			kunmap(buf);
+			stat = mxcmci_push(host, sg_virt(sg), sg->length);
 			if (stat)
 				return stat;
 			host->datasize += sg->length;
@@ -162,7 +162,7 @@ static void pxamci_dma_irq(void *param);
 static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
 {
 	struct dma_async_tx_descriptor *tx;
-	enum dma_data_direction direction;
+	enum dma_transfer_direction direction;
 	struct dma_slave_config config;
 	struct dma_chan *chan;
 	unsigned int nob = data->blocks;
@@ -641,6 +641,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
 	struct renesas_sdhi *priv;
 	struct resource *res;
 	int irq, ret, i;
+	u16 ver;
 
 	of_data = of_device_get_match_data(&pdev->dev);
@@ -773,12 +774,17 @@ int renesas_sdhi_probe(struct platform_device *pdev,
 	if (ret)
 		goto efree;
 
+	ver = sd_ctrl_read16(host, CTL_VERSION);
+	/* GEN2_SDR104 is first known SDHI to use 32bit block count */
+	if (ver < SDHI_VER_GEN2_SDR104 && mmc_data->max_blk_count > U16_MAX)
+		mmc_data->max_blk_count = U16_MAX;
+
 	ret = tmio_mmc_host_probe(host);
 	if (ret < 0)
 		goto edisclk;
 
 	/* One Gen2 SDHI incarnation does NOT have a CBSY bit */
-	if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN2_SDR50)
+	if (ver == SDHI_VER_GEN2_SDR50)
 		mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY;
 
 	/* Enable tuning iff we have an SCC and a supported mode */
@@ -1056,6 +1056,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
 		mmc->f_max = 48000000;
 	}
 
+	if (!mmc_can_gpio_ro(mmc))
+		mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
+
 	pltfm_host->clk = devm_clk_get(dev, "fck");
 	if (IS_ERR(pltfm_host->clk)) {
 		ret = PTR_ERR(pltfm_host->clk);