Commit ae77c958 authored by Linus Torvalds

Merge tag 'mmc-v4.16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc

Pull MMC host fixes from Ulf Hansson:

 - renesas_sdhi: Fix build error in case NO_DMA=y

 - sdhci: Implement a bounce buffer to address throughput regressions

* tag 'mmc-v4.16-2' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc:
  mmc: MMC_SDHI_{SYS,INTERNAL}_DMAC should depend on HAS_DMA
  mmc: sdhci: Implement an SDHCI-specific bounce buffer
parents 20f9aa22 56174d9a
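
Editor's note: the sdhci change below packs a request's scatterlist into one contiguous DMA buffer when the controller can only handle a single SDMA segment, trading one extra memcpy for far fewer, larger transfers. A minimal userspace sketch of that pattern, assuming nothing kernel-specific: bounce_pack()/bounce_unpack() are hypothetical names for illustration, where the real driver uses sg_copy_to_buffer()/sg_copy_from_buffer() as shown in the diff.

#include <stdio.h>
#include <string.h>

struct segment {
        char *buf;
        size_t len;
};

/* Write path: pack the scattered segments into the bounce buffer. */
static size_t bounce_pack(char *bounce, size_t bounce_size,
                          const struct segment *segs, size_t nsegs)
{
        size_t off = 0;
        size_t i;

        for (i = 0; i < nsegs; i++) {
                if (off + segs[i].len > bounce_size)
                        return 0; /* mirrors the driver's -EIO overflow check */
                memcpy(bounce + off, segs[i].buf, segs[i].len);
                off += segs[i].len;
        }
        return off; /* one contiguous transfer of 'off' bytes */
}

/* Read path: unpack the bounce buffer back into the segments. */
static void bounce_unpack(const char *bounce,
                          struct segment *segs, size_t nsegs)
{
        size_t off = 0;
        size_t i;

        for (i = 0; i < nsegs; i++) {
                memcpy(segs[i].buf, bounce + off, segs[i].len);
                off += segs[i].len;
        }
}

int main(void)
{
        char a[] = "abc", b[] = "def";
        char out_a[4] = "", out_b[4] = "";
        struct segment in[2] = { { a, 3 }, { b, 3 } };
        struct segment out[2] = { { out_a, 3 }, { out_b, 3 } };
        static char bounce[64 * 1024]; /* SZ_64K, the cap used in the patch */

        size_t len = bounce_pack(bounce, sizeof(bounce), in, 2);
        bounce_unpack(bounce, out, 2); /* pretend the DMA round trip happened */
        printf("%zu bytes bounced: %s %s\n", len, out_a, out_b);
        return 0;
}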
drivers/mmc/host/Kconfig
@@ -605,7 +605,7 @@ config MMC_SDHI
 config MMC_SDHI_SYS_DMAC
        tristate "DMA for SDHI SD/SDIO controllers using SYS-DMAC"
-       depends on MMC_SDHI
+       depends on MMC_SDHI && HAS_DMA
        default MMC_SDHI if (SUPERH || ARM)
        help
          This provides DMA support for SDHI SD/SDIO controllers
@@ -615,7 +615,7 @@ config MMC_SDHI_SYS_DMAC
 config MMC_SDHI_INTERNAL_DMAC
        tristate "DMA for SDHI SD/SDIO controllers using on-chip bus mastering"
        depends on ARM64 || COMPILE_TEST
-       depends on MMC_SDHI
+       depends on MMC_SDHI && HAS_DMA
        default MMC_SDHI if ARM64
        help
          This provides DMA support for SDHI SD/SDIO controllers
drivers/mmc/host/sdhci.c
@@ -21,6 +21,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/scatterlist.h>
+#include <linux/sizes.h>
 #include <linux/swiotlb.h>
 #include <linux/regulator/consumer.h>
 #include <linux/pm_runtime.h>
@@ -502,8 +503,35 @@ static int sdhci_pre_dma_transfer(struct sdhci_host *host,
        if (data->host_cookie == COOKIE_PRE_MAPPED)
                return data->sg_count;

-       sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-                             mmc_get_dma_dir(data));
+       /* Bounce write requests to the bounce buffer */
+       if (host->bounce_buffer) {
+               unsigned int length = data->blksz * data->blocks;
+
+               if (length > host->bounce_buffer_size) {
+                       pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
+                              mmc_hostname(host->mmc), length,
+                              host->bounce_buffer_size);
+                       return -EIO;
+               }
+               if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
+                       /* Copy the data to the bounce buffer */
+                       sg_copy_to_buffer(data->sg, data->sg_len,
+                                         host->bounce_buffer,
+                                         length);
+               }
+               /* Switch ownership to the DMA */
+               dma_sync_single_for_device(host->mmc->parent,
+                                          host->bounce_addr,
+                                          host->bounce_buffer_size,
+                                          mmc_get_dma_dir(data));
+               /* Just a dummy value */
+               sg_count = 1;
+       } else {
+               /* Just access the data directly from memory */
+               sg_count = dma_map_sg(mmc_dev(host->mmc),
+                                     data->sg, data->sg_len,
+                                     mmc_get_dma_dir(data));
+       }

        if (sg_count == 0)
                return -ENOSPC;
@@ -673,6 +701,14 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
        }
 }

+static u32 sdhci_sdma_address(struct sdhci_host *host)
+{
+       if (host->bounce_buffer)
+               return host->bounce_addr;
+       else
+               return sg_dma_address(host->data->sg);
+}
+
 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
 {
        u8 count;
@@ -858,7 +894,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
                                     SDHCI_ADMA_ADDRESS_HI);
        } else {
                WARN_ON(sg_cnt != 1);
-               sdhci_writel(host, sg_dma_address(data->sg),
+               sdhci_writel(host, sdhci_sdma_address(host),
                             SDHCI_DMA_ADDRESS);
        }
 }
@@ -2255,7 +2291,12 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)

        mrq->data->host_cookie = COOKIE_UNMAPPED;

-       if (host->flags & SDHCI_REQ_USE_DMA)
+       /*
+        * No pre-mapping in the pre hook if we're using the bounce buffer,
+        * for that we would need two bounce buffers since one buffer is
+        * in flight when this is getting called.
+        */
+       if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
                sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
 }
@@ -2359,8 +2400,45 @@ static bool sdhci_request_done(struct sdhci_host *host)
                struct mmc_data *data = mrq->data;

                if (data && data->host_cookie == COOKIE_MAPPED) {
-                       dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-                                    mmc_get_dma_dir(data));
+                       if (host->bounce_buffer) {
+                               /*
+                                * On reads, copy the bounced data into the
+                                * sglist
+                                */
+                               if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
+                                       unsigned int length = data->bytes_xfered;
+
+                                       if (length > host->bounce_buffer_size) {
+                                               pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
+                                                      mmc_hostname(host->mmc),
+                                                      host->bounce_buffer_size,
+                                                      data->bytes_xfered);
+                                               /* Cap it down and continue */
+                                               length = host->bounce_buffer_size;
+                                       }
+                                       dma_sync_single_for_cpu(
+                                               host->mmc->parent,
+                                               host->bounce_addr,
+                                               host->bounce_buffer_size,
+                                               DMA_FROM_DEVICE);
+                                       sg_copy_from_buffer(data->sg,
+                                                           data->sg_len,
+                                                           host->bounce_buffer,
+                                                           length);
+                               } else {
+                                       /* No copying, just switch ownership */
+                                       dma_sync_single_for_cpu(
+                                               host->mmc->parent,
+                                               host->bounce_addr,
+                                               host->bounce_buffer_size,
+                                               mmc_get_dma_dir(data));
+                               }
+                       } else {
+                               /* Unmap the raw data */
+                               dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+                                            data->sg_len,
+                                            mmc_get_dma_dir(data));
+                       }
                        data->host_cookie = COOKIE_UNMAPPED;
                }
        }
@@ -2643,7 +2721,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
                 */
                if (intmask & SDHCI_INT_DMA_END) {
                        u32 dmastart, dmanow;
-                       dmastart = sg_dma_address(host->data->sg);
+
+                       dmastart = sdhci_sdma_address(host);
                        dmanow = dmastart + host->data->bytes_xfered;
                        /*
                         * Force update to the next DMA block boundary.
@@ -3234,6 +3313,68 @@ void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
 }
 EXPORT_SYMBOL_GPL(__sdhci_read_caps);

+static int sdhci_allocate_bounce_buffer(struct sdhci_host *host)
+{
+       struct mmc_host *mmc = host->mmc;
+       unsigned int max_blocks;
+       unsigned int bounce_size;
+       int ret;
+
+       /*
+        * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
+        * has diminishing returns, this is probably because SD/MMC
+        * cards are usually optimized to handle this size of requests.
+        */
+       bounce_size = SZ_64K;
+       /*
+        * Adjust downwards to maximum request size if this is less
+        * than our segment size, else hammer down the maximum
+        * request size to the maximum buffer size.
+        */
+       if (mmc->max_req_size < bounce_size)
+               bounce_size = mmc->max_req_size;
+       max_blocks = bounce_size / 512;
+
+       /*
+        * When we just support one segment, we can get significant
+        * speedups by the help of a bounce buffer to group scattered
+        * reads/writes together.
+        */
+       host->bounce_buffer = devm_kmalloc(mmc->parent,
+                                          bounce_size,
+                                          GFP_KERNEL);
+       if (!host->bounce_buffer) {
+               pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
+                      mmc_hostname(mmc),
+                      bounce_size);
+               /*
+                * Exiting with zero here makes sure we proceed with
+                * mmc->max_segs == 1.
+                */
+               return 0;
+       }
+
+       host->bounce_addr = dma_map_single(mmc->parent,
+                                          host->bounce_buffer,
+                                          bounce_size,
+                                          DMA_BIDIRECTIONAL);
+       ret = dma_mapping_error(mmc->parent, host->bounce_addr);
+       if (ret)
+               /* Again fall back to max_segs == 1 */
+               return 0;
+       host->bounce_buffer_size = bounce_size;
+
+       /* Lie about this since we're bouncing */
+       mmc->max_segs = max_blocks;
+       mmc->max_seg_size = bounce_size;
+       mmc->max_req_size = bounce_size;
+
+       pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
+               mmc_hostname(mmc), max_blocks, bounce_size);
+
+       return 0;
+}
+
 int sdhci_setup_host(struct sdhci_host *host)
 {
        struct mmc_host *mmc;
@@ -3730,6 +3871,13 @@ int sdhci_setup_host(struct sdhci_host *host)
         */
        mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

+       if (mmc->max_segs == 1) {
+               /* This may alter mmc->*_blk_* parameters */
+               ret = sdhci_allocate_bounce_buffer(host);
+               if (ret)
+                       return ret;
+       }
+
        return 0;

 unreg:
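The sizing math in sdhci_allocate_bounce_buffer() above is worth spelling out: the buffer is capped at 64KB, trimmed to the host's maximum request size if that is smaller, and divided into 512-byte blocks to get the segment count advertised to the MMC core. A tiny standalone C example of the same arithmetic, with max_req_size picked arbitrarily for illustration:

#include <stdio.h>

int main(void)
{
        unsigned int max_req_size = 512 * 1024; /* hypothetical host limit */
        unsigned int bounce_size = 64 * 1024;   /* SZ_64K cap */

        if (max_req_size < bounce_size)
                bounce_size = max_req_size;

        /* 64KB of 512-byte blocks: the core may now send 128 "segments". */
        printf("bounce_size=%u max_blocks=%u\n",
               bounce_size, bounce_size / 512);
        return 0;
}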
drivers/mmc/host/sdhci.h
@@ -440,6 +440,9 @@ struct sdhci_host {
        int irq;                /* Device IRQ */
        void __iomem *ioaddr;   /* Mapped address */
+       char *bounce_buffer;    /* For packing SDMA reads/writes */
+       dma_addr_t bounce_addr;
+       unsigned int bounce_buffer_size;

        const struct sdhci_ops *ops;    /* Low level hw interface */
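A note on the dma_sync calls in the sdhci.c diff: the bounce buffer is mapped once with dma_map_single() at setup, and ownership then ping-pongs between CPU and device per request, instead of mapping and unmapping the scatterlist every time. A hedged kernel-style sketch of that streaming-DMA protocol, assuming a caller-provided struct device and buffer; this is the general pattern, not the sdhci code itself (the driver keeps its devm-allocated buffer mapped for the device's whole lifetime rather than unmapping per transfer):

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/errno.h>

static int bounce_round_trip(struct device *dev, void *buf, size_t size)
{
        dma_addr_t addr;

        /* Map once; like the driver, map bidirectionally at setup. */
        addr = dma_map_single(dev, buf, size, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, addr))
                return -ENOMEM;

        /* CPU fills buf, then hands ownership to the device for a write. */
        dma_sync_single_for_device(dev, addr, size, DMA_TO_DEVICE);

        /* ... the device performs DMA here ... */

        /* Take ownership back before the CPU looks at read data. */
        dma_sync_single_for_cpu(dev, addr, size, DMA_FROM_DEVICE);

        dma_unmap_single(dev, addr, size, DMA_BIDIRECTIONAL);
        return 0;
}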