Commit de3ee99b authored by Linus Walleij, committed by Ulf Hansson

mmc: Delete bounce buffer handling

In May, Steven sent a patch deleting the bounce buffer handling
and the CONFIG_MMC_BLOCK_BOUNCE option.

I chose the less invasive path of making it a runtime config
option, and we merged that successfully for kernel v4.12.

The code is, however, just standing in the way and taking up
space for seemingly no gain on any system in wide use today.

Pierre says the code was there to improve speed on TI SDHCI
controllers on certain HP laptops and possibly some Ricoh
controllers as well. Early SDHCI controllers lacked the
scatter-gather feature, which made software bounce buffers
a significant speed boost.
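
To make the mechanism concrete, here is a minimal sketch of the
idea in plain userspace C. The names (segment, dma_one_region,
bounce_write) are hypothetical illustrations, not the kernel API:
a request scattered over many segments is copied into one
contiguous buffer so that a controller limited to a single DMA
region per command can complete the request in one transfer.

    #include <stdlib.h>
    #include <string.h>

    struct segment {        /* one fragment of an I/O request */
            void   *buf;
            size_t  len;
    };

    /* Stand-in for a controller that can only DMA one contiguous
     * region per command, as the early SDHCI controllers could. */
    static void dma_one_region(void *buf, size_t len)
    {
            (void)buf;
            (void)len;      /* a real driver would program the DMA engine here */
    }

    /* Bounce-buffer write path: coalesce all segments into one
     * contiguous buffer, then issue a single transfer. The price
     * is an extra memcpy() of the whole request; the gain was
     * avoiding one command per segment on scatter-gather-less
     * hardware. */
    static int bounce_write(const struct segment *sgl, int nsegs)
    {
            size_t total = 0, off = 0;
            char *bounce;
            int i;

            for (i = 0; i < nsegs; i++)
                    total += sgl[i].len;

            bounce = malloc(total);
            if (!bounce)
                    return -1;

            for (i = 0; i < nsegs; i++) {
                    memcpy(bounce + off, sgl[i].buf, sgl[i].len);
                    off += sgl[i].len;
            }

            dma_one_region(bounce, total);
            free(bounce);
            return 0;
    }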

We are clearly talking about the SDHCI PCI-based MMC/SD card
readers found in the pci_ids[] list in
drivers/mmc/host/sdhci-pci-core.c.

The TI SDHCI derivative is not supported by the upstream
kernel. This leaves the Ricoh.

What we can notice, however, is that the x86 defconfigs in the
kernel did not enable the CONFIG_MMC_BLOCK_BOUNCE option, which
means that any such laptop would need a custom-configured
kernel to actually take advantage of this bounce buffer
speed-up. It simply seems that this was a speed optimization
for the Ricoh controllers that no one was using. (I have not
checked the distro defconfigs, but I am pretty sure the
situation is the same there.)

Bounce buffers increased performance on the OMAP HSMMC at one
point, and were part of the original submission in commit
a45c6cb8 ("[ARM] 5369/1: omap mmc: Add new omap hsmmc
controller for 2430 and 34xx, v3").

This optimization was removed in commit 0ccd76d4 ("omap_hsmmc:
Implement scatter-gather emulation"), which found that
scatter-gather emulation provided even better performance.

The same was introduced for SDHCI in
commit 2134a922 ("sdhci: scatter-gather (ADMA) support").

I am quite convinced that software scatter-gather emulation
will do for any host controller what the bounce buffers were
doing. Essentially, the bounce buffer was a reimplementation of
software scatter-gather emulation inside the MMC subsystem, and
it should be done away with.
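
As a contrast to the bounce_write() sketch above (again a
hypothetical userspace model, not the kernel API), software
scatter-gather emulation walks the same segment list but
transfers each fragment in place, so the whole-request memcpy()
disappears and only per-segment setup cost remains:

    /* Scatter-gather emulation: no bounce copy; feed the
     * controller one segment at a time. In spirit this is what
     * the omap_hsmmc emulation did in software and what ADMA
     * does in hardware on later SDHCI controllers. */
    static void sg_emulate_write(const struct segment *sgl, int nsegs)
    {
            int i;

            for (i = 0; i < nsegs; i++)
                    dma_one_region(sgl[i].buf, sgl[i].len);
    }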

Cc: Pierre Ossman <pierre@ossman.eu>
Cc: Juha Yrjola <juha.yrjola@solidboot.com>
Cc: Steven J. Hill <Steven.Hill@cavium.com>
Cc: Shawn Lin <shawn.lin@rock-chips.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Suggested-by: Steven J. Hill <Steven.Hill@cavium.com>
Suggested-by: Shawn Lin <shawn.lin@rock-chips.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
parent fb458864
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1634,8 +1634,6 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
 	}
 
 	mqrq->areq.mrq = &brq->mrq;
-
-	mmc_queue_bounce_pre(mqrq);
 }
 
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
@@ -1829,7 +1827,6 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 		brq = &mq_rq->brq;
 		old_req = mmc_queue_req_to_req(mq_rq);
 		type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
-		mmc_queue_bounce_post(mq_rq);
 
 		switch (status) {
 		case MMC_BLK_SUCCESS:
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -23,8 +23,6 @@
 #include "core.h"
 #include "card.h"
 
-#define MMC_QUEUE_BOUNCESZ	65536
-
 /*
  * Prepare a MMC request. This just filters out odd stuff.
  */
@@ -150,26 +148,6 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
 }
 
-static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
-{
-	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
-
-	if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF))
-		return 0;
-
-	if (bouncesz > host->max_req_size)
-		bouncesz = host->max_req_size;
-	if (bouncesz > host->max_seg_size)
-		bouncesz = host->max_seg_size;
-	if (bouncesz > host->max_blk_count * 512)
-		bouncesz = host->max_blk_count * 512;
-
-	if (bouncesz <= 512)
-		return 0;
-
-	return bouncesz;
-}
-
 /**
  * mmc_init_request() - initialize the MMC-specific per-request data
  * @q: the request queue
@@ -184,26 +162,9 @@ static int mmc_init_request(struct request_queue *q, struct request *req,
 	struct mmc_card *card = mq->card;
 	struct mmc_host *host = card->host;
 
-	if (card->bouncesz) {
-		mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp);
-		if (!mq_rq->bounce_buf)
-			return -ENOMEM;
-		if (card->bouncesz > 512) {
-			mq_rq->sg = mmc_alloc_sg(1, gfp);
-			if (!mq_rq->sg)
-				return -ENOMEM;
-			mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512,
-							gfp);
-			if (!mq_rq->bounce_sg)
-				return -ENOMEM;
-		}
-	} else {
-		mq_rq->bounce_buf = NULL;
-		mq_rq->bounce_sg = NULL;
-		mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
-		if (!mq_rq->sg)
-			return -ENOMEM;
-	}
+	mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
+	if (!mq_rq->sg)
+		return -ENOMEM;
 
 	return 0;
 }
@@ -212,13 +173,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
 {
 	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
 
-	/* It is OK to kfree(NULL) so this will be smooth */
-	kfree(mq_rq->bounce_sg);
-	mq_rq->bounce_sg = NULL;
-
-	kfree(mq_rq->bounce_buf);
-	mq_rq->bounce_buf = NULL;
-
 	kfree(mq_rq->sg);
 	mq_rq->sg = NULL;
 }
@@ -242,12 +196,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
 		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
 
-	/*
-	 * mmc_init_request() depends on card->bouncesz so it must be calculated
-	 * before blk_init_allocated_queue() starts allocating requests.
-	 */
-	card->bouncesz = mmc_queue_calc_bouncesz(host);
-
 	mq->card = card;
 	mq->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!mq->queue)
@@ -271,17 +219,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);
 
-	if (card->bouncesz) {
-		blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
-		blk_queue_max_segments(mq->queue, card->bouncesz / 512);
-		blk_queue_max_segment_size(mq->queue, card->bouncesz);
-	} else {
-		blk_queue_bounce_limit(mq->queue, limit);
-		blk_queue_max_hw_sectors(mq->queue,
-			min(host->max_blk_count, host->max_req_size / 512));
-		blk_queue_max_segments(mq->queue, host->max_segs);
-		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-	}
+	blk_queue_bounce_limit(mq->queue, limit);
+	blk_queue_max_hw_sectors(mq->queue,
+		min(host->max_blk_count, host->max_req_size / 512));
+	blk_queue_max_segments(mq->queue, host->max_segs);
+	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
 	sema_init(&mq->thread_sem, 1);
 
@@ -370,56 +312,7 @@ void mmc_queue_resume(struct mmc_queue *mq)
  */
 unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 {
-	unsigned int sg_len;
-	size_t buflen;
-	struct scatterlist *sg;
 	struct request *req = mmc_queue_req_to_req(mqrq);
-	int i;
-
-	if (!mqrq->bounce_buf)
-		return blk_rq_map_sg(mq->queue, req, mqrq->sg);
-
-	sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg);
-
-	mqrq->bounce_sg_len = sg_len;
-
-	buflen = 0;
-	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
-		buflen += sg->length;
-
-	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
-
-	return 1;
-}
-
-/*
- * If writing, bounce the data to the buffer before the request
- * is sent to the host driver
- */
-void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
-{
-	if (!mqrq->bounce_buf)
-		return;
-
-	if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE)
-		return;
-
-	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
-		mqrq->bounce_buf, mqrq->sg[0].length);
-}
-
-/*
- * If reading, bounce the data from the buffer after the request
- * has been handled by the host driver
- */
-void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
-{
-	if (!mqrq->bounce_buf)
-		return;
-
-	if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ)
-		return;
-
-	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
-		mqrq->bounce_buf, mqrq->sg[0].length);
+
+	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
 }
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -49,9 +49,6 @@ enum mmc_drv_op {
 struct mmc_queue_req {
 	struct mmc_blk_request	brq;
 	struct scatterlist	*sg;
-	char			*bounce_buf;
-	struct scatterlist	*bounce_sg;
-	unsigned int		bounce_sg_len;
 	struct mmc_async_req	areq;
 	enum mmc_drv_op		drv_op;
 	int			drv_op_result;
@@ -81,11 +78,8 @@ extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
 extern void mmc_cleanup_queue(struct mmc_queue *);
 extern void mmc_queue_suspend(struct mmc_queue *);
 extern void mmc_queue_resume(struct mmc_queue *);
-
 extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
 				     struct mmc_queue_req *);
-extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
-extern void mmc_queue_bounce_post(struct mmc_queue_req *);
 
 extern int mmc_access_rpmb(struct mmc_queue *);
 
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -1038,7 +1038,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
 	 */
 	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
 		     MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD |
-		     MMC_CAP_3_3V_DDR | MMC_CAP_NO_BOUNCE_BUFF;
+		     MMC_CAP_3_3V_DDR;
 
 	if (host->use_sg)
 		mmc->max_segs = 16;
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -702,11 +702,7 @@ static int pxamci_probe(struct platform_device *pdev)
 
 	pxamci_init_ocr(host);
 
-	/*
-	 * This architecture used to disable bounce buffers through its
-	 * defconfig, now it is done at runtime as a host property.
-	 */
-	mmc->caps = MMC_CAP_NO_BOUNCE_BUFF;
+	mmc->caps = 0;
 	host->cmdat = 0;
 	if (!cpu_is_pxa25x()) {
 		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -316,7 +316,7 @@ struct mmc_host {
 #define MMC_CAP_UHS_SDR50	(1 << 18)	/* Host supports UHS SDR50 mode */
 #define MMC_CAP_UHS_SDR104	(1 << 19)	/* Host supports UHS SDR104 mode */
 #define MMC_CAP_UHS_DDR50	(1 << 20)	/* Host supports UHS DDR50 mode */
-#define MMC_CAP_NO_BOUNCE_BUFF	(1 << 21)	/* Disable bounce buffers on host */
+/* (1 << 21) is free for reuse */
 #define MMC_CAP_DRIVER_TYPE_A	(1 << 23)	/* Host supports Driver Type A */
 #define MMC_CAP_DRIVER_TYPE_C	(1 << 24)	/* Host supports Driver Type C */
 #define MMC_CAP_DRIVER_TYPE_D	(1 << 25)	/* Host supports Driver Type D */