Commit 82b62487 authored by Ulf Hansson

Merge branch 'fixes' into next

parents ad9be7ff c53336c8
@@ -2374,12 +2374,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
 	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
 		 "mmcblk%u%s", card->host->index, subname ? subname : "");
 
-	if (mmc_card_mmc(card))
-		blk_queue_logical_block_size(md->queue.queue,
-					     card->ext_csd.data_sector_size);
-	else
-		blk_queue_logical_block_size(md->queue.queue, 512);
-
 	set_capacity(md->disk, size);
 
 	if (mmc_host_cmd23(card->host)) {
......
@@ -95,7 +95,7 @@ static void mmc_should_fail_request(struct mmc_host *host,
 	if (!data)
 		return;
 
-	if (cmd->error || data->error ||
+	if ((cmd && cmd->error) || data->error ||
 	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
 		return;
......
@@ -355,6 +355,7 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
 {
 	struct mmc_host *host = card->host;
 	u64 limit = BLK_BOUNCE_HIGH;
+	unsigned block_size = 512;
 
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
 		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
@@ -368,7 +369,13 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
 	blk_queue_max_hw_sectors(mq->queue,
 		min(host->max_blk_count, host->max_req_size / 512));
 	blk_queue_max_segments(mq->queue, host->max_segs);
-	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+	if (mmc_card_mmc(card))
+		block_size = card->ext_csd.data_sector_size;
+
+	blk_queue_logical_block_size(mq->queue, block_size);
+	blk_queue_max_segment_size(mq->queue,
+			round_down(host->max_seg_size, block_size));
 
 	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
 	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
......
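Note (not part of the commit): the hunks above move the logical block size setup out of mmc_blk_alloc_req() and into mmc_setup_queue(), and in the same place cap the maximum DMA segment size to a multiple of that block size via round_down(). A minimal user-space sketch of that arithmetic, using hypothetical values for a 4 KiB eMMC data sector size and the host's max_seg_size:

#include <stdio.h>

/* Mirrors the kernel's round_down() for power-of-two alignment values. */
#define round_down(x, y) ((x) & ~((__typeof__(x))((y) - 1)))

int main(void)
{
	unsigned int block_size = 4096;    /* hypothetical ext_csd.data_sector_size */
	unsigned int max_seg_size = 65535; /* hypothetical host->max_seg_size */

	/* Each segment stays a whole number of logical blocks: prints 61440. */
	printf("aligned max segment size: %u\n",
	       round_down(max_seg_size, block_size));
	return 0;
}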
@@ -201,7 +201,7 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
 	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
 
 	cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
-		(cq_host->num_slots - 1);
+		cq_host->mmc->cqe_qdepth;
 
 	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
 		 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
@@ -217,12 +217,21 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
 						 cq_host->desc_size,
 						 &cq_host->desc_dma_base,
 						 GFP_KERNEL);
+	if (!cq_host->desc_base)
+		return -ENOMEM;
+
 	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
 					      cq_host->data_size,
 					      &cq_host->trans_desc_dma_base,
 					      GFP_KERNEL);
-	if (!cq_host->desc_base || !cq_host->trans_desc_base)
+	if (!cq_host->trans_desc_base) {
+		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
+				   cq_host->desc_base,
+				   cq_host->desc_dma_base);
+		cq_host->desc_base = NULL;
+		cq_host->desc_dma_base = 0;
 		return -ENOMEM;
+	}
 
 	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
 		 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
......
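Note (not part of the commit): the cqhci hunks above size the transfer-descriptor area by cqe_qdepth and stop leaking the task-descriptor list when the second allocation fails. A minimal sketch of that allocate-then-unwind pattern in plain C, with malloc()/free() standing in for dmam_alloc_coherent()/dmam_free_coherent() and hypothetical names:

#include <stdlib.h>

struct two_bufs {                 /* hypothetical stand-in for struct cqhci_host */
	void *desc;
	void *trans;
};

static int alloc_two_bufs(struct two_bufs *b, size_t desc_sz, size_t trans_sz)
{
	b->desc = malloc(desc_sz);
	if (!b->desc)
		return -1;        /* first allocation failed: nothing to undo */

	b->trans = malloc(trans_sz);
	if (!b->trans) {
		free(b->desc);    /* undo the first allocation ... */
		b->desc = NULL;   /* ... and leave no stale pointer behind */
		return -1;
	}
	return 0;
}

Resetting the pointer after the unwind also keeps any later teardown path from touching a buffer that has already been released.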
@@ -1450,6 +1450,7 @@ static int mmc_spi_probe(struct spi_device *spi)
 		mmc->caps &= ~MMC_CAP_NEEDS_POLL;
 		mmc_gpiod_request_cd_irq(mmc);
 	}
+	mmc_detect_change(mmc, 0);
 
 	/* Index 1 is write protect/read only */
 	status = mmc_gpiod_request_ro(mmc, NULL, 1, 0, NULL);
......
@@ -65,6 +65,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
 	.scc_offset	= 0x0300,
 	.taps		= rcar_gen2_scc_taps,
 	.taps_num	= ARRAY_SIZE(rcar_gen2_scc_taps),
+	.max_blk_count	= 0xffffffff,
 };
 
 /* Definitions for sampling clocks */
......
@@ -277,6 +277,11 @@ static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host,
 	iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
 }
 
+static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
+{
+	iowrite32(val, host->ctl + (addr << host->bus_shift));
+}
+
 static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr,
 				       const u32 *buf, int count)
 {
......
@@ -43,6 +43,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/mmc/sdio.h>
 #include <linux/scatterlist.h>
+#include <linux/sizes.h>
 #include <linux/spinlock.h>
 #include <linux/swiotlb.h>
 #include <linux/workqueue.h>
@@ -629,7 +630,7 @@ static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg,
 	return false;
 }
 
-static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
+static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
 {
 	struct mmc_host *mmc = host->mmc;
 	struct tmio_mmc_data *pdata = host->pdata;
@@ -637,7 +638,7 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
 	unsigned int sdio_status;
 
 	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
-		return;
+		return false;
 
 	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
 	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;
@@ -650,6 +651,8 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
 	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
 		mmc_signal_sdio_irq(mmc);
 
+	return ireg;
 }
 
 irqreturn_t tmio_mmc_irq(int irq, void *devid)
@@ -668,9 +671,10 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
 	if (__tmio_mmc_sdcard_irq(host, ireg, status))
 		return IRQ_HANDLED;
 
-	__tmio_mmc_sdio_irq(host);
+	if (__tmio_mmc_sdio_irq(host))
+		return IRQ_HANDLED;
 
-	return IRQ_HANDLED;
+	return IRQ_NONE;
 }
 EXPORT_SYMBOL_GPL(tmio_mmc_irq);
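Note (not part of the commit): the tmio_mmc_irq() hunk above makes the handler report IRQ_NONE when neither the SD-card nor the SDIO status showed anything pending, which is the usual contract for handlers on potentially shared interrupt lines. A generic sketch of that contract, with my_hw_pending() as a hypothetical status check:

#include <linux/interrupt.h>

static bool my_hw_pending(void *devid)    /* hypothetical: read the device's status */
{
	return false;                     /* stub for the sketch */
}

static irqreturn_t my_irq_handler(int irq, void *devid)
{
	if (!my_hw_pending(devid))
		return IRQ_NONE;          /* not ours: lets the core flag spurious IRQs */

	/* ... acknowledge and service the device here ... */
	return IRQ_HANDLED;
}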
@@ -700,7 +704,10 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
 
 	/* Set transfer length / blocksize */
 	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
-	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
+	if (host->mmc->max_blk_count >= SZ_64K)
+		sd_ctrl_write32(host, CTL_XFER_BLK_COUNT, data->blocks);
+	else
+		sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
 
 	tmio_mmc_start_dma(host, data);
......
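Note (not part of the commit): the last hunk, together with the new sd_ctrl_write32() helper, exists because a block count of 64K or more cannot be carried by a 16-bit CTL_XFER_BLK_COUNT write; the Renesas SDHI of_data above appears to opt into the 32-bit path through its larger .max_blk_count. A small stand-alone illustration with a hypothetical transfer size:

#include <stdint.h>
#include <stdio.h>

#define SZ_64K 0x10000   /* value of SZ_64K from <linux/sizes.h> */

int main(void)
{
	uint32_t blocks = 70000;            /* hypothetical transfer, >= SZ_64K */
	uint16_t as16 = (uint16_t)blocks;   /* what a 16-bit register write keeps */

	/* Prints: requested 70000 blocks, a 16-bit write would latch 4464 */
	printf("requested %u blocks, a 16-bit write would latch %u\n",
	       (unsigned)blocks, (unsigned)as16);
	return 0;
}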