Commit e03dc5d3 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'mtd/fixes-for-4.17-rc5' of git://git.infradead.org/linux-mtd

Pull mtd fixes from Boris Brezillon:

 - make nand_soft_waitrdy() wait tWB before polling the status REG

 - fix BCH write in the Marvell NAND controller driver

 - fix wrong picosec to msec conversion in the Marvell NAND controller
   driver

 - fix DMA handling in the TI OneNAND controller driver

* tag 'mtd/fixes-for-4.17-rc5' of git://git.infradead.org/linux-mtd:
  mtd: rawnand: Make sure we wait tWB before polling the STATUS reg
  mtd: rawnand: marvell: fix command xtype in BCH write hook
  mtd: rawnand: marvell: pass ms delay to wait_op
  mtd: onenand: omap2: Disable DMA for HIGHMEM buffers
parents ca30093d 3057fcef
...@@ -375,31 +375,21 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area, ...@@ -375,31 +375,21 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
{ {
struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd); struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
struct onenand_chip *this = mtd->priv; struct onenand_chip *this = mtd->priv;
dma_addr_t dma_src, dma_dst; struct device *dev = &c->pdev->dev;
int bram_offset;
void *buf = (void *)buffer; void *buf = (void *)buffer;
dma_addr_t dma_src, dma_dst;
int bram_offset, err;
size_t xtra; size_t xtra;
int ret;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset; bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
if (bram_offset & 3 || (size_t)buf & 3 || count < 384) /*
goto out_copy; * If the buffer address is not DMA-able, len is not long enough to make
* DMA transfers profitable or panic_write() may be in an interrupt
/* panic_write() may be in an interrupt context */ * context fallback to PIO mode.
if (in_interrupt() || oops_in_progress) */
goto out_copy; if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
count < 384 || in_interrupt() || oops_in_progress )
if (buf >= high_memory) {
struct page *p1;
if (((size_t)buf & PAGE_MASK) !=
((size_t)(buf + count - 1) & PAGE_MASK))
goto out_copy;
p1 = vmalloc_to_page(buf);
if (!p1)
goto out_copy; goto out_copy;
buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
}
xtra = count & 3; xtra = count & 3;
if (xtra) { if (xtra) {
...@@ -407,25 +397,21 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area, ...@@ -407,25 +397,21 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
memcpy(buf + count, this->base + bram_offset + count, xtra); memcpy(buf + count, this->base + bram_offset + count, xtra);
} }
dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
dma_src = c->phys_base + bram_offset; dma_src = c->phys_base + bram_offset;
dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
dev_err(&c->pdev->dev,
"Couldn't DMA map a %d byte buffer\n",
count);
goto out_copy;
}
ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
if (ret) { if (dma_mapping_error(dev, dma_dst)) {
dev_err(&c->pdev->dev, "timeout waiting for DMA\n"); dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count);
goto out_copy; goto out_copy;
} }
err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);
if (!err)
return 0; return 0;
dev_err(dev, "timeout waiting for DMA\n");
out_copy: out_copy:
memcpy(buf, this->base + bram_offset, count); memcpy(buf, this->base + bram_offset, count);
return 0; return 0;
...@@ -437,50 +423,35 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area, ...@@ -437,50 +423,35 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
{ {
struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd); struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
struct onenand_chip *this = mtd->priv; struct onenand_chip *this = mtd->priv;
dma_addr_t dma_src, dma_dst; struct device *dev = &c->pdev->dev;
int bram_offset;
void *buf = (void *)buffer; void *buf = (void *)buffer;
int ret; dma_addr_t dma_src, dma_dst;
int bram_offset, err;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset; bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
if (bram_offset & 3 || (size_t)buf & 3 || count < 384) /*
goto out_copy; * If the buffer address is not DMA-able, len is not long enough to make
* DMA transfers profitable or panic_write() may be in an interrupt
/* panic_write() may be in an interrupt context */ * context fallback to PIO mode.
if (in_interrupt() || oops_in_progress) */
goto out_copy; if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
count < 384 || in_interrupt() || oops_in_progress )
if (buf >= high_memory) {
struct page *p1;
if (((size_t)buf & PAGE_MASK) !=
((size_t)(buf + count - 1) & PAGE_MASK))
goto out_copy;
p1 = vmalloc_to_page(buf);
if (!p1)
goto out_copy; goto out_copy;
buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
}
dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE); dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
dma_dst = c->phys_base + bram_offset; dma_dst = c->phys_base + bram_offset;
if (dma_mapping_error(&c->pdev->dev, dma_src)) { if (dma_mapping_error(dev, dma_src)) {
dev_err(&c->pdev->dev, dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count);
"Couldn't DMA map a %d byte buffer\n",
count);
return -1;
}
ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
if (ret) {
dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
goto out_copy; goto out_copy;
} }
err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
dma_unmap_page(dev, dma_src, count, DMA_TO_DEVICE);
if (!err)
return 0; return 0;
dev_err(dev, "timeout waiting for DMA\n");
out_copy: out_copy:
memcpy(this->base + bram_offset, buf, count); memcpy(this->base + bram_offset, buf, count);
return 0; return 0;
......
...@@ -1074,7 +1074,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip, ...@@ -1074,7 +1074,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
return ret; return ret;
ret = marvell_nfc_wait_op(chip, ret = marvell_nfc_wait_op(chip,
chip->data_interface.timings.sdr.tPROG_max); PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
return ret; return ret;
} }
...@@ -1408,6 +1408,7 @@ marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk, ...@@ -1408,6 +1408,7 @@ marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip); struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout; const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
u32 xtype;
int ret; int ret;
struct marvell_nfc_op nfc_op = { struct marvell_nfc_op nfc_op = {
.ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD, .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD,
...@@ -1423,7 +1424,12 @@ marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk, ...@@ -1423,7 +1424,12 @@ marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
* last naked write. * last naked write.
*/ */
if (chunk == 0) { if (chunk == 0) {
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_WRITE_DISPATCH) | if (lt->nchunks == 1)
xtype = XTYPE_MONOLITHIC_RW;
else
xtype = XTYPE_WRITE_DISPATCH;
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(xtype) |
NDCB0_ADDR_CYC(marvell_nand->addr_cyc) | NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
NDCB0_CMD1(NAND_CMD_SEQIN); NDCB0_CMD1(NAND_CMD_SEQIN);
nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page); nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
...@@ -1494,7 +1500,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd, ...@@ -1494,7 +1500,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd,
} }
ret = marvell_nfc_wait_op(chip, ret = marvell_nfc_wait_op(chip,
chip->data_interface.timings.sdr.tPROG_max); PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
marvell_nfc_disable_hw_ecc(chip); marvell_nfc_disable_hw_ecc(chip);
......
...@@ -706,12 +706,17 @@ static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo) ...@@ -706,12 +706,17 @@ static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
*/ */
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
{ {
const struct nand_sdr_timings *timings;
u8 status = 0; u8 status = 0;
int ret; int ret;
if (!chip->exec_op) if (!chip->exec_op)
return -ENOTSUPP; return -ENOTSUPP;
/* Wait tWB before polling the STATUS reg. */
timings = nand_get_sdr_timings(&chip->data_interface);
ndelay(PSEC_TO_NSEC(timings->tWB_max));
ret = nand_status_op(chip, NULL); ret = nand_status_op(chip, NULL);
if (ret) if (ret)
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment