ide: move command related fields from ide_hwif_t to struct ide_cmd

* Move command-related fields from ide_hwif_t to struct ide_cmd.

* Make ide_init_sg_cmd() take a command and the number of sectors as arguments.

There should be no functional changes caused by this patch.
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
parent adb1af98
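For orientation before the diff hunks, here is a minimal, self-contained C sketch of the change: the per-command scatter/gather bookkeeping now lives in struct ide_cmd rather than in ide_hwif_t, and ide_init_sg_cmd() takes a command pointer and a sector count instead of a drive and request. The field names and the new function body mirror the hunks below; everything else (the user-space scaffolding, the opaque scatterlist type, the example sector count) is illustrative only and not part of the patch.

/*
 * Minimal sketch (user-space, illustrative only): the per-command
 * scatter/gather state moves from ide_hwif_t into struct ide_cmd,
 * and ide_init_sg_cmd() now takes the command and a sector count.
 */
#include <stdio.h>

struct scatterlist;                     /* opaque here; real type is in the kernel */

struct ide_cmd {
        int sg_nents;                   /* number of sg entries */
        int orig_sg_nents;
        int sg_dma_direction;           /* DMA transfer direction */

        unsigned int nsect;             /* sectors in this command */
        unsigned int nleft;             /* sectors still to transfer */
        struct scatterlist *cursg;      /* current sg entry */
        unsigned int cursg_ofs;         /* sector offset within cursg */
};

/* New signature: no drive/request needed, just the command and a count. */
static void ide_init_sg_cmd(struct ide_cmd *cmd, int nsect)
{
        cmd->nsect = cmd->nleft = nsect;
        cmd->cursg_ofs = 0;
        cmd->cursg = NULL;
}

int main(void)
{
        struct ide_cmd cmd = { 0 };

        /* Callers that used ide_init_sg_cmd(drive, rq) now do this instead
         * (8 is a hypothetical sector count for the example). */
        ide_init_sg_cmd(&cmd, 8);

        printf("nsect=%u nleft=%u\n", cmd.nsect, cmd.nleft);
        return 0;
}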
@@ -215,7 +215,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
         struct request *rq = hwif->rq;
         _auide_hwif *ahwif = &auide_hwif;
         struct scatterlist *sg;
-        int i = hwif->sg_nents, iswrite, count = 0;
+        int i = hwif->cmd.sg_nents, iswrite, count = 0;
         iswrite = (rq_data_dir(rq) == WRITE);
         /* Save for interrupt context */

@@ -344,7 +344,7 @@ static int icside_dma_setup(ide_drive_t *drive)
          * Tell the DMA engine about the SG table and
          * data direction.
          */
-        set_dma_sg(ec->dma, hwif->sg_table, hwif->sg_nents);
+        set_dma_sg(ec->dma, hwif->sg_table, hwif->cmd.sg_nents);
         set_dma_mode(ec->dma, dma_mode);
         drive->waiting_for_dma = 1;

@@ -104,14 +104,14 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
                 lba48 = 0;
         }
-        if (!dma) {
-                ide_init_sg_cmd(drive, rq);
-                ide_map_sg(drive, rq);
-        }
         memset(&cmd, 0, sizeof(cmd));
         cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+        if (dma == 0) {
+                ide_init_sg_cmd(&cmd, nsectors);
+                ide_map_sg(drive, rq);
+        }
         if (drive->dev_flags & IDE_DFLAG_LBA) {
                 if (lba48) {
                         pr_debug("%s: LBA=0x%012llx\n", drive->name,

@@ -170,7 +170,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
                 /* fallback to PIO */
                 cmd.tf_flags |= IDE_TFLAG_DMA_PIO_FALLBACK;
                 ide_tf_set_cmd(drive, &cmd, 0);
-                ide_init_sg_cmd(drive, rq);
+                ide_init_sg_cmd(&cmd, nsectors);
                 rc = do_rw_taskfile(drive, &cmd);
         }

@@ -120,7 +120,7 @@ int ide_build_dmatable(ide_drive_t *drive, struct request *rq)
         struct scatterlist *sg;
         u8 is_trm290 = !!(hwif->host_flags & IDE_HFLAG_TRM290);
-        for_each_sg(hwif->sg_table, sg, hwif->sg_nents, i) {
+        for_each_sg(hwif->sg_table, sg, hwif->cmd.sg_nents, i) {
                 u32 cur_addr, cur_len, xcount, bcount;
                 cur_addr = sg_dma_address(sg);

@@ -128,21 +128,22 @@ int ide_build_sglist(ide_drive_t *drive, struct request *rq)
 {
         ide_hwif_t *hwif = drive->hwif;
         struct scatterlist *sg = hwif->sg_table;
+        struct ide_cmd *cmd = &hwif->cmd;
         int i;
         ide_map_sg(drive, rq);
         if (rq_data_dir(rq) == READ)
-                hwif->sg_dma_direction = DMA_FROM_DEVICE;
+                cmd->sg_dma_direction = DMA_FROM_DEVICE;
         else
-                hwif->sg_dma_direction = DMA_TO_DEVICE;
+                cmd->sg_dma_direction = DMA_TO_DEVICE;
-        i = dma_map_sg(hwif->dev, sg, hwif->sg_nents, hwif->sg_dma_direction);
+        i = dma_map_sg(hwif->dev, sg, cmd->sg_nents, cmd->sg_dma_direction);
         if (i == 0)
                 ide_map_sg(drive, rq);
         else {
-                hwif->orig_sg_nents = hwif->sg_nents;
-                hwif->sg_nents = i;
+                cmd->orig_sg_nents = cmd->sg_nents;
+                cmd->sg_nents = i;
         }
         return i;

@@ -162,9 +163,10 @@ int ide_build_sglist(ide_drive_t *drive, struct request *rq)
 void ide_destroy_dmatable(ide_drive_t *drive)
 {
         ide_hwif_t *hwif = drive->hwif;
+        struct ide_cmd *cmd = &hwif->cmd;
-        dma_unmap_sg(hwif->dev, hwif->sg_table, hwif->orig_sg_nents,
-                     hwif->sg_dma_direction);
+        dma_unmap_sg(hwif->dev, hwif->sg_table, cmd->orig_sg_nents,
+                     cmd->sg_dma_direction);
 }
 EXPORT_SYMBOL_GPL(ide_destroy_dmatable);

@@ -244,6 +244,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
 {
         struct ide_disk_obj *floppy = drive->driver_data;
         ide_hwif_t *hwif = drive->hwif;
+        struct ide_cmd *cmd = &hwif->cmd;
         struct ide_atapi_pc *pc;
         if (drive->debug_mask & IDE_DBG_RQ)

@@ -285,12 +286,12 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
         }
         if (blk_fs_request(rq) || pc->req_xfer) {
-                ide_init_sg_cmd(drive, rq);
+                ide_init_sg_cmd(cmd, rq->nr_sectors);
                 ide_map_sg(drive, rq);
         }
         pc->sg = hwif->sg_table;
-        pc->sg_cnt = hwif->sg_nents;
+        pc->sg_cnt = cmd->sg_nents;
         pc->rq = rq;

@@ -274,30 +274,26 @@ static ide_startstop_t do_special (ide_drive_t *drive)
 void ide_map_sg(ide_drive_t *drive, struct request *rq)
 {
         ide_hwif_t *hwif = drive->hwif;
+        struct ide_cmd *cmd = &hwif->cmd;
         struct scatterlist *sg = hwif->sg_table;
         if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
                 sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
-                hwif->sg_nents = 1;
+                cmd->sg_nents = 1;
         } else if (!rq->bio) {
                 sg_init_one(sg, rq->data, rq->data_len);
-                hwif->sg_nents = 1;
-        } else {
-                hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
-        }
+                cmd->sg_nents = 1;
+        } else
+                cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
 }
 EXPORT_SYMBOL_GPL(ide_map_sg);
-void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
+void ide_init_sg_cmd(struct ide_cmd *cmd, int nsect)
 {
-        ide_hwif_t *hwif = drive->hwif;
-        hwif->nsect = hwif->nleft = rq->nr_sectors;
-        hwif->cursg_ofs = 0;
-        hwif->cursg = NULL;
+        cmd->nsect = cmd->nleft = nsect;
+        cmd->cursg_ofs = 0;
+        cmd->cursg = NULL;
 }
 EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
 /**

@@ -323,7 +319,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
         case TASKFILE_OUT:
         case TASKFILE_MULTI_IN:
         case TASKFILE_IN:
-                ide_init_sg_cmd(drive, rq);
+                ide_init_sg_cmd(cmd, rq->nr_sectors);
                 ide_map_sg(drive, rq);
         default:
                 break;

@@ -209,7 +209,7 @@ static void ide_pio_sector(ide_drive_t *drive, struct ide_cmd *cmd,
 {
         ide_hwif_t *hwif = drive->hwif;
         struct scatterlist *sg = hwif->sg_table;
-        struct scatterlist *cursg = hwif->cursg;
+        struct scatterlist *cursg = cmd->cursg;
         struct page *page;
 #ifdef CONFIG_HIGHMEM
         unsigned long flags;

@@ -217,14 +217,14 @@ static void ide_pio_sector(ide_drive_t *drive, struct ide_cmd *cmd,
         unsigned int offset;
         u8 *buf;
-        cursg = hwif->cursg;
+        cursg = cmd->cursg;
         if (!cursg) {
                 cursg = sg;
-                hwif->cursg = sg;
+                cmd->cursg = sg;
         }
         page = sg_page(cursg);
-        offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;
+        offset = cursg->offset + cmd->cursg_ofs * SECTOR_SIZE;
         /* get the current page and offset */
         page = nth_page(page, (offset >> PAGE_SHIFT));

@@ -235,12 +235,12 @@ static void ide_pio_sector(ide_drive_t *drive, struct ide_cmd *cmd,
 #endif
         buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;
-        hwif->nleft--;
-        hwif->cursg_ofs++;
+        cmd->nleft--;
+        cmd->cursg_ofs++;
-        if ((hwif->cursg_ofs * SECTOR_SIZE) == cursg->length) {
-                hwif->cursg = sg_next(hwif->cursg);
-                hwif->cursg_ofs = 0;
+        if ((cmd->cursg_ofs * SECTOR_SIZE) == cursg->length) {
+                cmd->cursg = sg_next(cmd->cursg);
+                cmd->cursg_ofs = 0;
         }
         /* do the actual data transfer */

@@ -260,7 +260,7 @@ static void ide_pio_multi(ide_drive_t *drive, struct ide_cmd *cmd,
 {
         unsigned int nsect;
-        nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count);
+        nsect = min_t(unsigned int, cmd->nleft, drive->mult_count);
         while (nsect--)
                 ide_pio_sector(drive, cmd, write);
 }

@@ -295,19 +295,18 @@ static ide_startstop_t task_error(ide_drive_t *drive, struct ide_cmd *cmd,
                                   const char *s, u8 stat)
 {
         if (cmd->tf_flags & IDE_TFLAG_FS) {
-                ide_hwif_t *hwif = drive->hwif;
-                int sectors = hwif->nsect - hwif->nleft;
+                int sectors = cmd->nsect - cmd->nleft;
                 switch (cmd->data_phase) {
                 case TASKFILE_IN:
-                        if (hwif->nleft)
+                        if (cmd->nleft)
                                 break;
                         /* fall through */
                 case TASKFILE_OUT:
                         sectors--;
                         break;
                 case TASKFILE_MULTI_IN:
-                        if (hwif->nleft)
+                        if (cmd->nleft)
                                 break;
                         /* fall through */
                 case TASKFILE_MULTI_OUT:

@@ -375,7 +374,7 @@ static ide_startstop_t task_in_intr(ide_drive_t *drive)
         ide_pio_datablock(drive, cmd, 0);
         /* Are we done? Check status and finish transfer. */
-        if (!hwif->nleft) {
+        if (cmd->nleft == 0) {
                 stat = wait_drive_not_busy(drive);
                 if (!OK_STAT(stat, 0, BAD_STAT))
                         return task_error(drive, cmd, __func__, stat);

@@ -402,10 +401,10 @@ static ide_startstop_t task_out_intr (ide_drive_t *drive)
                 return task_error(drive, cmd, __func__, stat);
         /* Deal with unexpected ATA data phase. */
-        if (((stat & ATA_DRQ) == 0) ^ !hwif->nleft)
+        if (((stat & ATA_DRQ) == 0) ^ (cmd->nleft == 0))
                 return task_error(drive, cmd, __func__, stat);
-        if (!hwif->nleft) {
+        if (cmd->nleft == 0) {
                 ide_finish_cmd(drive, cmd, stat);
                 return ide_stopped;
         }

@@ -1432,7 +1432,7 @@ pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
         volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
         struct scatterlist *sg;
         int wr = (rq_data_dir(rq) == WRITE);
-        int i = hwif->sg_nents, count = 0;
+        int i = hwif->cmd.sg_nents, count = 0;
         /* DMA table is already aligned */
         table = (struct dbdma_cmd *) pmif->dma_table_cpu;

@@ -429,7 +429,7 @@ sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, int ddir)
 {
         ide_hwif_t *hwif = drive->hwif;
         unsigned int *table = hwif->dmatable_cpu;
-        unsigned int count = 0, i = hwif->sg_nents;
+        unsigned int count = 0, i = hwif->cmd.sg_nents;
         struct scatterlist *sg = hwif->sg_table;
         while (i && sg_dma_len(sg)) {

@@ -240,7 +240,7 @@ static int tx4939ide_build_dmatable(ide_drive_t *drive, struct request *rq)
         int i;
         struct scatterlist *sg;
-        for_each_sg(hwif->sg_table, sg, hwif->sg_nents, i) {
+        for_each_sg(hwif->sg_table, sg, hwif->cmd.sg_nents, i) {
                 u32 cur_addr, cur_len, bcount;
                 cur_addr = sg_dma_address(sg);

@@ -344,6 +344,16 @@ struct ide_cmd {
         u8 ftf_flags;                   /* for TASKFILE ioctl */
         u32 tf_flags;
         int data_phase;
+        int sg_nents;                   /* number of sg entries */
+        int orig_sg_nents;
+        int sg_dma_direction;           /* DMA transfer direction */
+        unsigned int nsect;
+        unsigned int nleft;
+        struct scatterlist *cursg;
+        unsigned int cursg_ofs;
         struct request *rq;             /* copy of request */
         void *special;                  /* valid_t generally */
 };

@@ -772,17 +782,9 @@ typedef struct hwif_s {
         /* Scatter-gather list used to build the above */
         struct scatterlist *sg_table;
         int sg_max_nents;               /* Maximum number of entries in it */
-        int sg_nents;                   /* Current number of entries in it */
-        int orig_sg_nents;
-        int sg_dma_direction;           /* dma transfer direction */
         struct ide_cmd cmd;             /* current command */
-        unsigned int nsect;
-        unsigned int nleft;
-        struct scatterlist *cursg;
-        unsigned int cursg_ofs;
         int rqsize;                     /* max sectors per request */
         int irq;                        /* our irq number */

@@ -1410,7 +1412,7 @@ int ide_pci_resume(struct pci_dev *);
 #endif
 void ide_map_sg(ide_drive_t *, struct request *);
-void ide_init_sg_cmd(ide_drive_t *, struct request *);
+void ide_init_sg_cmd(struct ide_cmd *, int);
 #define BAD_DMA_DRIVE           0
 #define GOOD_DMA_DRIVE          1