Commit f078727b authored by FUJITA Tomonori, committed by James Bottomley

[SCSI] remove scsi_req_map_sg

No one uses scsi_execute_async with a data transfer any more, so we can
remove scsi_req_map_sg.

scsi_eh_lock_door is the only remaining user of scsi_execute_async, and
it handles neither sense data nor a completion callback, so we can
remove scsi_io_context too.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
parent 26243043
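
The pattern the patch adopts is the stock fire-and-forget BLOCK_PC request: allocate a request on the device's queue, hand-fill the CDB and request fields, and let the end_io callback drop the request reference. A minimal sketch of that pattern, distilled from the diff below but not part of the patch (the function names and the TEST_UNIT_READY command are illustrative):

	#include <linux/blkdev.h>
	#include <scsi/scsi.h>
	#include <scsi/scsi_device.h>

	/* completion callback: nothing to collect for a no-data command,
	 * so just drop the request reference */
	static void demo_cmd_done(struct request *req, int uptodate)
	{
		__blk_put_request(req->q, req);
	}

	static void demo_send_tur(struct scsi_device *sdev)
	{
		struct request *req;

		req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
		if (!req)
			return;

		req->cmd[0] = TEST_UNIT_READY;	/* remaining CDB bytes stay zero */
		req->cmd_len = COMMAND_SIZE(req->cmd[0]);
		req->cmd_type = REQ_TYPE_BLOCK_PC;	/* SCSI passthrough */
		req->cmd_flags |= REQ_QUIET;		/* don't log failures */
		req->timeout = 10 * HZ;
		req->retries = 5;

		/* at_head = 1, as in the patch; the callback frees the request */
		blk_execute_rq_nowait(req->q, NULL, req, 1, demo_cmd_done);
	}
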
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1441,6 +1441,11 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
 	}
 }
 
+static void eh_lock_door_done(struct request *req, int uptodate)
+{
+	__blk_put_request(req->q, req);
+}
+
 /**
  * scsi_eh_lock_door - Prevent medium removal for the specified device
  * @sdev:	SCSI device to prevent medium removal
@@ -1463,19 +1468,28 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
  */
 static void scsi_eh_lock_door(struct scsi_device *sdev)
 {
-	unsigned char cmnd[MAX_COMMAND_SIZE];
+	struct request *req;
 
-	cmnd[0] = ALLOW_MEDIUM_REMOVAL;
-	cmnd[1] = 0;
-	cmnd[2] = 0;
-	cmnd[3] = 0;
-	cmnd[4] = SCSI_REMOVAL_PREVENT;
-	cmnd[5] = 0;
+	req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
+	if (!req)
+		return;
 
-	scsi_execute_async(sdev, cmnd, 6, DMA_NONE, NULL, 0, 0, 10 * HZ,
-			   5, NULL, NULL, GFP_KERNEL);
+	req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
+	req->cmd[1] = 0;
+	req->cmd[2] = 0;
+	req->cmd[3] = 0;
+	req->cmd[4] = SCSI_REMOVAL_PREVENT;
+	req->cmd[5] = 0;
+	req->cmd_len = COMMAND_SIZE(req->cmd[0]);
+
+	req->cmd_type = REQ_TYPE_BLOCK_PC;
+	req->cmd_flags |= REQ_QUIET;
+	req->timeout = 10 * HZ;
+	req->retries = 5;
+
+	blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done);
 }
 
 /**
  * scsi_restart_operations - restart io operations to the specified host.
...
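
Note the design choice in eh_lock_door_done above: error handling only needs the door-lock attempt to be issued, so the callback ignores uptodate and req->errors and merely drops its request reference; req->sense is never pointed at a buffer, so no sense data is copied back. Purely as an illustration (not from this patch), a callback that did care about the outcome would look more like:

	static void demo_done_check(struct request *req, int uptodate)
	{
		if (req->errors)
			printk(KERN_DEBUG "opcode 0x%02x failed: errors 0x%x\n",
			       req->cmd[0], req->errors);
		/* collecting sense data would require req->sense to point at
		 * a buffer before submission, as the removed scsi_end_async
		 * arranged via scsi_io_context */
		__blk_put_request(req->q, req);
	}
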
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -277,196 +277,6 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
 }
 EXPORT_SYMBOL(scsi_execute_req);
 
-struct scsi_io_context {
-	void *data;
-	void (*done)(void *data, char *sense, int result, int resid);
-	char sense[SCSI_SENSE_BUFFERSIZE];
-};
-
-static struct kmem_cache *scsi_io_context_cache;
-
-static void scsi_end_async(struct request *req, int uptodate)
-{
-	struct scsi_io_context *sioc = req->end_io_data;
-
-	if (sioc->done)
-		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
-
-	kmem_cache_free(scsi_io_context_cache, sioc);
-	__blk_put_request(req->q, req);
-}
-
-static int scsi_merge_bio(struct request *rq, struct bio *bio)
-{
-	struct request_queue *q = rq->q;
-
-	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
-	if (rq_data_dir(rq) == WRITE)
-		bio->bi_rw |= (1 << BIO_RW);
-	blk_queue_bounce(q, &bio);
-	return blk_rq_append_bio(q, rq, bio);
-}
-
-static void scsi_bi_endio(struct bio *bio, int error)
-{
-	bio_put(bio);
-}
-
-/**
- * scsi_req_map_sg - map a scatterlist into a request
- * @rq:		request to fill
- * @sgl:	scatterlist
- * @nsegs:	number of elements
- * @bufflen:	len of buffer
- * @gfp:	memory allocation flags
- *
- * scsi_req_map_sg maps a scatterlist into a request so that the
- * request can be sent to the block layer. We do not trust the scatterlist
- * sent to us, as some ULDs use that struct to only organize the pages.
- */
-static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
-			   int nsegs, unsigned bufflen, gfp_t gfp)
-{
-	struct request_queue *q = rq->q;
-	int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	unsigned int data_len = bufflen, len, bytes, off;
-	struct scatterlist *sg;
-	struct page *page;
-	struct bio *bio = NULL;
-	int i, err, nr_vecs = 0;
-
-	for_each_sg(sgl, sg, nsegs, i) {
-		page = sg_page(sg);
-		off = sg->offset;
-		len = sg->length;
-
-		while (len > 0 && data_len > 0) {
-			/*
-			 * sg sends a scatterlist that is larger than
-			 * the data_len it wants transferred for certain
-			 * IO sizes
-			 */
-			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
-			bytes = min(bytes, data_len);
-
-			if (!bio) {
-				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
-				nr_pages -= nr_vecs;
-
-				bio = bio_alloc(gfp, nr_vecs);
-				if (!bio) {
-					err = -ENOMEM;
-					goto free_bios;
-				}
-				bio->bi_end_io = scsi_bi_endio;
-			}
-
-			if (bio_add_pc_page(q, bio, page, bytes, off) !=
-			    bytes) {
-				bio_put(bio);
-				err = -EINVAL;
-				goto free_bios;
-			}
-
-			if (bio->bi_vcnt >= nr_vecs) {
-				err = scsi_merge_bio(rq, bio);
-				if (err) {
-					bio_endio(bio, 0);
-					goto free_bios;
-				}
-				bio = NULL;
-			}
-
-			page++;
-			len -= bytes;
-			data_len -= bytes;
-			off = 0;
-		}
-	}
-
-	rq->buffer = rq->data = NULL;
-	rq->data_len = bufflen;
-	return 0;
-
-free_bios:
-	while ((bio = rq->bio) != NULL) {
-		rq->bio = bio->bi_next;
-		/*
-		 * call endio instead of bio_put in case it was bounced
-		 */
-		bio_endio(bio, 0);
-	}
-
-	return err;
-}
-
-/**
- * scsi_execute_async - insert request
- * @sdev:	scsi device
- * @cmd:	scsi command
- * @cmd_len:	length of scsi cdb
- * @data_direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_NONE
- * @buffer:	data buffer (this can be a kernel buffer or scatterlist)
- * @bufflen:	len of buffer
- * @use_sg:	if buffer is a scatterlist this is the number of elements
- * @timeout:	request timeout in seconds
- * @retries:	number of times to retry request
- * @privdata:	data passed to done()
- * @done:	callback function when done
- * @gfp:	memory allocation flags
- */
-int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
-		       int cmd_len, int data_direction, void *buffer, unsigned bufflen,
-		       int use_sg, int timeout, int retries, void *privdata,
-		       void (*done)(void *, char *, int, int), gfp_t gfp)
-{
-	struct request *req;
-	struct scsi_io_context *sioc;
-	int err = 0;
-	int write = (data_direction == DMA_TO_DEVICE);
-
-	sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
-	if (!sioc)
-		return DRIVER_ERROR << 24;
-
-	req = blk_get_request(sdev->request_queue, write, gfp);
-	if (!req)
-		goto free_sense;
-	req->cmd_type = REQ_TYPE_BLOCK_PC;
-	req->cmd_flags |= REQ_QUIET;
-
-	if (use_sg)
-		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
-	else if (bufflen)
-		err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);
-
-	if (err)
-		goto free_req;
-
-	req->cmd_len = cmd_len;
-	memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
-	memcpy(req->cmd, cmd, req->cmd_len);
-	req->sense = sioc->sense;
-	req->sense_len = 0;
-	req->timeout = timeout;
-	req->retries = retries;
-	req->end_io_data = sioc;
-
-	sioc->data = privdata;
-	sioc->done = done;
-
-	blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
-	return 0;
-
-free_req:
-	blk_put_request(req);
-free_sense:
-	kmem_cache_free(scsi_io_context_cache, sioc);
-	return DRIVER_ERROR << 24;
-}
-EXPORT_SYMBOL_GPL(scsi_execute_async);
-
 /*
  * Function: scsi_init_cmd_errh()
  *
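
With the helper gone, a driver that still wants asynchronous passthrough with a data buffer open-codes what scsi_execute_async did, minus the scsi_io_context indirection: map the buffer with blk_rq_map_kern and submit with blk_execute_rq_nowait, carrying private state in req->end_io_data. A rough sketch under those assumptions (all names are illustrative and error handling is trimmed; not part of this patch):

	struct demo_ctx {
		void (*done)(void *data, int result);
		void *data;
	};

	static void demo_end_io(struct request *req, int uptodate)
	{
		struct demo_ctx *ctx = req->end_io_data;

		if (ctx->done)
			ctx->done(ctx->data, req->errors);
		kfree(ctx);
		__blk_put_request(req->q, req);
	}

	static int demo_read_async(struct scsi_device *sdev, unsigned char *cdb,
				   int cdb_len, void *buf, unsigned int buflen,
				   struct demo_ctx *ctx)
	{
		struct request *req;
		int err;

		/* READ: data flows from the device; use WRITE for the
		 * DMA_TO_DEVICE case, as the removed code did */
		req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		err = blk_rq_map_kern(req->q, req, buf, buflen, GFP_KERNEL);
		if (err) {
			blk_put_request(req);
			return err;
		}

		memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
		memcpy(req->cmd, cdb, cdb_len);
		req->cmd_len = cdb_len;
		req->cmd_type = REQ_TYPE_BLOCK_PC;
		req->cmd_flags |= REQ_QUIET;
		req->timeout = 30 * HZ;
		req->retries = 3;
		req->end_io_data = ctx;

		blk_execute_rq_nowait(req->q, NULL, req, 0, demo_end_io);
		return 0;
	}
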
@@ -1920,20 +1730,12 @@ int __init scsi_init_queue(void)
 {
 	int i;
 
-	scsi_io_context_cache = kmem_cache_create("scsi_io_context",
-					sizeof(struct scsi_io_context),
-					0, 0, NULL);
-	if (!scsi_io_context_cache) {
-		printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
-		return -ENOMEM;
-	}
-
 	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
 					   sizeof(struct scsi_data_buffer),
 					   0, 0, NULL);
 	if (!scsi_sdb_cache) {
 		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
-		goto cleanup_io_context;
+		return -ENOMEM;
 	}
 
 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
@@ -1968,8 +1770,6 @@ int __init scsi_init_queue(void)
 		kmem_cache_destroy(sgp->slab);
 	}
 	kmem_cache_destroy(scsi_sdb_cache);
-cleanup_io_context:
-	kmem_cache_destroy(scsi_io_context_cache);
 
 	return -ENOMEM;
 }
@@ -1978,7 +1778,6 @@ void scsi_exit_queue(void)
 {
 	int i;
 
-	kmem_cache_destroy(scsi_io_context_cache);
 	kmem_cache_destroy(scsi_sdb_cache);
 
 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
...
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -371,12 +371,6 @@ extern int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
 			    int data_direction, void *buffer, unsigned bufflen,
 			    struct scsi_sense_hdr *, int timeout, int retries,
 			    int *resid);
-extern int scsi_execute_async(struct scsi_device *sdev,
-			      const unsigned char *cmd, int cmd_len, int data_direction,
-			      void *buffer, unsigned bufflen, int use_sg,
-			      int timeout, int retries, void *privdata,
-			      void (*done)(void *, char *, int, int),
-			      gfp_t gfp);
 static inline int __must_check scsi_device_reprobe(struct scsi_device *sdev)
 {
...
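
For the synchronous case, callers use scsi_execute_req, whose declaration survives just above. Purely as an illustration (the MODE SENSE command, buffer handling, and retry count are assumptions, not from this patch), a blocking read through that interface might look like:

	#include <linux/dma-mapping.h>
	#include <scsi/scsi.h>
	#include <scsi/scsi_device.h>
	#include <scsi/scsi_eh.h>

	static int demo_mode_sense(struct scsi_device *sdev, void *buf,
				   unsigned char len)
	{
		/* MODE SENSE(6), page code 0x3f = all pages, cmd[4] = alloc len */
		unsigned char cmd[6] = { MODE_SENSE, 0, 0x3f, 0, len, 0 };
		struct scsi_sense_hdr sshdr;

		/* sleeps until completion; sense, if any, is decoded into
		 * sshdr; returns the command's result word (0 on success) */
		return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, len,
					&sshdr, 10 * HZ, 3, NULL);
	}
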