Commit b58d9154 authored by FUJITA Tomonori, committed by James Bottomley

[SCSI] export scsi-ml functions needed by tgt_scsi_lib and its LLDs

This patch contains the scsi-ml changes needed for target mode support.

Note: per the last review, we moved almost all of the fields we had added
to struct scsi_cmnd into our internal data structure, which we intend to
kill off once we can replace it with support from other parts of the
kernel.

The one field we left in place is the offset variable. It is needed to
handle the case where the target receives a request so large that it
cannot be executed in one DMA operation: max_sectors or a segment limit
may cap the size of the transfer. In that case the tgt core breaks the
command up into manageable transfers and sends them to the LLD one at a
time, and the offset tells the LLD how far into the command it currently
is. Is there another field on the scsi_cmnd for that?
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
parent 84ad58e4
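To make the offset contract concrete before the diff: a minimal sketch, assuming a hypothetical target-mode LLD, of how its transfer_data hook might consume cmd->offset. The handler name and example_queue_dma helper are made up for illustration; only the scsi_cmnd fields come from this patch.

	#include <scsi/scsi_cmnd.h>

	/* hypothetical helper that programs the HBA's DMA engine */
	extern int example_queue_dma(struct scsi_cmnd *cmd, unsigned byte_offset,
				     void (*done)(struct scsi_cmnd *));

	static int example_transfer_data(struct scsi_cmnd *cmd,
					 void (*done)(struct scsi_cmnd *))
	{
		/*
		 * cmd->offset: how many bytes of the overall command the tgt
		 * core has already handed us.  This call covers the next
		 * cmd->request_bufflen bytes, described by the cmd->use_sg
		 * scatterlist entries in cmd->request_buffer.
		 */
		return example_queue_dma(cmd, cmd->offset, done);
	}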
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
@@ -263,6 +263,10 @@ static void scsi_host_dev_release(struct device *dev)
 		kthread_stop(shost->ehandler);
 	if (shost->work_q)
 		destroy_workqueue(shost->work_q);
+	if (shost->uspace_req_q) {
+		kfree(shost->uspace_req_q->queuedata);
+		scsi_free_queue(shost->uspace_req_q);
+	}
 
 	scsi_destroy_command_freelist(shost);
 	if (shost->bqt)
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
@@ -156,8 +156,7 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
 
 static DEFINE_MUTEX(host_cmd_pool_mutex);
 
-static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
-					    gfp_t gfp_mask)
+struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
 {
 	struct scsi_cmnd *cmd;
 
@@ -178,6 +177,7 @@ static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
 
 	return cmd;
 }
+EXPORT_SYMBOL_GPL(__scsi_get_command);
 
 /*
  * Function: scsi_get_command()
@@ -214,9 +214,29 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
 		put_device(&dev->sdev_gendev);
 	return cmd;
 }
 EXPORT_SYMBOL(scsi_get_command);
 
+void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
+			struct device *dev)
+{
+	unsigned long flags;
+
+	/* changing locks here, don't need to restore the irq state */
+	spin_lock_irqsave(&shost->free_list_lock, flags);
+	if (unlikely(list_empty(&shost->free_list))) {
+		list_add(&cmd->list, &shost->free_list);
+		cmd = NULL;
+	}
+	spin_unlock_irqrestore(&shost->free_list_lock, flags);
+
+	if (likely(cmd != NULL))
+		kmem_cache_free(shost->cmd_pool->slab, cmd);
+
+	put_device(dev);
+}
+EXPORT_SYMBOL(__scsi_put_command);
+
 /*
  * Function: scsi_put_command()
  *
@@ -231,26 +251,15 @@ EXPORT_SYMBOL(scsi_get_command);
 void scsi_put_command(struct scsi_cmnd *cmd)
 {
 	struct scsi_device *sdev = cmd->device;
-	struct Scsi_Host *shost = sdev->host;
 	unsigned long flags;
 
 	/* serious error if the command hasn't come from a device list */
 	spin_lock_irqsave(&cmd->device->list_lock, flags);
 	BUG_ON(list_empty(&cmd->list));
 	list_del_init(&cmd->list);
-	spin_unlock(&cmd->device->list_lock);
-
-	/* changing locks here, don't need to restore the irq state */
-	spin_lock(&shost->free_list_lock);
-	if (unlikely(list_empty(&shost->free_list))) {
-		list_add(&cmd->list, &shost->free_list);
-		cmd = NULL;
-	}
-	spin_unlock_irqrestore(&shost->free_list_lock, flags);
+	spin_unlock_irqrestore(&cmd->device->list_lock, flags);
 
-	if (likely(cmd != NULL))
-		kmem_cache_free(shost->cmd_pool->slab, cmd);
-
-	put_device(&sdev->sdev_gendev);
+	__scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
 }
 EXPORT_SYMBOL(scsi_put_command);
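The pair exported above lets target code allocate commands that are not tied to any scsi_device. A minimal sketch of how a caller might pair them, pinning the host's gendev for the lifetime of the command; the example_* wrappers are hypothetical and the real scsi_tgt bookkeeping is more involved.

	#include <linux/device.h>
	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_host.h>

	static struct scsi_cmnd *example_host_get_command(struct Scsi_Host *shost,
							  gfp_t gfp_mask)
	{
		struct scsi_cmnd *cmd;

		/* pin the host while the command is outstanding */
		get_device(&shost->shost_gendev);
		cmd = __scsi_get_command(shost, gfp_mask);
		if (!cmd)
			put_device(&shost->shost_gendev);
		return cmd;
	}

	static void example_host_put_command(struct Scsi_Host *shost,
					     struct scsi_cmnd *cmd)
	{
		/* returns cmd to the host's free list or slab and drops
		 * the device reference taken above */
		__scsi_put_command(shost, cmd, &shost->shost_gendev);
	}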
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
@@ -704,7 +704,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
 	return NULL;
 }
 
-static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 {
 	struct scsi_host_sg_pool *sgp;
 	struct scatterlist *sgl;
@@ -745,7 +745,9 @@ static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_m
 
 	return sgl;
 }
+EXPORT_SYMBOL(scsi_alloc_sgtable);
 
-static void scsi_free_sgtable(struct scatterlist *sgl, int index)
+void scsi_free_sgtable(struct scatterlist *sgl, int index)
 {
 	struct scsi_host_sg_pool *sgp;
 
@@ -755,6 +757,8 @@ static void scsi_free_sgtable(struct scatterlist *sgl, int index)
 
 	mempool_free(sgl, sgp->pool);
 }
+EXPORT_SYMBOL(scsi_free_sgtable);
 
 /*
  * Function: scsi_release_buffers()
  *
@@ -1567,29 +1571,40 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
 }
 EXPORT_SYMBOL(scsi_calculate_bounce_limit);
 
-struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
+struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
+					 request_fn_proc *request_fn)
 {
-	struct Scsi_Host *shost = sdev->host;
 	struct request_queue *q;
 
-	q = blk_init_queue(scsi_request_fn, NULL);
+	q = blk_init_queue(request_fn, NULL);
 	if (!q)
 		return NULL;
 
-	blk_queue_prep_rq(q, scsi_prep_fn);
 	blk_queue_max_hw_segments(q, shost->sg_tablesize);
 	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
 	blk_queue_max_sectors(q, shost->max_sectors);
 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
 	blk_queue_segment_boundary(q, shost->dma_boundary);
-	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
-	blk_queue_softirq_done(q, scsi_softirq_done);
 
 	if (!shost->use_clustering)
 		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	return q;
 }
+EXPORT_SYMBOL(__scsi_alloc_queue);
+
+struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
+{
+	struct request_queue *q;
+
+	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
+	if (!q)
+		return NULL;
+
+	blk_queue_prep_rq(q, scsi_prep_fn);
+	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
+	blk_queue_softirq_done(q, scsi_softirq_done);
+	return q;
+}
 
 void scsi_free_queue(struct request_queue *q)
 {
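With the allocator split this way, non-initiator code can build a request queue that inherits the host's DMA limits but runs its own request_fn. A sketch, under stated assumptions, of how the uspace_req_q added by this patch might be set up; the example_* names are hypothetical.

	#include <linux/blkdev.h>
	#include <scsi/scsi_host.h>

	/* hypothetical request_fn that hands requests on this queue to
	 * userspace instead of scsi_request_fn's normal dispatch path */
	static void example_uspace_request_fn(struct request_queue *q)
	{
	}

	static int example_setup_uspace_queue(struct Scsi_Host *shost)
	{
		struct request_queue *q;

		/* inherits sg_tablesize, max_sectors, bounce limit, etc.
		 * from the host, but skips the initiator-only prep,
		 * flush, and softirq-done hooks */
		q = __scsi_alloc_queue(shost, example_uspace_request_fn);
		if (!q)
			return -ENOMEM;

		shost->uspace_req_q = q;
		return 0;
	}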
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
@@ -8,6 +8,7 @@
 
 struct request;
 struct scatterlist;
+struct Scsi_Host;
 struct scsi_device;
 
@@ -72,6 +73,9 @@ struct scsi_cmnd {
 	unsigned short use_sg;	/* Number of pieces of scatter-gather */
 	unsigned short sglist_len;	/* size of malloc'd scatter-gather list */
 
+	/* offset in cmd we are at (for multi-transfer tgt cmds) */
+	unsigned offset;
+
 	unsigned underflow;	/* Return error if less than
 				   this amount is transferred */
 
@@ -119,7 +123,10 @@ struct scsi_cmnd {
 };
 
 extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
+extern struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *, gfp_t);
 extern void scsi_put_command(struct scsi_cmnd *);
+extern void __scsi_put_command(struct Scsi_Host *, struct scsi_cmnd *,
+			       struct device *);
 extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
 extern void scsi_finish_command(struct scsi_cmnd *cmd);
 extern void scsi_req_abort_cmd(struct scsi_cmnd *cmd);
@@ -128,4 +135,7 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
 				 size_t *offset, size_t *len);
 extern void scsi_kunmap_atomic_sg(void *virt);
 
+extern struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t);
+extern void scsi_free_sgtable(struct scatterlist *, int);
+
 #endif /* _SCSI_SCSI_CMND_H */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
@@ -7,6 +7,7 @@
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
 
+struct request_queue;
 struct block_device;
 struct completion;
 struct module;
@@ -123,6 +124,39 @@ struct scsi_host_template {
 	int (* queuecommand)(struct scsi_cmnd *,
 			     void (*done)(struct scsi_cmnd *));
 
+	/*
+	 * The transfer functions are used to queue a scsi command to
+	 * the LLD. When the driver is finished processing the command
+	 * the done callback is invoked.
+	 *
+	 * Return values: see queuecommand
+	 *
+	 * If the LLD accepts the cmd, it should set the result to an
+	 * appropriate value when completed before calling the done function.
+	 *
+	 * STATUS: REQUIRED FOR TARGET DRIVERS
+	 */
+	/* TODO: rename */
+	int (* transfer_response)(struct scsi_cmnd *,
+				  void (*done)(struct scsi_cmnd *));
+	/*
+	 * This is called to inform the LLD to transfer cmd->request_bufflen
+	 * bytes of the cmd at cmd->offset in the cmd. cmd->use_sg
+	 * specifies the number of scatterlist entries in the command
+	 * and cmd->request_buffer contains the scatterlist.
+	 *
+	 * If the command cannot be processed in one transfer_data call
+	 * because a scatterlist within the LLD's limits cannot be
+	 * created, then transfer_data will be called multiple times.
+	 * It is initially called from process context, and later
+	 * calls are from interrupt context.
+	 */
+	int (* transfer_data)(struct scsi_cmnd *,
+			      void (*done)(struct scsi_cmnd *));
+
+	/* Used as callback for the completion of task management request. */
+	int (* tsk_mgmt_response)(u64 mid, int result);
+
 	/*
 	 * This is an error handling strategy routine. You don't need to
 	 * define one of these if you don't want to - there is a default
@@ -589,6 +623,12 @@ struct Scsi_Host {
 	 */
 	unsigned int max_host_blocked;
 
+	/*
+	 * q used for scsi_tgt msgs, async events or any other requests that
+	 * need to be processed in userspace
+	 */
+	struct request_queue *uspace_req_q;
+
 	/* legacy crap */
 	unsigned long base;
 	unsigned long io_port;
@@ -687,6 +727,9 @@ extern void scsi_unblock_requests(struct Scsi_Host *);
 extern void scsi_block_requests(struct Scsi_Host *);
 
 struct class_container;
+
+extern struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
+						void (*) (struct request_queue *));
 /*
  * These two functions are used to allocate and free a pseudo device
  * which will connect to the host adapter itself rather than any
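Putting the new hooks together, a target-capable LLD would wire them into its scsi_host_template roughly as follows. This is a sketch with hypothetical example_* handlers, not code from the patch; a real driver would usually complete the commands asynchronously from its interrupt handler.

	#include <linux/module.h>
	#include <scsi/scsi.h>
	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_host.h>

	static int example_transfer_response(struct scsi_cmnd *cmd,
					     void (*done)(struct scsi_cmnd *))
	{
		/* queue the status phase to the fabric; result must be
		 * set before done() is called */
		cmd->result = (DID_OK << 16) | SAM_STAT_GOOD;
		done(cmd);
		return 0;
	}

	static int example_transfer_data(struct scsi_cmnd *cmd,
					 void (*done)(struct scsi_cmnd *))
	{
		/* see the earlier offset sketch; stubbed here so the
		 * template below is self-contained */
		done(cmd);
		return 0;
	}

	static int example_tsk_mgmt_response(u64 mid, int result)
	{
		/* acknowledge completion of the task management request
		 * identified by mid */
		return 0;
	}

	static struct scsi_host_template example_tgt_template = {
		.module			= THIS_MODULE,
		.name			= "example_tgt",
		.transfer_response	= example_transfer_response,
		.transfer_data		= example_transfer_data,
		.tsk_mgmt_response	= example_tsk_mgmt_response,
	};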