Commit 4099ac93 authored by Hannes Reinecke, committed by Greg Kroah-Hartman

scsi: sg: protect accesses to 'reserved' page array

commit 1bc0eb04 upstream.

The 'reserved' page array is used as a short-cut for mapping data,
saving us from allocating pages per request. However, the 'reserved'
array is only capable of holding one request, so this patch introduces
a mutex to protect 'sg_fd' against concurrent accesses.
Signed-off-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Tested-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

[toddpoynor@google.com: backport to 3.18-4.9, fixup for the bad-ioctl
SG_SET_FORCE_LOW_DMA code, removed in later versions and not modified by
the original patch.]
Signed-off-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Tested-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Todd Poynor <toddpoynor@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent c0c6dff9
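
The locking pattern the patch introduces can be sketched in isolation as follows. This is a minimal userspace illustration of the idea only, not the sg driver's code: the names (demo_fd, demo_get_buffer, demo_put_buffer) are hypothetical, pthread_mutex stands in for the kernel mutex, and the real driver links the request into the reserve scatter list (sg_link_reserve) or builds an indirect buffer (sg_build_indirect) rather than handing back a pointer.

/*
 * Sketch: a per-descriptor mutex serializes the test-and-set of a
 * "reserve buffer in use" flag, so only one request at a time can
 * claim the single pre-allocated buffer; everyone else falls back
 * to a per-request allocation.  (Hypothetical names, not sg.c code.)
 */
#include <pthread.h>
#include <stdlib.h>

struct demo_fd {
	pthread_mutex_t f_mutex;	/* plays the role of sg_fd->f_mutex */
	int res_in_use;			/* plays the role of sg_fd->res_in_use */
	char reserve[8192];		/* stands in for the reserved page array */
};

/* Claim the shared reserve buffer, or fall back to a private allocation. */
void *demo_get_buffer(struct demo_fd *fd, size_t len, int *used_reserve)
{
	void *buf;

	pthread_mutex_lock(&fd->f_mutex);
	if (len <= sizeof(fd->reserve) && !fd->res_in_use) {
		fd->res_in_use = 1;	/* the reserve buffer is ours now */
		*used_reserve = 1;
		buf = fd->reserve;
	} else {
		*used_reserve = 0;
		buf = malloc(len);	/* per-request allocation instead */
	}
	pthread_mutex_unlock(&fd->f_mutex);
	return buf;
}

/* Release the buffer once the request completes. */
void demo_put_buffer(struct demo_fd *fd, void *buf, int used_reserve)
{
	pthread_mutex_lock(&fd->f_mutex);
	if (used_reserve)
		fd->res_in_use = 0;
	else
		free(buf);
	pthread_mutex_unlock(&fd->f_mutex);
}

Without the mutex, two writers could both observe res_in_use == 0 and both claim the single reserve buffer; serializing the check and the update closes that window, which is what the hunks below do around sfp->res_in_use.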
@@ -142,6 +142,7 @@ typedef struct sg_fd {		/* holds the state of a file descriptor */
 	struct sg_device *parentdp;	/* owning device */
 	wait_queue_head_t read_wait;	/* queue read until command done */
 	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
+	struct mutex f_mutex;	/* protect against changes in this fd */
 	int timeout;		/* defaults to SG_DEFAULT_TIMEOUT */
 	int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
 	Sg_scatter_hold reserve;	/* buffer held for this file descriptor */
@@ -155,6 +156,7 @@ typedef struct sg_fd {		/* holds the state of a file descriptor */
 	unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
 	char keep_orphan;	/* 0 -> drop orphan (def), 1 -> keep for read() */
 	char mmap_called;	/* 0 -> mmap() never called on this fd */
+	char res_in_use;	/* 1 -> 'reserve' array in use */
 	struct kref f_ref;
 	struct execute_work ew;
 } Sg_fd;
@@ -198,7 +200,6 @@ static void sg_remove_sfp(struct kref *);
 static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
 static Sg_request *sg_add_request(Sg_fd * sfp);
 static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
-static int sg_res_in_use(Sg_fd * sfp);
 static Sg_device *sg_get_dev(int dev);
 static void sg_device_destroy(struct kref *kref);
 
@@ -614,6 +615,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
 	}
 	buf += SZ_SG_HEADER;
 	__get_user(opcode, buf);
+	mutex_lock(&sfp->f_mutex);
 	if (sfp->next_cmd_len > 0) {
 		cmd_size = sfp->next_cmd_len;
 		sfp->next_cmd_len = 0;	/* reset so only this write() effected */
@@ -622,6 +624,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
 		if ((opcode >= 0xc0) && old_hdr.twelve_byte)
 			cmd_size = 12;
 	}
+	mutex_unlock(&sfp->f_mutex);
 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
 		"sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
 /* Determine buffer size. */
@@ -721,7 +724,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
 			sg_remove_request(sfp, srp);
 			return -EINVAL;	/* either MMAP_IO or DIRECT_IO (not both) */
 		}
-		if (sg_res_in_use(sfp)) {
+		if (sfp->res_in_use) {
 			sg_remove_request(sfp, srp);
 			return -EBUSY;	/* reserve buffer already being used */
 		}
@@ -892,7 +895,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 			return result;
 		if (val) {
 			sfp->low_dma = 1;
-			if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
+			if ((0 == sfp->low_dma) && !sfp->res_in_use) {
 				val = (int) sfp->reserve.bufflen;
 				sg_remove_scat(sfp, &sfp->reserve);
 				sg_build_reserve(sfp, val);
@@ -967,12 +970,18 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 			return -EINVAL;
 		val = min_t(int, val,
 			    max_sectors_bytes(sdp->device->request_queue));
+		mutex_lock(&sfp->f_mutex);
 		if (val != sfp->reserve.bufflen) {
-			if (sg_res_in_use(sfp) || sfp->mmap_called)
+			if (sfp->mmap_called ||
+			    sfp->res_in_use) {
+				mutex_unlock(&sfp->f_mutex);
 				return -EBUSY;
+			}
+
 			sg_remove_scat(sfp, &sfp->reserve);
 			sg_build_reserve(sfp, val);
 		}
+		mutex_unlock(&sfp->f_mutex);
 		return 0;
 	case SG_GET_RESERVED_SIZE:
 		val = min_t(int, sfp->reserve.bufflen,
@@ -1727,13 +1736,22 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
 		md = &map_data;
 
 	if (md) {
-		if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
+		mutex_lock(&sfp->f_mutex);
+		if (dxfer_len <= rsv_schp->bufflen &&
+		    !sfp->res_in_use) {
+			sfp->res_in_use = 1;
 			sg_link_reserve(sfp, srp, dxfer_len);
-		else {
+		} else if ((hp->flags & SG_FLAG_MMAP_IO) && sfp->res_in_use) {
+			mutex_unlock(&sfp->f_mutex);
+			return -EBUSY;
+		} else {
 			res = sg_build_indirect(req_schp, sfp, dxfer_len);
-			if (res)
+			if (res) {
+				mutex_unlock(&sfp->f_mutex);
 				return res;
+			}
 		}
+		mutex_unlock(&sfp->f_mutex);
 
 		md->pages = req_schp->pages;
 		md->page_order = req_schp->page_order;
@@ -2135,6 +2153,7 @@ sg_add_sfp(Sg_device * sdp)
 	rwlock_init(&sfp->rq_list_lock);
 
 	kref_init(&sfp->f_ref);
+	mutex_init(&sfp->f_mutex);
 	sfp->timeout = SG_DEFAULT_TIMEOUT;
 	sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
 	sfp->force_packid = SG_DEF_FORCE_PACK_ID;
@@ -2210,20 +2229,6 @@ sg_remove_sfp(struct kref *kref)
 	schedule_work(&sfp->ew.work);
 }
 
-static int
-sg_res_in_use(Sg_fd * sfp)
-{
-	const Sg_request *srp;
-	unsigned long iflags;
-
-	read_lock_irqsave(&sfp->rq_list_lock, iflags);
-	for (srp = sfp->headrp; srp; srp = srp->nextrp)
-		if (srp->res_used)
-			break;
-	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
-	return srp ? 1 : 0;
-}
-
 #ifdef CONFIG_SCSI_PROC_FS
 static int
 sg_idr_max_id(int id, void *p, void *data)