Commit 9105b8aa authored by Linus Torvalds

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "This is two simple target fixes and one discard related I/O starvation
  problem in sd.

  The discard problem occurs because the discard page has no mempool
  backing, so if the allocation fails under memory pressure we lose the
  forward progress we require when the writeout is to the same device.
  The fix is to back it with a mempool"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: sd: use mempool for discard special page
  scsi: target: iscsi: cxgbit: add missing spin_lock_init()
  scsi: target: iscsi: cxgbit: fix csk leak
parents 1104bd96 61cce6f6
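The heart of the sd change below is swapping a bare alloc_page(GFP_ATOMIC | __GFP_ZERO) for an allocation from a small pre-allocated mempool, plus an explicit clear_highpage() because pool pages are not zeroed. A minimal sketch of that pattern follows; the example_* names and EXAMPLE_POOL_SIZE are illustrative assumptions, not the sd.c symbols.

/* Minimal sketch of the mempool-backed page pattern; example_* names and
 * EXAMPLE_POOL_SIZE are illustrative, not the sd.c symbols. */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/mm.h>

#define EXAMPLE_POOL_SIZE 4             /* pages held in reserve */

static mempool_t *example_page_pool;

static int example_pool_init(void)
{
        /* order 0: a pool of single pages, with the reserve pre-allocated */
        example_page_pool = mempool_create_page_pool(EXAMPLE_POOL_SIZE, 0);
        return example_page_pool ? 0 : -ENOMEM;
}

static struct page *example_get_page(void)
{
        struct page *page;

        /*
         * GFP_ATOMIC can fail under memory pressure; mempool_alloc() then
         * falls back to the reserved pages, so writeout can make forward
         * progress instead of stalling on its own device.  It can still
         * return NULL once the reserve is exhausted.
         */
        page = mempool_alloc(example_page_pool, GFP_ATOMIC);
        if (!page)
                return NULL;

        /* pool pages are not zeroed, unlike alloc_page(... | __GFP_ZERO) */
        clear_highpage(page);
        return page;
}

static void example_put_page(struct page *page)
{
        mempool_free(page, example_page_pool);
}

static void example_pool_exit(void)
{
        mempool_destroy(example_page_pool);
}

mempool_free() returns a page to the reserve whenever the pool is below its minimum, which is why the sd_uninit_command() hunk below switches __free_page() to mempool_free().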
@@ -133,6 +133,7 @@ static DEFINE_MUTEX(sd_ref_mutex);
 
 static struct kmem_cache *sd_cdb_cache;
 static mempool_t *sd_cdb_pool;
+static mempool_t *sd_page_pool;
 
 static const char *sd_cache_types[] = {
         "write through", "none", "write back",
@@ -759,9 +760,10 @@ static int sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
         unsigned int data_len = 24;
         char *buf;
 
-        rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+        rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
         if (!rq->special_vec.bv_page)
                 return BLKPREP_DEFER;
+        clear_highpage(rq->special_vec.bv_page);
         rq->special_vec.bv_offset = 0;
         rq->special_vec.bv_len = data_len;
         rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
@@ -792,9 +794,10 @@ static int sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, bool unmap)
         u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
         u32 data_len = sdp->sector_size;
 
-        rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+        rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
         if (!rq->special_vec.bv_page)
                 return BLKPREP_DEFER;
+        clear_highpage(rq->special_vec.bv_page);
         rq->special_vec.bv_offset = 0;
         rq->special_vec.bv_len = data_len;
         rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
@@ -822,9 +825,10 @@ static int sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, bool unmap)
         u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
         u32 data_len = sdp->sector_size;
 
-        rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+        rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
         if (!rq->special_vec.bv_page)
                 return BLKPREP_DEFER;
+        clear_highpage(rq->special_vec.bv_page);
         rq->special_vec.bv_offset = 0;
         rq->special_vec.bv_len = data_len;
         rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
@@ -1286,7 +1290,7 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
         u8 *cmnd;
 
         if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
-                __free_page(rq->special_vec.bv_page);
+                mempool_free(rq->special_vec.bv_page, sd_page_pool);
 
         if (SCpnt->cmnd != scsi_req(rq)->cmd) {
                 cmnd = SCpnt->cmnd;
@@ -3623,6 +3627,13 @@ static int __init init_sd(void)
                 goto err_out_cache;
         }
 
+        sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
+        if (!sd_page_pool) {
+                printk(KERN_ERR "sd: can't init discard page pool\n");
+                err = -ENOMEM;
+                goto err_out_ppool;
+        }
+
         err = scsi_register_driver(&sd_template.gendrv);
         if (err)
                 goto err_out_driver;
@@ -3630,6 +3641,9 @@ static int __init init_sd(void)
         return 0;
 
 err_out_driver:
+        mempool_destroy(sd_page_pool);
+
+err_out_ppool:
         mempool_destroy(sd_cdb_pool);
 
 err_out_cache:
@@ -3656,6 +3670,7 @@ static void __exit exit_sd(void)
 
         scsi_unregister_driver(&sd_template.gendrv);
         mempool_destroy(sd_cdb_pool);
+        mempool_destroy(sd_page_pool);
         kmem_cache_destroy(sd_cdb_cache);
         class_unregister(&sd_disk_class);
 
@@ -641,8 +641,11 @@ static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
 
 static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
 {
+        struct cxgbit_sock *csk = handle;
+
         pr_debug("%s cxgbit_device %p\n", __func__, handle);
         kfree_skb(skb);
+        cxgbit_put_csk(csk);
 }
 
 static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
@@ -1206,7 +1209,7 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
         rpl5->opt0 = cpu_to_be64(opt0);
         rpl5->opt2 = cpu_to_be32(opt2);
         set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
-        t4_set_arp_err_handler(skb, NULL, cxgbit_arp_failure_discard);
+        t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard);
         cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
 }
 
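The csk leak fix above hinges on handing the csk to t4_set_arp_err_handler() so the ARP-failure path can drop the reference held for the in-flight skb. A hedged sketch of that pairing, with made-up example_* names rather than the cxgbit helpers:

/* Illustrative sketch of the reference pairing behind the csk leak fix;
 * example_* names are hypothetical, not the cxgbit API. */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

struct example_sock {
        struct kref kref;
        /* ... connection state ... */
};

static void example_sock_release(struct kref *kref)
{
        kfree(container_of(kref, struct example_sock, kref));
}

/*
 * Failure callback for a queued skb: it must both free the skb and drop
 * the socket reference the sender still holds, otherwise an ARP failure
 * leaks the socket.  Passing NULL as the handle makes that put impossible.
 */
static void example_arp_failure_discard(void *handle, struct sk_buff *skb)
{
        struct example_sock *esk = handle;

        kfree_skb(skb);
        kref_put(&esk->kref, example_sock_release);
}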
@@ -58,6 +58,7 @@ static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi)
                 return ERR_PTR(-ENOMEM);
 
         kref_init(&cdev->kref);
+        spin_lock_init(&cdev->np_lock);
 
         cdev->lldi = *lldi;
 
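The one-line change above initializes cdev->np_lock before the lock is ever taken. A short sketch of why that matters for a spinlock embedded in dynamically allocated memory; struct example_dev and its fields are hypothetical stand-ins:

/* Sketch only; example_dev is not the cxgbit structure. */
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_dev {
        spinlock_t np_lock;
        /* ... */
};

static struct example_dev *example_dev_alloc(void)
{
        struct example_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        if (!dev)
                return NULL;

        /*
         * kzalloc() zeroing is not valid lock initialization: with
         * CONFIG_DEBUG_SPINLOCK or lockdep enabled, the first spin_lock()
         * on an uninitialized lock triggers "bad magic" / lockdep splats.
         * spin_lock_init() must run before the lock is first taken.
         */
        spin_lock_init(&dev->np_lock);
        return dev;
}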