Commit c7b279ae authored by Christof Schmitt's avatar Christof Schmitt Committed by James Bottomley

[SCSI] zfcp: Replace kmem_cache for "status read" data

zfcp requires a mempool for the status read data blocks to resubmit
the "status read" requests at any time. Each status read data block
has the size of a page (4096 bytes) and needs to be placed in one
page.

Instead of having a kmem_cache for allocating page sized chunks, use
mempool_create_page_pool to create a mempool returning pages and
remove the zfcp kmem_cache.
Signed-off-by: Christof Schmitt <christof.schmitt@de.ibm.com>
Signed-off-by: Steffen Maier <maier@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
parent 7c35e77b
...@@ -132,11 +132,6 @@ static int __init zfcp_module_init(void) ...@@ -132,11 +132,6 @@ static int __init zfcp_module_init(void)
if (!zfcp_data.qtcb_cache) if (!zfcp_data.qtcb_cache)
goto out_qtcb_cache; goto out_qtcb_cache;
zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr",
sizeof(struct fsf_status_read_buffer));
if (!zfcp_data.sr_buffer_cache)
goto out_sr_cache;
zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid", zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid",
sizeof(struct zfcp_fc_gid_pn)); sizeof(struct zfcp_fc_gid_pn));
if (!zfcp_data.gid_pn_cache) if (!zfcp_data.gid_pn_cache)
...@@ -181,8 +176,6 @@ static int __init zfcp_module_init(void) ...@@ -181,8 +176,6 @@ static int __init zfcp_module_init(void)
out_adisc_cache: out_adisc_cache:
kmem_cache_destroy(zfcp_data.gid_pn_cache); kmem_cache_destroy(zfcp_data.gid_pn_cache);
out_gid_cache: out_gid_cache:
kmem_cache_destroy(zfcp_data.sr_buffer_cache);
out_sr_cache:
kmem_cache_destroy(zfcp_data.qtcb_cache); kmem_cache_destroy(zfcp_data.qtcb_cache);
out_qtcb_cache: out_qtcb_cache:
kmem_cache_destroy(zfcp_data.gpn_ft_cache); kmem_cache_destroy(zfcp_data.gpn_ft_cache);
...@@ -199,7 +192,6 @@ static void __exit zfcp_module_exit(void) ...@@ -199,7 +192,6 @@ static void __exit zfcp_module_exit(void)
fc_release_transport(zfcp_data.scsi_transport_template); fc_release_transport(zfcp_data.scsi_transport_template);
kmem_cache_destroy(zfcp_data.adisc_cache); kmem_cache_destroy(zfcp_data.adisc_cache);
kmem_cache_destroy(zfcp_data.gid_pn_cache); kmem_cache_destroy(zfcp_data.gid_pn_cache);
kmem_cache_destroy(zfcp_data.sr_buffer_cache);
kmem_cache_destroy(zfcp_data.qtcb_cache); kmem_cache_destroy(zfcp_data.qtcb_cache);
kmem_cache_destroy(zfcp_data.gpn_ft_cache); kmem_cache_destroy(zfcp_data.gpn_ft_cache);
} }
...@@ -264,10 +256,10 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) ...@@ -264,10 +256,10 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
if (!adapter->pool.qtcb_pool) if (!adapter->pool.qtcb_pool)
return -ENOMEM; return -ENOMEM;
adapter->pool.status_read_data = BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE);
mempool_create_slab_pool(FSF_STATUS_READS_RECOM, adapter->pool.sr_data =
zfcp_data.sr_buffer_cache); mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0);
if (!adapter->pool.status_read_data) if (!adapter->pool.sr_data)
return -ENOMEM; return -ENOMEM;
adapter->pool.gid_pn = adapter->pool.gid_pn =
...@@ -290,8 +282,8 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) ...@@ -290,8 +282,8 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
mempool_destroy(adapter->pool.qtcb_pool); mempool_destroy(adapter->pool.qtcb_pool);
if (adapter->pool.status_read_req) if (adapter->pool.status_read_req)
mempool_destroy(adapter->pool.status_read_req); mempool_destroy(adapter->pool.status_read_req);
if (adapter->pool.status_read_data) if (adapter->pool.sr_data)
mempool_destroy(adapter->pool.status_read_data); mempool_destroy(adapter->pool.sr_data);
if (adapter->pool.gid_pn) if (adapter->pool.gid_pn)
mempool_destroy(adapter->pool.gid_pn); mempool_destroy(adapter->pool.gid_pn);
} }
......
...@@ -107,7 +107,7 @@ struct zfcp_adapter_mempool { ...@@ -107,7 +107,7 @@ struct zfcp_adapter_mempool {
mempool_t *scsi_req; mempool_t *scsi_req;
mempool_t *scsi_abort; mempool_t *scsi_abort;
mempool_t *status_read_req; mempool_t *status_read_req;
mempool_t *status_read_data; mempool_t *sr_data;
mempool_t *gid_pn; mempool_t *gid_pn;
mempool_t *qtcb_pool; mempool_t *qtcb_pool;
}; };
...@@ -319,7 +319,6 @@ struct zfcp_data { ...@@ -319,7 +319,6 @@ struct zfcp_data {
struct scsi_transport_template *scsi_transport_template; struct scsi_transport_template *scsi_transport_template;
struct kmem_cache *gpn_ft_cache; struct kmem_cache *gpn_ft_cache;
struct kmem_cache *qtcb_cache; struct kmem_cache *qtcb_cache;
struct kmem_cache *sr_buffer_cache;
struct kmem_cache *gid_pn_cache; struct kmem_cache *gid_pn_cache;
struct kmem_cache *adisc_cache; struct kmem_cache *adisc_cache;
}; };
......
...@@ -732,7 +732,7 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act) ...@@ -732,7 +732,7 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED) if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
return ZFCP_ERP_FAILED; return ZFCP_ERP_FAILED;
if (mempool_resize(act->adapter->pool.status_read_data, if (mempool_resize(act->adapter->pool.sr_data,
act->adapter->stat_read_buf_num, GFP_KERNEL)) act->adapter->stat_read_buf_num, GFP_KERNEL))
return ZFCP_ERP_FAILED; return ZFCP_ERP_FAILED;
......
...@@ -212,7 +212,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) ...@@ -212,7 +212,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
zfcp_dbf_hba_fsf_uss("fssrh_1", req); zfcp_dbf_hba_fsf_uss("fssrh_1", req);
mempool_free(sr_buf, adapter->pool.status_read_data); mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
zfcp_fsf_req_free(req); zfcp_fsf_req_free(req);
return; return;
} }
...@@ -265,7 +265,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) ...@@ -265,7 +265,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
break; break;
} }
mempool_free(sr_buf, adapter->pool.status_read_data); mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
zfcp_fsf_req_free(req); zfcp_fsf_req_free(req);
atomic_inc(&adapter->stat_miss); atomic_inc(&adapter->stat_miss);
...@@ -723,6 +723,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio) ...@@ -723,6 +723,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
struct zfcp_adapter *adapter = qdio->adapter; struct zfcp_adapter *adapter = qdio->adapter;
struct zfcp_fsf_req *req; struct zfcp_fsf_req *req;
struct fsf_status_read_buffer *sr_buf; struct fsf_status_read_buffer *sr_buf;
struct page *page;
int retval = -EIO; int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock); spin_lock_irq(&qdio->req_q_lock);
...@@ -736,11 +737,12 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio) ...@@ -736,11 +737,12 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
goto out; goto out;
} }
sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC); page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
if (!sr_buf) { if (!page) {
retval = -ENOMEM; retval = -ENOMEM;
goto failed_buf; goto failed_buf;
} }
sr_buf = page_address(page);
memset(sr_buf, 0, sizeof(*sr_buf)); memset(sr_buf, 0, sizeof(*sr_buf));
req->data = sr_buf; req->data = sr_buf;
...@@ -755,7 +757,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio) ...@@ -755,7 +757,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
failed_req_send: failed_req_send:
req->data = NULL; req->data = NULL;
mempool_free(sr_buf, adapter->pool.status_read_data); mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
failed_buf: failed_buf:
zfcp_dbf_hba_fsf_uss("fssr__1", req); zfcp_dbf_hba_fsf_uss("fssr__1", req);
zfcp_fsf_req_free(req); zfcp_fsf_req_free(req);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment