Commit e64aa657 authored by Christoph Hellwig, committed by Doug Ledford

target: enhance and export target_alloc_sgl/target_free_sgl

The SRP target driver will need to allocate and chain its own SGLs soon.
For this, export target_alloc_sgl and add a new argument to it so that it
can allocate an additional chain entry that doesn't point to a page.  Also
export transport_free_sgl, after renaming it to target_free_sgl, so that
these SGLs can be freed again.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent a060b562
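
Illustrative only, not part of this commit: a minimal sketch of how a fabric driver such as the SRP target might use the new 'chainable' argument to stitch two allocations into a single chained SG list and tear it down again. The function name and the first_len/second_len parameters are made up for the example; the chaining relies on the standard scatterlist helpers sg_unmark_end() and sg_chain() from <linux/scatterlist.h>, and the prototypes exported in target_core_fabric.h by this patch.

/* Hypothetical example, not from this commit. */
static int example_chain_two_sgls(u32 first_len, u32 second_len)
{
	struct scatterlist *first, *second;
	unsigned int first_nents, second_nents;
	int ret;

	/* First list gets one extra, pageless entry at its end (chainable = true). */
	ret = target_alloc_sgl(&first, &first_nents, first_len, false, true);
	if (ret < 0)
		return ret;

	/* Second list is a regular, terminated list. */
	ret = target_alloc_sgl(&second, &second_nents, second_len, false, false);
	if (ret < 0) {
		target_free_sgl(first, first_nents);
		return ret;
	}

	/*
	 * Clear the end mark on the last data entry of the first list and
	 * turn the spare entry into a chain link pointing at the second list.
	 */
	sg_unmark_end(&first[first_nents - 1]);
	sg_chain(first, first_nents + 1, second);

	/* ... map/use the chained list ... */

	/* Each allocation is freed with its own nents count. */
	target_free_sgl(first, first_nents);
	target_free_sgl(second, second_nents);
	return 0;
}
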
@@ -2195,7 +2195,7 @@ static void target_complete_ok_work(struct work_struct *work)
 		transport_handle_queue_full(cmd, cmd->se_dev);
 }
 
-static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
+void target_free_sgl(struct scatterlist *sgl, int nents)
 {
 	struct scatterlist *sg;
 	int count;
@@ -2205,6 +2205,7 @@ static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
 
 	kfree(sgl);
 }
+EXPORT_SYMBOL(target_free_sgl);
 
 static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
 {
@@ -2225,7 +2226,7 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
 static inline void transport_free_pages(struct se_cmd *cmd)
 {
 	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
-		transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
+		target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
 		cmd->t_prot_sg = NULL;
 		cmd->t_prot_nents = 0;
 	}
@@ -2236,7 +2237,7 @@ static inline void transport_free_pages(struct se_cmd *cmd)
 	 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
 	 */
 	if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
-		transport_free_sgl(cmd->t_bidi_data_sg,
+		target_free_sgl(cmd->t_bidi_data_sg,
 				   cmd->t_bidi_data_nents);
 		cmd->t_bidi_data_sg = NULL;
 		cmd->t_bidi_data_nents = 0;
@@ -2246,11 +2247,11 @@ static inline void transport_free_pages(struct se_cmd *cmd)
 	}
 
 	transport_reset_sgl_orig(cmd);
-	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
+	target_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
 	cmd->t_data_sg = NULL;
 	cmd->t_data_nents = 0;
 
-	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
+	target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
 	cmd->t_bidi_data_sg = NULL;
 	cmd->t_bidi_data_nents = 0;
 }
@@ -2324,20 +2325,22 @@ EXPORT_SYMBOL(transport_kunmap_data_sg);
 
 int
 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
-		 bool zero_page)
+		 bool zero_page, bool chainable)
 {
 	struct scatterlist *sg;
 	struct page *page;
 	gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
-	unsigned int nent;
+	unsigned int nalloc, nent;
 	int i = 0;
 
-	nent = DIV_ROUND_UP(length, PAGE_SIZE);
-	sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL);
+	nalloc = nent = DIV_ROUND_UP(length, PAGE_SIZE);
+	if (chainable)
+		nalloc++;
+	sg = kmalloc_array(nalloc, sizeof(struct scatterlist), GFP_KERNEL);
 	if (!sg)
 		return -ENOMEM;
 
-	sg_init_table(sg, nent);
+	sg_init_table(sg, nalloc);
 
 	while (length) {
 		u32 page_len = min_t(u32, length, PAGE_SIZE);
@@ -2361,6 +2364,7 @@ target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
 	kfree(sg);
 	return -ENOMEM;
 }
+EXPORT_SYMBOL(target_alloc_sgl);
 
 /*
  * Allocate any required resources to execute the command. For writes we
@@ -2376,7 +2380,7 @@ transport_generic_new_cmd(struct se_cmd *cmd)
 	if (cmd->prot_op != TARGET_PROT_NORMAL &&
 	    !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
 		ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
-				       cmd->prot_length, true);
+				       cmd->prot_length, true, false);
 		if (ret < 0)
 			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 	}
@@ -2401,13 +2405,13 @@ transport_generic_new_cmd(struct se_cmd *cmd)
 
 			ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
 					       &cmd->t_bidi_data_nents,
-					       bidi_length, zero_flag);
+					       bidi_length, zero_flag, false);
 			if (ret < 0)
 				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 		}
 
 		ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
-				       cmd->data_length, zero_flag);
+				       cmd->data_length, zero_flag, false);
 		if (ret < 0)
 			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 	} else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
@@ -2421,7 +2425,7 @@ transport_generic_new_cmd(struct se_cmd *cmd)
 
 		ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
 				       &cmd->t_bidi_data_nents,
-				       caw_length, zero_flag);
+				       caw_length, zero_flag, false);
 		if (ret < 0)
 			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 	}
@@ -563,7 +563,7 @@ static int target_xcopy_setup_pt_cmd(
 
 	if (alloc_mem) {
 		rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
-				      cmd->data_length, false);
+				      cmd->data_length, false, false);
 		if (rc < 0) {
 			ret = rc;
 			goto out;
@@ -85,7 +85,6 @@ extern struct configfs_attribute *passthrough_attrib_attrs[];
 void	*transport_kmap_data_sg(struct se_cmd *);
 void	transport_kunmap_data_sg(struct se_cmd *);
 /* core helpers also used by xcopy during internal command setup */
-int	target_alloc_sgl(struct scatterlist **, unsigned int *, u32, bool);
 sense_reason_t	transport_generic_map_mem_to_cmd(struct se_cmd *,
 		struct scatterlist *, u32, struct scatterlist *, u32);
@@ -185,6 +185,10 @@ int core_tpg_set_initiator_node_tag(struct se_portal_group *,
 int	core_tpg_register(struct se_wwn *, struct se_portal_group *, int);
 int	core_tpg_deregister(struct se_portal_group *);
 
+int	target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
+		u32 length, bool zero_page, bool chainable);
+void	target_free_sgl(struct scatterlist *sgl, int nents);
+
 /*
  * The LIO target core uses DMA_TO_DEVICE to mean that data is going
  * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean