Commit e4648b01 authored by Ilias Tsitsimpis, committed by Nicholas Bellinger

target/user: Add support for bidirectional commands

Enable TCMU to handle bidirectional SCSI commands. In such cases,
entries in iov[] cover both the Data-In and the Data-Out buffers. The
first iov_cnt entries correspond to the Data-Out buffer, while the
remaining iov_bidi_cnt entries correspond to the Data-In buffer.

Signed-off-by: Ilias Tsitsimpis <iliastsi@arrikto.com>
Signed-off-by: Vangelis Koukis <vkoukis@arrikto.com>
Reviewed-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent f97ec7db
@@ -152,7 +152,7 @@ overall shared memory region, not the entry. The data in/out buffers
 are accessible via tht req.iov[] array. iov_cnt contains the number of
 entries in iov[] needed to describe either the Data-In or Data-Out
 buffers. For bidirectional commands, iov_cnt specifies how many iovec
-entries cover the Data-Out area, and iov_bidi_count specifies how many
+entries cover the Data-Out area, and iov_bidi_cnt specifies how many
 iovec entries immediately after that in iov[] cover the Data-In
 area. Just like other fields, iov.iov_base is an offset from the start
 of the region.
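For illustration only (not part of this patch): a minimal userspace sketch of how a handler might walk the iov[] array of a bidirectional command, following the layout described above. The function and buffer names are hypothetical; "map" is assumed to point at the start of the mmap'd TCMU shared memory region, and each iov_base is an offset from that start.

	#include <stddef.h>
	#include <string.h>
	#include <sys/uio.h>
	#include <linux/target_core_user.h>

	/* Copy the Data-Out bytes out of the data area, then copy the Data-In
	 * bytes back into it. "ent" is a command entry taken from the ring. */
	static void handle_bidi_iovs(void *map, struct tcmu_cmd_entry *ent,
				     void *data_out, const void *data_in)
	{
		struct iovec *iov = ent->req.iov;
		size_t i, off;

		/* The first iov_cnt entries describe the Data-Out buffer. */
		for (i = 0, off = 0; i < ent->req.iov_cnt; i++) {
			memcpy((char *)data_out + off,
			       (char *)map + (size_t)iov[i].iov_base,
			       iov[i].iov_len);
			off += iov[i].iov_len;
		}

		/* The next iov_bidi_cnt entries describe the Data-In buffer. */
		for (off = 0; i < ent->req.iov_cnt + ent->req.iov_bidi_cnt; i++) {
			memcpy((char *)map + (size_t)iov[i].iov_base,
			       (const char *)data_in + off, iov[i].iov_len);
			off += iov[i].iov_len;
		}
	}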
@@ -167,6 +167,11 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
 	tcmu_cmd->tcmu_dev = udev;
 	tcmu_cmd->data_length = se_cmd->data_length;
 
+	if (se_cmd->se_cmd_flags & SCF_BIDI) {
+		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
+		tcmu_cmd->data_length += se_cmd->t_bidi_data_sg->length;
+	}
+
 	tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);
 
 	idr_preload(GFP_KERNEL);
@@ -387,7 +392,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	 * b/c size == offsetof one-past-element.
 	*/
 	base_command_size = max(offsetof(struct tcmu_cmd_entry,
-					 req.iov[se_cmd->t_data_nents + 2]),
+					 req.iov[se_cmd->t_bidi_data_nents +
+						 se_cmd->t_data_nents + 2]),
 				sizeof(struct tcmu_cmd_entry));
 	command_size = base_command_size
 		+ round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
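Not part of the patch: a rough, self-contained illustration of the sizing above. The entry must reserve iovec slots for both directions, so the bidi nents are added before taking offsetof() of one past the last iov element. The struct, nents values, CDB length, and alignment constant below are made up for the example; the driver uses struct tcmu_cmd_entry and TCMU_OP_ALIGN_SIZE.

	#include <stdio.h>
	#include <stddef.h>
	#include <sys/uio.h>

	/* Stand-in for the request side of struct tcmu_cmd_entry; only the
	 * flexible iov[] array matters for this size calculation. */
	struct fake_cmd_entry {
		unsigned int iov_cnt, iov_bidi_cnt, iov_dif_cnt;
		struct iovec iov[];
	};

	#define ALIGN_SZ 64	/* placeholder for TCMU_OP_ALIGN_SIZE */

	int main(void)
	{
		size_t t_data_nents = 4, t_bidi_data_nents = 2, cdb_len = 10;

		/* max(offsetof one-past-last iov, sizeof(entry)), as in the hunk above */
		size_t base = offsetof(struct fake_cmd_entry,
				       iov[t_bidi_data_nents + t_data_nents + 2]);
		if (base < sizeof(struct fake_cmd_entry))
			base = sizeof(struct fake_cmd_entry);

		size_t cmd_size = base +
			((cdb_len + ALIGN_SZ - 1) & ~(size_t)(ALIGN_SZ - 1));
		printf("base_command_size=%zu command_size=%zu\n", base, cmd_size);
		return 0;
	}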
@@ -456,13 +462,19 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 	 */
 	iov = &entry->req.iov[0];
 	iov_cnt = 0;
-	copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE);
+	copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
+		|| se_cmd->se_cmd_flags & SCF_BIDI);
 	alloc_and_scatter_data_area(udev, se_cmd->t_data_sg,
 		se_cmd->t_data_nents, &iov, &iov_cnt, copy_to_data_area);
 	entry->req.iov_cnt = iov_cnt;
-	entry->req.iov_bidi_cnt = 0;
 	entry->req.iov_dif_cnt = 0;
 
+	/* Handle BIDI commands */
+	iov_cnt = 0;
+	alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
+		se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
+	entry->req.iov_bidi_cnt = iov_cnt;
+
 	/* All offsets relative to mb_addr, not start of entry! */
 	cdb_off = CMDR_OFF + cmd_head + base_command_size;
 	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
@@ -535,8 +547,15 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
 			       se_cmd->scsi_sense_length);
 
 		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
-	}
-	else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+	} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
+		/* Discard data_out buffer */
+		UPDATE_HEAD(udev->data_tail,
+			(size_t)se_cmd->t_data_sg->length, udev->data_size);
+
+		/* Get Data-In buffer */
+		gather_and_free_data_area(udev,
+			se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
+	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
 		gather_and_free_data_area(udev,
 			se_cmd->t_data_sg, se_cmd->t_data_nents);
 	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
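Again only an illustration (names and sizes invented): on completion of a bidirectional command the Data-Out bytes still sit at the front of the command's slice of the data area, so the tail is advanced past them before the Data-In bytes are gathered back; the driver's UPDATE_HEAD macro performs an equivalent modulo advance on the shared tail.

	#include <stdio.h>

	/* Toy model of the data-area tail bookkeeping for one BIDI command. */
	static size_t advance(size_t tail, size_t used, size_t size)
	{
		return (tail % size + used) % size;
	}

	int main(void)
	{
		size_t data_size = 65536;	/* made-up data area size */
		size_t data_tail = 61440;	/* made-up current tail */
		size_t data_out_len = 8192;	/* bytes sent by the initiator */
		size_t data_in_len = 4096;	/* bytes returned to the initiator */

		data_tail = advance(data_tail, data_out_len, data_size);	/* discard Data-Out */
		data_tail = advance(data_tail, data_in_len, data_size);	/* gather Data-In */
		printf("data_tail is now %zu\n", data_tail);	/* prints 8192 after wrapping */
		return 0;
	}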