Commit f385b697 authored by Linus Torvalds

Merge branch '3.1-rc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

* '3.1-rc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (21 commits)
  target: Convert acl_node_lock to be IRQ-disabling
  target: Make locking in transport_deregister_session() IRQ safe
  tcm_fc: init/exit functions should not be protected by "#ifdef MODULE"
  target: Print subpage too for unhandled MODE SENSE pages
  iscsi-target: Fix iscsit_allocate_se_cmd_for_tmr failure path bugs
  iscsi-target: Implement iSCSI target IPv6 address printing.
  target: Fix task SGL chaining breakage with transport_allocate_data_tasks
  target: Fix task count > 1 handling breakage and use max_sector page alignment
  target: Add missing DATA_SG_IO transport_cmd_get_valid_sectors check
  target: Fix SYNCHRONIZE_CACHE zero LBA + range breakage
  target: Remove duplicate task completions in transport_emulate_control_cdb
  target: Fix WRITE_SAME usage with transport_get_size
  target: Add WRITE_SAME (10) parsing and refactor passthrough checks
  target: Fix write payload exception handling with ->new_cmd_map
  iscsi-target: forever loop bug in iscsit_attach_ooo_cmdsn()
  iscsi-target: remove duplicate return
  target: Convert target_core_rd.c to use BUG_ON
  iscsi-target: Fix leak on failure in iscsi_copy_param_list()
  target: Use ERR_CAST inlined function
  target: Make standard INQUIRY return 'not connected' for tpg_virt_lun0
  ...
parents be27425d 28638887
@@ -2243,7 +2243,6 @@ static int iscsit_handle_snack(
     case 0:
         return iscsit_handle_recovery_datain_or_r2t(conn, buf,
             hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength);
-        return 0;
     case ISCSI_FLAG_SNACK_TYPE_STATUS:
         return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt,
             hdr->begrun, hdr->runlength);
...
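The deleted `return 0;` sat immediately after an unconditional return, so it could never execute. A minimal stand-alone illustration of the pattern (hypothetical code, not from this driver):

/* Hypothetical illustration: the second return can never execute. */
static int classify(int type)
{
    if (type == 0) {
        return 1;   /* control always leaves here */
        return 0;   /* dead code -- what this hunk removes */
    }
    return -1;
}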
@@ -268,7 +268,7 @@ struct se_tpg_np *lio_target_call_addnptotpg(
             ISCSI_TCP);
     if (IS_ERR(tpg_np)) {
         iscsit_put_tpg(tpg);
-        return ERR_PTR(PTR_ERR(tpg_np));
+        return ERR_CAST(tpg_np);
     }
     pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n");
@@ -1285,7 +1285,7 @@ struct se_wwn *lio_target_call_coreaddtiqn(
     tiqn = iscsit_add_tiqn((unsigned char *)name);
     if (IS_ERR(tiqn))
-        return ERR_PTR(PTR_ERR(tiqn));
+        return ERR_CAST(tiqn);
     /*
      * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group.
      */
...
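The ERR_CAST() conversions above replace the ERR_PTR(PTR_ERR(...)) round trip, which decodes an error pointer to an int only to re-encode it as a differently typed pointer. ERR_CAST() (from <linux/err.h>) forwards the same bits in one call and documents the intent. A minimal sketch of the pattern, with hypothetical foo/bar types:

#include <linux/err.h>

struct foo;
struct bar;

struct foo *make_foo(void);     /* hypothetical callee */

struct bar *make_bar(void)
{
    struct foo *f = make_foo();

    if (IS_ERR(f))
        return ERR_CAST(f);     /* was: ERR_PTR(PTR_ERR(f)) */
    /* ... construct a bar from f ... */
    return NULL;
}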
@@ -834,7 +834,7 @@ static int iscsit_attach_ooo_cmdsn(
      */
     list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
             ooo_list) {
-        while (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
+        if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
             continue;

         list_add(&ooo_cmdsn->ooo_list,
...
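In the hunk above, `while (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) continue;` spins forever on the first entry that satisfies the condition: the loop body never advances anything, so the test never changes. With `if`, the `continue` belongs to the enclosing list_for_each_entry() and simply steps to the next list entry. A self-contained illustration of the bug class (hypothetical array scan, not driver code):

/* Find the index of the first element not less than key. */
int first_not_less(const int *v, int n, int key)
{
    int i;

    for (i = 0; i < n; i++) {
        if (v[i] < key)     /* was: while (v[i] < key) -- infinite loop */
            continue;       /* advances via the for loop */
        break;
    }
    return i;
}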
@@ -1013,19 +1013,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
                 ISCSI_LOGIN_STATUS_TARGET_ERROR);
             goto new_sess_out;
         }
-#if 0
-        if (!iscsi_ntop6((const unsigned char *)
-                &sock_in6.sin6_addr.in6_u,
-                (char *)&conn->ipv6_login_ip[0],
-                IPV6_ADDRESS_SPACE)) {
-            pr_err("iscsi_ntop6() failed\n");
-            iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-                ISCSI_LOGIN_STATUS_TARGET_ERROR);
-            goto new_sess_out;
-        }
-#else
-        pr_debug("Skipping iscsi_ntop6()\n");
-#endif
+        snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
+                &sock_in6.sin6_addr.in6_u);
+        conn->login_port = ntohs(sock_in6.sin6_port);
     } else {
         memset(&sock_in, 0, sizeof(struct sockaddr_in));
...
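The replacement code relies on the kernel's %p format extensions: %pI6c takes a pointer to a struct in6_addr and prints the RFC 5952 compressed form (e.g. "::1"), which is why the stubbed-out hand-rolled iscsi_ntop6() helper and its failure path can go away entirely. A minimal sketch of the same idiom (hypothetical helper, kernel context assumed):

#include <linux/kernel.h>
#include <linux/in6.h>

static void format_peer_ip(const struct sockaddr_in6 *sa,
                char *buf, size_t len)
{
    /* %pI6c: compressed IPv6 printing supplied by vsnprintf() */
    snprintf(buf, len, "%pI6c", &sa->sin6_addr);
}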
@@ -545,13 +545,13 @@ int iscsi_copy_param_list(
     struct iscsi_param_list *src_param_list,
     int leading)
 {
-    struct iscsi_param *new_param = NULL, *param = NULL;
+    struct iscsi_param *param = NULL;
+    struct iscsi_param *new_param = NULL;
     struct iscsi_param_list *param_list = NULL;

     param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
     if (!param_list) {
-        pr_err("Unable to allocate memory for"
-            " struct iscsi_param_list.\n");
+        pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
         goto err_out;
     }
     INIT_LIST_HEAD(&param_list->param_list);
@@ -567,8 +567,17 @@ int iscsi_copy_param_list(
         new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
         if (!new_param) {
-            pr_err("Unable to allocate memory for"
-                " struct iscsi_param.\n");
+            pr_err("Unable to allocate memory for struct iscsi_param.\n");
+            goto err_out;
+        }
+
+        new_param->name = kstrdup(param->name, GFP_KERNEL);
+        new_param->value = kstrdup(param->value, GFP_KERNEL);
+        if (!new_param->value || !new_param->name) {
+            kfree(new_param->value);
+            kfree(new_param->name);
+            kfree(new_param);
+            pr_err("Unable to allocate memory for parameter name/value.\n");
             goto err_out;
         }
@@ -580,32 +589,12 @@ int iscsi_copy_param_list(
         new_param->use = param->use;
         new_param->type_range = param->type_range;

-        new_param->name = kzalloc(strlen(param->name) + 1, GFP_KERNEL);
-        if (!new_param->name) {
-            pr_err("Unable to allocate memory for"
-                " parameter name.\n");
-            goto err_out;
-        }
-
-        new_param->value = kzalloc(strlen(param->value) + 1,
-                GFP_KERNEL);
-        if (!new_param->value) {
-            pr_err("Unable to allocate memory for"
-                " parameter value.\n");
-            goto err_out;
-        }
-
-        memcpy(new_param->name, param->name, strlen(param->name));
-        new_param->name[strlen(param->name)] = '\0';
-        memcpy(new_param->value, param->value, strlen(param->value));
-        new_param->value[strlen(param->value)] = '\0';
-
         list_add_tail(&new_param->p_list, &param_list->param_list);
     }

-    if (!list_empty(&param_list->param_list))
+    if (!list_empty(&param_list->param_list)) {
         *dst_param_list = param_list;
-    else {
+    } else {
         pr_err("No parameters allocated.\n");
         goto err_out;
     }
...
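The rewritten copy loop replaces four open-coded kzalloc()+memcpy()+NUL-termination blocks with kstrdup(), and performs one combined check after both duplications; since kfree(NULL) is a no-op, the partial-failure cleanup needs no extra branches. A sketch of the idiom with a hypothetical pair type:

#include <linux/slab.h>
#include <linux/string.h>

struct kv_pair { char *name; char *value; };

static int kv_pair_fill(struct kv_pair *p, const char *n, const char *v)
{
    p->name  = kstrdup(n, GFP_KERNEL);
    p->value = kstrdup(v, GFP_KERNEL);
    if (!p->name || !p->value) {
        kfree(p->name);     /* kfree(NULL) is safe */
        kfree(p->value);
        return -ENOMEM;
    }
    return 0;
}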
@@ -243,7 +243,7 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
     if (!cmd->tmr_req) {
         pr_err("Unable to allocate memory for"
             " Task Management command!\n");
-        return NULL;
+        goto out;
     }
     /*
      * TASK_REASSIGN for ERL=2 / connection stays inside of
@@ -298,8 +298,6 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
     return cmd;
 out:
     iscsit_release_cmd(cmd);
-    if (se_cmd)
-        transport_free_se_cmd(se_cmd);
     return NULL;
 }
...
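Both hunks above tighten the function's single-exit error path: the tmr_req allocation failure now jumps to the common `out:` label so the previously allocated command is released (a plain `return NULL` leaked it), and the redundant transport_free_se_cmd() call is dropped from that label. The general shape of the idiom, as a hypothetical stand-alone example:

#include <linux/slab.h>

struct widget { int *a; int *b; };

static struct widget *widget_create(void)
{
    struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

    if (!w)
        return NULL;
    w->a = kmalloc(sizeof(*w->a), GFP_KERNEL);
    if (!w->a)
        goto out;
    w->b = kmalloc(sizeof(*w->b), GFP_KERNEL);
    if (!w->b)
        goto out;
    return w;
out:
    kfree(w->a);    /* kfree(NULL) is a no-op */
    kfree(w);
    return NULL;
}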
@@ -67,6 +67,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
 {
     struct se_lun *lun = cmd->se_lun;
     struct se_device *dev = cmd->se_dev;
+    struct se_portal_group *tpg = lun->lun_sep->sep_tpg;
     unsigned char *buf;

     /*
@@ -81,9 +82,13 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
     buf = transport_kmap_first_data_page(cmd);

-    buf[0] = dev->transport->get_device_type(dev);
-    if (buf[0] == TYPE_TAPE)
-        buf[1] = 0x80;
+    if (dev == tpg->tpg_virt_lun0.lun_se_dev) {
+        buf[0] = 0x3f; /* Not connected */
+    } else {
+        buf[0] = dev->transport->get_device_type(dev);
+        if (buf[0] == TYPE_TAPE)
+            buf[1] = 0x80;
+    }
     buf[2] = dev->transport->get_device_rev(dev);
     /*
@@ -915,8 +920,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
         length += target_modesense_control(dev, &buf[offset+length]);
         break;
     default:
-        pr_err("Got Unknown Mode Page: 0x%02x\n",
-            cdb[2] & 0x3f);
+        pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
+            cdb[2] & 0x3f, cdb[3]);
         return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
     }
     offset += length;
@@ -1072,8 +1077,6 @@ target_emulate_unmap(struct se_task *task)
         size -= 16;
     }

-    task->task_scsi_status = GOOD;
-    transport_complete_task(task, 1);
 err:
     transport_kunmap_first_data_page(cmd);
@@ -1085,24 +1088,17 @@ target_emulate_unmap(struct se_task *task)
  * Note this is not used for TCM/pSCSI passthrough
  */
 static int
-target_emulate_write_same(struct se_task *task, int write_same32)
+target_emulate_write_same(struct se_task *task, u32 num_blocks)
 {
     struct se_cmd *cmd = task->task_se_cmd;
     struct se_device *dev = cmd->se_dev;
     sector_t range;
     sector_t lba = cmd->t_task_lba;
-    unsigned int num_blocks;
     int ret;
     /*
-     * Extract num_blocks from the WRITE_SAME_* CDB. Then use the explict
-     * range when non zero is supplied, otherwise calculate the remaining
-     * range based on ->get_blocks() - starting LBA.
+     * Use the explicit range when non zero is supplied, otherwise calculate
+     * the remaining range based on ->get_blocks() - starting LBA.
      */
-    if (write_same32)
-        num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
-    else
-        num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
-
     if (num_blocks != 0)
         range = num_blocks;
     else
@@ -1117,8 +1113,6 @@ target_emulate_write_same(struct se_task *task, int write_same32)
         return ret;
     }

-    task->task_scsi_status = GOOD;
-    transport_complete_task(task, 1);
     return 0;
 }
@@ -1165,13 +1159,23 @@ transport_emulate_control_cdb(struct se_task *task)
         }
         ret = target_emulate_unmap(task);
         break;
+    case WRITE_SAME:
+        if (!dev->transport->do_discard) {
+            pr_err("WRITE_SAME emulation not supported"
+                " for: %s\n", dev->transport->name);
+            return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+        }
+        ret = target_emulate_write_same(task,
+                get_unaligned_be16(&cmd->t_task_cdb[7]));
+        break;
     case WRITE_SAME_16:
         if (!dev->transport->do_discard) {
             pr_err("WRITE_SAME_16 emulation not supported"
                 " for: %s\n", dev->transport->name);
             return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
         }
-        ret = target_emulate_write_same(task, 0);
+        ret = target_emulate_write_same(task,
+                get_unaligned_be32(&cmd->t_task_cdb[10]));
         break;
     case VARIABLE_LENGTH_CMD:
         service_action =
@@ -1184,7 +1188,8 @@ transport_emulate_control_cdb(struct se_task *task)
                 dev->transport->name);
             return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
         }
-        ret = target_emulate_write_same(task, 1);
+        ret = target_emulate_write_same(task,
+                get_unaligned_be32(&cmd->t_task_cdb[28]));
         break;
     default:
         pr_err("Unsupported VARIABLE_LENGTH_CMD SA:"
@@ -1219,8 +1224,14 @@ transport_emulate_control_cdb(struct se_task *task)
     if (ret < 0)
         return ret;
-    task->task_scsi_status = GOOD;
-    transport_complete_task(task, 1);
+    /*
+     * Handle the successful completion here unless a caller
+     * has explicitly requested an asynchronous completion.
+     */
+    if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
+        task->task_scsi_status = GOOD;
+        transport_complete_task(task, 1);
+    }
     return PYX_TRANSPORT_SENT_TO_TRANSPORT;
 }
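With this refactoring, each caller extracts the NUMBER OF BLOCKS field at its opcode-specific CDB offset and passes the decoded value to one shared emulation routine: big-endian 16 bits at byte 7 for WRITE SAME (10), 32 bits at byte 10 for WRITE SAME (16), and 32 bits at byte 28 for the WRITE SAME (32) service action. A compact sketch of the decoding (hypothetical helper; offsets per SBC):

#include <linux/types.h>
#include <asm/unaligned.h>

static u32 write_same_num_blocks(const u8 *cdb, int variant)
{
    switch (variant) {
    case 10:
        return get_unaligned_be16(&cdb[7]);
    case 16:
        return get_unaligned_be32(&cdb[10]);
    case 32:
        return get_unaligned_be32(&cdb[28]);
    }
    return 0;
}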
@@ -472,9 +472,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
     struct se_dev_entry *deve;
     u32 i;

-    spin_lock_bh(&tpg->acl_node_lock);
+    spin_lock_irq(&tpg->acl_node_lock);
     list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
-        spin_unlock_bh(&tpg->acl_node_lock);
+        spin_unlock_irq(&tpg->acl_node_lock);

         spin_lock_irq(&nacl->device_list_lock);
         for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
@@ -491,9 +491,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
         }
         spin_unlock_irq(&nacl->device_list_lock);

-        spin_lock_bh(&tpg->acl_node_lock);
+        spin_lock_irq(&tpg->acl_node_lock);
     }
-    spin_unlock_bh(&tpg->acl_node_lock);
+    spin_unlock_irq(&tpg->acl_node_lock);
 }

 static struct se_port *core_alloc_port(struct se_device *dev)
@@ -839,6 +839,24 @@ int se_dev_check_shutdown(struct se_device *dev)
     return ret;
 }

+u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
+{
+    u32 tmp, aligned_max_sectors;
+    /*
+     * Limit max_sectors to a PAGE_SIZE aligned value for modern
+     * transport_allocate_data_tasks() operation.
+     */
+    tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
+    aligned_max_sectors = (tmp / block_size);
+    if (max_sectors != aligned_max_sectors) {
+        printk(KERN_INFO "Rounding down aligned max_sectors from %u"
+            " to %u\n", max_sectors, aligned_max_sectors);
+        return aligned_max_sectors;
+    }
+    return max_sectors;
+}
+
 void se_dev_set_default_attribs(
     struct se_device *dev,
     struct se_dev_limits *dev_limits)
@@ -878,6 +896,11 @@ void se_dev_set_default_attribs(
      * max_sectors is based on subsystem plugin dependent requirements.
      */
     dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
+    /*
+     * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+     */
+    limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
+            limits->logical_block_size);
     dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
     /*
      * Set optimal_sectors from max_sectors, which can be lowered via
@@ -1242,6 +1265,11 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
             return -EINVAL;
         }
     }
+    /*
+     * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+     */
+    max_sectors = se_dev_align_max_sectors(max_sectors,
+            dev->se_sub_dev->se_dev_attrib.block_size);

     dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
     pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
@@ -1344,15 +1372,17 @@ struct se_lun *core_dev_add_lun(
      */
     if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
         struct se_node_acl *acl;
-        spin_lock_bh(&tpg->acl_node_lock);
+        spin_lock_irq(&tpg->acl_node_lock);
         list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
-            if (acl->dynamic_node_acl) {
-                spin_unlock_bh(&tpg->acl_node_lock);
+            if (acl->dynamic_node_acl &&
+                (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
+                 !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
+                spin_unlock_irq(&tpg->acl_node_lock);
                 core_tpg_add_node_to_devs(acl, tpg);
-                spin_lock_bh(&tpg->acl_node_lock);
+                spin_lock_irq(&tpg->acl_node_lock);
             }
         }
-        spin_unlock_bh(&tpg->acl_node_lock);
+        spin_unlock_irq(&tpg->acl_node_lock);
     }

     return lun_p;
...
@@ -481,7 +481,7 @@ static struct config_group *target_fabric_make_nodeacl(
     se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);
     if (IS_ERR(se_nacl))
-        return ERR_PTR(PTR_ERR(se_nacl));
+        return ERR_CAST(se_nacl);

     nacl_cg = &se_nacl->acl_group;
     nacl_cg->default_groups = se_nacl->acl_default_groups;
...
@@ -1598,14 +1598,14 @@ static int core_scsi3_decode_spec_i_port(
          * from the decoded fabric module specific TransportID
          * at *i_str.
          */
-        spin_lock_bh(&tmp_tpg->acl_node_lock);
+        spin_lock_irq(&tmp_tpg->acl_node_lock);
         dest_node_acl = __core_tpg_get_initiator_node_acl(
                 tmp_tpg, i_str);
         if (dest_node_acl) {
             atomic_inc(&dest_node_acl->acl_pr_ref_count);
             smp_mb__after_atomic_inc();
         }
-        spin_unlock_bh(&tmp_tpg->acl_node_lock);
+        spin_unlock_irq(&tmp_tpg->acl_node_lock);

         if (!dest_node_acl) {
             core_scsi3_tpg_undepend_item(tmp_tpg);
@@ -3496,14 +3496,14 @@ static int core_scsi3_emulate_pro_register_and_move(
     /*
      * Locate the destination struct se_node_acl from the received Transport ID
      */
-    spin_lock_bh(&dest_se_tpg->acl_node_lock);
+    spin_lock_irq(&dest_se_tpg->acl_node_lock);
     dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
             initiator_str);
     if (dest_node_acl) {
         atomic_inc(&dest_node_acl->acl_pr_ref_count);
         smp_mb__after_atomic_inc();
     }
-    spin_unlock_bh(&dest_se_tpg->acl_node_lock);
+    spin_unlock_irq(&dest_se_tpg->acl_node_lock);

     if (!dest_node_acl) {
         pr_err("Unable to locate %s dest_node_acl for"
...
@@ -390,12 +390,10 @@ static int rd_MEMCPY_read(struct rd_request *req)
             length = req->rd_size;

         dst = sg_virt(&sg_d[i++]) + dst_offset;
-        if (!dst)
-            BUG();
+        BUG_ON(!dst);

         src = sg_virt(&sg_s[j]) + src_offset;
-        if (!src)
-            BUG();
+        BUG_ON(!src);

         dst_offset = 0;
         src_offset = length;
@@ -415,8 +413,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
             length = req->rd_size;

         dst = sg_virt(&sg_d[i]) + dst_offset;
-        if (!dst)
-            BUG();
+        BUG_ON(!dst);

         if (sg_d[i].length == length) {
             i++;
@@ -425,8 +422,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
             dst_offset = length;

         src = sg_virt(&sg_s[j++]) + src_offset;
-        if (!src)
-            BUG();
+        BUG_ON(!src);

         src_offset = 0;
         page_end = 1;
@@ -510,12 +506,10 @@ static int rd_MEMCPY_write(struct rd_request *req)
             length = req->rd_size;

         src = sg_virt(&sg_s[i++]) + src_offset;
-        if (!src)
-            BUG();
+        BUG_ON(!src);

         dst = sg_virt(&sg_d[j]) + dst_offset;
-        if (!dst)
-            BUG();
+        BUG_ON(!dst);

         src_offset = 0;
         dst_offset = length;
@@ -535,8 +529,7 @@ static int rd_MEMCPY_write(struct rd_request *req)
             length = req->rd_size;

         src = sg_virt(&sg_s[i]) + src_offset;
-        if (!src)
-            BUG();
+        BUG_ON(!src);

         if (sg_s[i].length == length) {
             i++;
@@ -545,8 +538,7 @@ static int rd_MEMCPY_write(struct rd_request *req)
             src_offset = length;

         dst = sg_virt(&sg_d[j++]) + dst_offset;
-        if (!dst)
-            BUG();
+        BUG_ON(!dst);

         dst_offset = 0;
         page_end = 1;
...
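BUG_ON(cond) from <linux/bug.h> expands to the same trap as `if (cond) BUG();` but keeps each assertion on a single line, which is all these hunks change. A minimal before/after sketch (hypothetical function):

#include <linux/bug.h>

static void check_mapping(void *dst, void *src)
{
    BUG_ON(!dst);   /* was: if (!dst) BUG(); */
    BUG_ON(!src);   /* was: if (!src) BUG(); */
}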
@@ -137,15 +137,15 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
 {
     struct se_node_acl *acl;

-    spin_lock_bh(&tpg->acl_node_lock);
+    spin_lock_irq(&tpg->acl_node_lock);
     list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
         if (!strcmp(acl->initiatorname, initiatorname) &&
             !acl->dynamic_node_acl) {
-            spin_unlock_bh(&tpg->acl_node_lock);
+            spin_unlock_irq(&tpg->acl_node_lock);
             return acl;
         }
     }
-    spin_unlock_bh(&tpg->acl_node_lock);
+    spin_unlock_irq(&tpg->acl_node_lock);

     return NULL;
 }
@@ -298,13 +298,21 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
         tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
         return NULL;
     }
+    /*
+     * Here we only create demo-mode MappedLUNs from the active
+     * TPG LUNs if the fabric is not explicitly asking for
+     * tpg_check_demo_mode_login_only() == 1.
+     */
+    if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) &&
+        (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1))
+        do { ; } while (0);
+    else
+        core_tpg_add_node_to_devs(acl, tpg);

-    core_tpg_add_node_to_devs(acl, tpg);
-
-    spin_lock_bh(&tpg->acl_node_lock);
+    spin_lock_irq(&tpg->acl_node_lock);
     list_add_tail(&acl->acl_list, &tpg->acl_node_list);
     tpg->num_node_acls++;
-    spin_unlock_bh(&tpg->acl_node_lock);
+    spin_unlock_irq(&tpg->acl_node_lock);

     pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
         " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
@@ -354,7 +362,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
 {
     struct se_node_acl *acl = NULL;

-    spin_lock_bh(&tpg->acl_node_lock);
+    spin_lock_irq(&tpg->acl_node_lock);
     acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
     if (acl) {
         if (acl->dynamic_node_acl) {
@@ -362,7 +370,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
             pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
                 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
-            spin_unlock_bh(&tpg->acl_node_lock);
+            spin_unlock_irq(&tpg->acl_node_lock);
             /*
              * Release the locally allocated struct se_node_acl
              * because * core_tpg_add_initiator_node_acl() returned
@@ -378,10 +386,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
             " Node %s already exists for TPG %u, ignoring"
             " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
             initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
-        spin_unlock_bh(&tpg->acl_node_lock);
+        spin_unlock_irq(&tpg->acl_node_lock);
         return ERR_PTR(-EEXIST);
     }
-    spin_unlock_bh(&tpg->acl_node_lock);
+    spin_unlock_irq(&tpg->acl_node_lock);

     if (!se_nacl) {
         pr_err("struct se_node_acl pointer is NULL\n");
@@ -418,10 +426,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
         return ERR_PTR(-EINVAL);
     }

-    spin_lock_bh(&tpg->acl_node_lock);
+    spin_lock_irq(&tpg->acl_node_lock);
     list_add_tail(&acl->acl_list, &tpg->acl_node_list);
     tpg->num_node_acls++;
-    spin_unlock_bh(&tpg->acl_node_lock);
+    spin_unlock_irq(&tpg->acl_node_lock);

 done:
     pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
@@ -445,14 +453,14 @@ int core_tpg_del_initiator_node_acl(
     struct se_session *sess, *sess_tmp;
     int dynamic_acl = 0;

-    spin_lock_bh(&tpg->acl_node_lock);
+    spin_lock_irq(&tpg->acl_node_lock);
     if (acl->dynamic_node_acl) {
         acl->dynamic_node_acl = 0;
         dynamic_acl = 1;
     }
     list_del(&acl->acl_list);
     tpg->num_node_acls--;
-    spin_unlock_bh(&tpg->acl_node_lock);
+    spin_unlock_irq(&tpg->acl_node_lock);

     spin_lock_bh(&tpg->session_lock);
     list_for_each_entry_safe(sess, sess_tmp,
@@ -503,21 +511,21 @@ int core_tpg_set_initiator_node_queue_depth(
     struct se_node_acl *acl;
     int dynamic_acl = 0;

-    spin_lock_bh(&tpg->acl_node_lock);
+    spin_lock_irq(&tpg->acl_node_lock);
     acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
     if (!acl) {
         pr_err("Access Control List entry for %s Initiator"
             " Node %s does not exists for TPG %hu, ignoring"
             " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
             initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
-        spin_unlock_bh(&tpg->acl_node_lock);
+        spin_unlock_irq(&tpg->acl_node_lock);
         return -ENODEV;
     }
     if (acl->dynamic_node_acl) {
         acl->dynamic_node_acl = 0;
         dynamic_acl = 1;
     }
-    spin_unlock_bh(&tpg->acl_node_lock);
+    spin_unlock_irq(&tpg->acl_node_lock);

     spin_lock_bh(&tpg->session_lock);
     list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
@@ -533,10 +541,10 @@ int core_tpg_set_initiator_node_queue_depth(
                 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
             spin_unlock_bh(&tpg->session_lock);

-            spin_lock_bh(&tpg->acl_node_lock);
+            spin_lock_irq(&tpg->acl_node_lock);
             if (dynamic_acl)
                 acl->dynamic_node_acl = 1;
-            spin_unlock_bh(&tpg->acl_node_lock);
+            spin_unlock_irq(&tpg->acl_node_lock);
             return -EEXIST;
         }
         /*
@@ -571,10 +579,10 @@ int core_tpg_set_initiator_node_queue_depth(
             if (init_sess)
                 tpg->se_tpg_tfo->close_session(init_sess);

-            spin_lock_bh(&tpg->acl_node_lock);
+            spin_lock_irq(&tpg->acl_node_lock);
             if (dynamic_acl)
                 acl->dynamic_node_acl = 1;
-            spin_unlock_bh(&tpg->acl_node_lock);
+            spin_unlock_irq(&tpg->acl_node_lock);
             return -EINVAL;
         }
         spin_unlock_bh(&tpg->session_lock);
@@ -590,10 +598,10 @@ int core_tpg_set_initiator_node_queue_depth(
         initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
         tpg->se_tpg_tfo->tpg_get_tag(tpg));

-    spin_lock_bh(&tpg->acl_node_lock);
+    spin_lock_irq(&tpg->acl_node_lock);
     if (dynamic_acl)
         acl->dynamic_node_acl = 1;
-    spin_unlock_bh(&tpg->acl_node_lock);
+    spin_unlock_irq(&tpg->acl_node_lock);

     return 0;
 }
@@ -717,20 +725,20 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
      * not been released because of TFO->tpg_check_demo_mode_cache() == 1
      * in transport_deregister_session().
      */
-    spin_lock_bh(&se_tpg->acl_node_lock);
+    spin_lock_irq(&se_tpg->acl_node_lock);
     list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
             acl_list) {
         list_del(&nacl->acl_list);
         se_tpg->num_node_acls--;
-        spin_unlock_bh(&se_tpg->acl_node_lock);
+        spin_unlock_irq(&se_tpg->acl_node_lock);

         core_tpg_wait_for_nacl_pr_ref(nacl);
         core_free_device_list_for_node(nacl, se_tpg);
         se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

-        spin_lock_bh(&se_tpg->acl_node_lock);
+        spin_lock_irq(&se_tpg->acl_node_lock);
     }
-    spin_unlock_bh(&se_tpg->acl_node_lock);
+    spin_unlock_irq(&se_tpg->acl_node_lock);

     if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
         core_tpg_release_virtual_lun0(se_tpg);
...
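All of the acl_node_lock conversions in this series follow the same rule: spin_lock_bh() only disables softirqs, so a lock that can also be taken on the session-teardown path with interrupts disabled (see the transport_deregister_session() fix in this pull) must use the IRQ-disabling variants everywhere to avoid deadlock. A sketch of the two process-context forms (illustrative lock, not the driver's):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void update_known_context(void)
{
    /* caller is known to run with IRQs enabled */
    spin_lock_irq(&demo_lock);
    /* ... modify shared state ... */
    spin_unlock_irq(&demo_lock);
}

static void update_any_context(void)
{
    unsigned long flags;

    /* safe when the caller's IRQ state is unknown */
    spin_lock_irqsave(&demo_lock, flags);
    /* ... modify shared state ... */
    spin_unlock_irqrestore(&demo_lock, flags);
}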
[One file's diff is collapsed in the original view and not shown here.]
@@ -256,7 +256,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
     struct se_portal_group *se_tpg = &tpg->se_tpg;
     struct se_node_acl *se_acl;

-    spin_lock_bh(&se_tpg->acl_node_lock);
+    spin_lock_irq(&se_tpg->acl_node_lock);
     list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
         acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
         pr_debug("acl %p port_name %llx\n",
@@ -270,7 +270,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
             break;
         }
     }
-    spin_unlock_bh(&se_tpg->acl_node_lock);
+    spin_unlock_irq(&se_tpg->acl_node_lock);
     return found;
 }
@@ -655,9 +655,7 @@ static void __exit ft_exit(void)
     synchronize_rcu();
 }

-#ifdef MODULE
 MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION);
 MODULE_LICENSE("GPL");
 module_init(ft_init);
 module_exit(ft_exit);
-#endif /* MODULE */
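module_init()/module_exit() and the MODULE_* macros are valid for both =y and =m builds: when built in, module_init() becomes an initcall and the exit code is simply discarded. Guarding them with #ifdef MODULE is therefore never needed, and here it was harmful, since a built-in tcm_fc would register no initcall and ft_init() would never run. A minimal skeleton showing the guard-free form (illustration only):

#include <linux/module.h>
#include <linux/init.h>

static int __init demo_init(void)
{
    return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("Guard-free init/exit skeleton");
MODULE_LICENSE("GPL");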
@@ -27,6 +27,12 @@ struct target_core_fabric_ops {
     int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
     int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
     int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *);
+    /*
+     * Optionally used by fabrics to allow demo-mode login, but not
+     * expose any TPG LUNs, and return 'not connected' in standard
+     * inquiry response
+     */
+    int (*tpg_check_demo_mode_login_only)(struct se_portal_group *);
     struct se_node_acl *(*tpg_alloc_fabric_acl)(
         struct se_portal_group *);
     void (*tpg_release_fabric_acl)(struct se_portal_group *,
...
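A fabric module opts into the new behavior by filling in this optional callback; when it returns 1, core_tpg_check_initiator_node_acl() skips core_tpg_add_node_to_devs() for dynamic ACLs, and the standard INQUIRY emulation reports 'not connected' (0x3f) for the virtual LUN 0, so initiators can log in without seeing any LUNs. A hypothetical fabric-side sketch:

/* Hypothetical fabric hook: permit demo-mode login, expose no LUNs. */
static int demo_check_demo_mode_login_only(struct se_portal_group *se_tpg)
{
    return 1;
}

/* Wired into the fabric's ops table (sketch):
 *
 *     .tpg_check_demo_mode_login_only = demo_check_demo_mode_login_only,
 *
 * Leaving the pointer NULL keeps the old full demo-mode LUN access.
 */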